fs/btrfs/scrub.c
1 /*
2  * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18
19 #include <linux/blkdev.h>
20 #include <linux/ratelimit.h>
21 #include "ctree.h"
22 #include "volumes.h"
23 #include "disk-io.h"
24 #include "ordered-data.h"
25 #include "transaction.h"
26 #include "backref.h"
27 #include "extent_io.h"
28 #include "dev-replace.h"
29 #include "check-integrity.h"
30 #include "rcu-string.h"
31 #include "raid56.h"
32
33 /*
34  * This is only the first step towards a full-featured scrub. It reads all
35  * extents and super blocks and verifies the checksums. In case a bad checksum
36  * is found or the extent cannot be read, good data will be written back if
37  * any can be found.
38  *
39  * Future enhancements:
40  *  - In case an unrepairable extent is encountered, track which files are
41  *    affected and report them
42  *  - track and record media errors, throw out bad devices
43  *  - add a mode to also read unallocated space
44  */
45
46 struct scrub_block;
47 struct scrub_ctx;
48
49 /*
50  * the following three values only influence the performance.
51  * The last one configures the number of parallel and outstanding I/O
52  * operations. The first two values configure an upper limit for the number
53  * of (dynamically allocated) pages that are added to a bio.
54  */
55 #define SCRUB_PAGES_PER_RD_BIO  32      /* 128k per bio */
56 #define SCRUB_PAGES_PER_WR_BIO  32      /* 128k per bio */
57 #define SCRUB_BIOS_PER_SCTX     64      /* 8MB per device in flight */
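
/*
 * Worked arithmetic for the limits above, assuming 4K pages (which is what
 * the "128k per bio" comments imply): 32 pages * 4K = 128K per read or write
 * bio, and 64 bios * 128K = 8MB of outstanding I/O per device.
 */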
58
59 /*
60  * the following value times PAGE_SIZE needs to be large enough to match the
61  * largest node/leaf/sector size that shall be supported.
62  * Values larger than BTRFS_STRIPE_LEN are not supported.
63  */
64 #define SCRUB_MAX_PAGES_PER_BLOCK       16      /* 64k per node/leaf/sector */
65
66 struct scrub_recover {
67         atomic_t                refs;
68         struct btrfs_bio        *bbio;
69         u64                     *raid_map;
70         u64                     map_length;
71 };
72
73 struct scrub_page {
74         struct scrub_block      *sblock;
75         struct page             *page;
76         struct btrfs_device     *dev;
77         struct list_head        list;
78         u64                     flags;  /* extent flags */
79         u64                     generation;
80         u64                     logical;
81         u64                     physical;
82         u64                     physical_for_dev_replace;
83         atomic_t                ref_count;
84         struct {
85                 unsigned int    mirror_num:8;
86                 unsigned int    have_csum:1;
87                 unsigned int    io_error:1;
88         };
89         u8                      csum[BTRFS_CSUM_SIZE];
90
91         struct scrub_recover    *recover;
92 };
93
94 struct scrub_bio {
95         int                     index;
96         struct scrub_ctx        *sctx;
97         struct btrfs_device     *dev;
98         struct bio              *bio;
99         int                     err;
100         u64                     logical;
101         u64                     physical;
102 #if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
103         struct scrub_page       *pagev[SCRUB_PAGES_PER_WR_BIO];
104 #else
105         struct scrub_page       *pagev[SCRUB_PAGES_PER_RD_BIO];
106 #endif
107         int                     page_count;
108         int                     next_free;
109         struct btrfs_work       work;
110 };
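
/*
 * Note on the #if above: it simply sizes pagev[] for the larger of
 * SCRUB_PAGES_PER_RD_BIO and SCRUB_PAGES_PER_WR_BIO (both 32 at the moment),
 * so the same struct scrub_bio can back either a read or a write bio.
 */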
111
112 struct scrub_block {
113         struct scrub_page       *pagev[SCRUB_MAX_PAGES_PER_BLOCK];
114         int                     page_count;
115         atomic_t                outstanding_pages;
116         atomic_t                ref_count; /* free mem on transition to zero */
117         struct scrub_ctx        *sctx;
118         struct scrub_parity     *sparity;
119         struct {
120                 unsigned int    header_error:1;
121                 unsigned int    checksum_error:1;
122                 unsigned int    no_io_error_seen:1;
123                 unsigned int    generation_error:1; /* also sets header_error */
124
125                 /* The following is for the data used to check parity, */
126                 /* i.e. for the data that has a checksum */
127                 unsigned int    data_corrected:1;
128         };
129 };
130
131 /* Used for the chunks with parity stripes, such as RAID5/6 */
132 struct scrub_parity {
133         struct scrub_ctx        *sctx;
134
135         struct btrfs_device     *scrub_dev;
136
137         u64                     logic_start;
138
139         u64                     logic_end;
140
141         int                     nsectors;
142
143         int                     stripe_len;
144
145         atomic_t                ref_count;
146
147         struct list_head        spages;
148
149         /* Work of parity check and repair */
150         struct btrfs_work       work;
151
152         /* Mark the parity blocks which have data */
153         unsigned long           *dbitmap;
154
155         /*
156          * Mark the parity blocks which have data, but for which errors
157          * happened when reading or checking that data
158          */
159         unsigned long           *ebitmap;
160
161         unsigned long           bitmap[0];
162 };
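
/*
 * Illustrative sketch (not the actual allocation, which lives further down in
 * this file and is not part of this excerpt) of how dbitmap and ebitmap are
 * typically pointed into the trailing bitmap[0] array, with bitmap_len being
 * the number of bytes needed to hold nsectors bits:
 *
 *	sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
 *			  GFP_NOFS);
 *	sparity->dbitmap = sparity->bitmap;
 *	sparity->ebitmap = (void *)sparity->bitmap + bitmap_len;
 */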
163
164 struct scrub_wr_ctx {
165         struct scrub_bio *wr_curr_bio;
166         struct btrfs_device *tgtdev;
167         int pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
168         atomic_t flush_all_writes;
169         struct mutex wr_lock;
170 };
171
172 struct scrub_ctx {
173         struct scrub_bio        *bios[SCRUB_BIOS_PER_SCTX];
174         struct btrfs_root       *dev_root;
175         int                     first_free;
176         int                     curr;
177         atomic_t                bios_in_flight;
178         atomic_t                workers_pending;
179         spinlock_t              list_lock;
180         wait_queue_head_t       list_wait;
181         u16                     csum_size;
182         struct list_head        csum_list;
183         atomic_t                cancel_req;
184         int                     readonly;
185         int                     pages_per_rd_bio;
186         u32                     sectorsize;
187         u32                     nodesize;
188
189         int                     is_dev_replace;
190         struct scrub_wr_ctx     wr_ctx;
191
192         /*
193          * statistics
194          */
195         struct btrfs_scrub_progress stat;
196         spinlock_t              stat_lock;
197 };
198
199 struct scrub_fixup_nodatasum {
200         struct scrub_ctx        *sctx;
201         struct btrfs_device     *dev;
202         u64                     logical;
203         struct btrfs_root       *root;
204         struct btrfs_work       work;
205         int                     mirror_num;
206 };
207
208 struct scrub_nocow_inode {
209         u64                     inum;
210         u64                     offset;
211         u64                     root;
212         struct list_head        list;
213 };
214
215 struct scrub_copy_nocow_ctx {
216         struct scrub_ctx        *sctx;
217         u64                     logical;
218         u64                     len;
219         int                     mirror_num;
220         u64                     physical_for_dev_replace;
221         struct list_head        inodes;
222         struct btrfs_work       work;
223 };
224
225 struct scrub_warning {
226         struct btrfs_path       *path;
227         u64                     extent_item_size;
228         const char              *errstr;
229         sector_t                sector;
230         u64                     logical;
231         struct btrfs_device     *dev;
232 };
233
234 static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
235 static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
236 static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
237 static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
238 static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
239 static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
240                                      struct btrfs_fs_info *fs_info,
241                                      struct scrub_block *original_sblock,
242                                      u64 length, u64 logical,
243                                      struct scrub_block *sblocks_for_recheck);
244 static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
245                                 struct scrub_block *sblock, int is_metadata,
246                                 int have_csum, u8 *csum, u64 generation,
247                                 u16 csum_size, int retry_failed_mirror);
248 static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
249                                          struct scrub_block *sblock,
250                                          int is_metadata, int have_csum,
251                                          const u8 *csum, u64 generation,
252                                          u16 csum_size);
253 static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
254                                              struct scrub_block *sblock_good,
255                                              int force_write);
256 static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
257                                             struct scrub_block *sblock_good,
258                                             int page_num, int force_write);
259 static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
260 static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
261                                            int page_num);
262 static int scrub_checksum_data(struct scrub_block *sblock);
263 static int scrub_checksum_tree_block(struct scrub_block *sblock);
264 static int scrub_checksum_super(struct scrub_block *sblock);
265 static void scrub_block_get(struct scrub_block *sblock);
266 static void scrub_block_put(struct scrub_block *sblock);
267 static void scrub_page_get(struct scrub_page *spage);
268 static void scrub_page_put(struct scrub_page *spage);
269 static void scrub_parity_get(struct scrub_parity *sparity);
270 static void scrub_parity_put(struct scrub_parity *sparity);
271 static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
272                                     struct scrub_page *spage);
273 static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
274                        u64 physical, struct btrfs_device *dev, u64 flags,
275                        u64 gen, int mirror_num, u8 *csum, int force,
276                        u64 physical_for_dev_replace);
277 static void scrub_bio_end_io(struct bio *bio, int err);
278 static void scrub_bio_end_io_worker(struct btrfs_work *work);
279 static void scrub_block_complete(struct scrub_block *sblock);
280 static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
281                                u64 extent_logical, u64 extent_len,
282                                u64 *extent_physical,
283                                struct btrfs_device **extent_dev,
284                                int *extent_mirror_num);
285 static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
286                               struct scrub_wr_ctx *wr_ctx,
287                               struct btrfs_fs_info *fs_info,
288                               struct btrfs_device *dev,
289                               int is_dev_replace);
290 static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx);
291 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
292                                     struct scrub_page *spage);
293 static void scrub_wr_submit(struct scrub_ctx *sctx);
294 static void scrub_wr_bio_end_io(struct bio *bio, int err);
295 static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
296 static int write_page_nocow(struct scrub_ctx *sctx,
297                             u64 physical_for_dev_replace, struct page *page);
298 static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
299                                       struct scrub_copy_nocow_ctx *ctx);
300 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
301                             int mirror_num, u64 physical_for_dev_replace);
302 static void copy_nocow_pages_worker(struct btrfs_work *work);
303 static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
304 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
305
306
307 static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
308 {
309         atomic_inc(&sctx->bios_in_flight);
310 }
311
312 static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
313 {
314         atomic_dec(&sctx->bios_in_flight);
315         wake_up(&sctx->list_wait);
316 }
317
318 static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
319 {
320         while (atomic_read(&fs_info->scrub_pause_req)) {
321                 mutex_unlock(&fs_info->scrub_lock);
322                 wait_event(fs_info->scrub_pause_wait,
323                    atomic_read(&fs_info->scrub_pause_req) == 0);
324                 mutex_lock(&fs_info->scrub_lock);
325         }
326 }
327
328 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
329 {
330         atomic_inc(&fs_info->scrubs_paused);
331         wake_up(&fs_info->scrub_pause_wait);
332
333         mutex_lock(&fs_info->scrub_lock);
334         __scrub_blocked_if_needed(fs_info);
335         atomic_dec(&fs_info->scrubs_paused);
336         mutex_unlock(&fs_info->scrub_lock);
337
338         wake_up(&fs_info->scrub_pause_wait);
339 }
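
/*
 * For context: the pause-requesting side (btrfs_scrub_pause() and
 * btrfs_scrub_continue(), defined elsewhere) works roughly like the sketch
 * below; this is a simplified approximation, not the exact code:
 *
 *	atomic_inc(&fs_info->scrub_pause_req);
 *	wait_event(fs_info->scrub_pause_wait,
 *		   atomic_read(&fs_info->scrubs_paused) ==
 *		   atomic_read(&fs_info->scrubs_running));
 *	... do the work that requires scrub to be paused ...
 *	atomic_dec(&fs_info->scrub_pause_req);
 *	wake_up(&fs_info->scrub_pause_wait);
 */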
340
341 /*
342  * used for workers that require transaction commits (i.e., for the
343  * NOCOW case)
344  */
345 static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
346 {
347         struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
348
349         /*
350          * increment scrubs_running to prevent cancel requests from
351          * completing as long as a worker is running. we must also
352          * requests used for transaction commits (as the worker uses a
353          * transaction context). it is safe to regard the worker
354          * as paused for all practical matters. effectively, we only
355          * prevent cancellation requests from completing.
356          * avoid cancellation requests from completing.
357          */
358         mutex_lock(&fs_info->scrub_lock);
359         atomic_inc(&fs_info->scrubs_running);
360         atomic_inc(&fs_info->scrubs_paused);
361         mutex_unlock(&fs_info->scrub_lock);
362
363         /*
364          * the check of the @scrubs_running == @scrubs_paused condition
365          * inside wait_event() is not an atomic operation,
366          * which means we may inc/dec @scrubs_running/@scrubs_paused
367          * at any time. Wake up @scrub_pause_wait as often as we can
368          * so that a blocked transaction commit waits as briefly as possible.
369          */
370         wake_up(&fs_info->scrub_pause_wait);
371
372         atomic_inc(&sctx->workers_pending);
373 }
374
375 /* used for workers that require transaction commits */
376 static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
377 {
378         struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
379
380         /*
381          * see scrub_pending_trans_workers_inc() why we're pretending
382          * to be paused in the scrub counters
383          */
384         mutex_lock(&fs_info->scrub_lock);
385         atomic_dec(&fs_info->scrubs_running);
386         atomic_dec(&fs_info->scrubs_paused);
387         mutex_unlock(&fs_info->scrub_lock);
388         atomic_dec(&sctx->workers_pending);
389         wake_up(&fs_info->scrub_pause_wait);
390         wake_up(&sctx->list_wait);
391 }
392
393 static void scrub_free_csums(struct scrub_ctx *sctx)
394 {
395         while (!list_empty(&sctx->csum_list)) {
396                 struct btrfs_ordered_sum *sum;
397                 sum = list_first_entry(&sctx->csum_list,
398                                        struct btrfs_ordered_sum, list);
399                 list_del(&sum->list);
400                 kfree(sum);
401         }
402 }
403
404 static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
405 {
406         int i;
407
408         if (!sctx)
409                 return;
410
411         scrub_free_wr_ctx(&sctx->wr_ctx);
412
413         /* this can happen when scrub is cancelled */
414         if (sctx->curr != -1) {
415                 struct scrub_bio *sbio = sctx->bios[sctx->curr];
416
417                 for (i = 0; i < sbio->page_count; i++) {
418                         WARN_ON(!sbio->pagev[i]->page);
419                         scrub_block_put(sbio->pagev[i]->sblock);
420                 }
421                 bio_put(sbio->bio);
422         }
423
424         for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
425                 struct scrub_bio *sbio = sctx->bios[i];
426
427                 if (!sbio)
428                         break;
429                 kfree(sbio);
430         }
431
432         scrub_free_csums(sctx);
433         kfree(sctx);
434 }
435
436 static noinline_for_stack
437 struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
438 {
439         struct scrub_ctx *sctx;
440         int             i;
441         struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
442         int pages_per_rd_bio;
443         int ret;
444
445         /*
446          * the setting of pages_per_rd_bio is correct for scrub but might
447          * be wrong for the dev_replace code where we might read from
448          * different devices in the initial huge bios. However, that
449          * code is able to correctly handle the case when adding a page
450          * to a bio fails.
451          */
452         if (dev->bdev)
453                 pages_per_rd_bio = min_t(int, SCRUB_PAGES_PER_RD_BIO,
454                                          bio_get_nr_vecs(dev->bdev));
455         else
456                 pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
457         sctx = kzalloc(sizeof(*sctx), GFP_NOFS);
458         if (!sctx)
459                 goto nomem;
460         sctx->is_dev_replace = is_dev_replace;
461         sctx->pages_per_rd_bio = pages_per_rd_bio;
462         sctx->curr = -1;
463         sctx->dev_root = dev->dev_root;
464         for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
465                 struct scrub_bio *sbio;
466
467                 sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
468                 if (!sbio)
469                         goto nomem;
470                 sctx->bios[i] = sbio;
471
472                 sbio->index = i;
473                 sbio->sctx = sctx;
474                 sbio->page_count = 0;
475                 btrfs_init_work(&sbio->work, btrfs_scrub_helper,
476                                 scrub_bio_end_io_worker, NULL, NULL);
477
478                 if (i != SCRUB_BIOS_PER_SCTX - 1)
479                         sctx->bios[i]->next_free = i + 1;
480                 else
481                         sctx->bios[i]->next_free = -1;
482         }
483         sctx->first_free = 0;
484         sctx->nodesize = dev->dev_root->nodesize;
485         sctx->sectorsize = dev->dev_root->sectorsize;
486         atomic_set(&sctx->bios_in_flight, 0);
487         atomic_set(&sctx->workers_pending, 0);
488         atomic_set(&sctx->cancel_req, 0);
489         sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
490         INIT_LIST_HEAD(&sctx->csum_list);
491
492         spin_lock_init(&sctx->list_lock);
493         spin_lock_init(&sctx->stat_lock);
494         init_waitqueue_head(&sctx->list_wait);
495
496         ret = scrub_setup_wr_ctx(sctx, &sctx->wr_ctx, fs_info,
497                                  fs_info->dev_replace.tgtdev, is_dev_replace);
498         if (ret) {
499                 scrub_free_ctx(sctx);
500                 return ERR_PTR(ret);
501         }
502         return sctx;
503
504 nomem:
505         scrub_free_ctx(sctx);
506         return ERR_PTR(-ENOMEM);
507 }
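
/*
 * Typical caller pattern for the constructor above (the real caller is
 * btrfs_scrub_dev(), further down in this file and outside this excerpt);
 * shown here only to illustrate the ERR_PTR-style contract:
 *
 *	struct scrub_ctx *sctx;
 *
 *	sctx = scrub_setup_ctx(dev, is_dev_replace);
 *	if (IS_ERR(sctx))
 *		return PTR_ERR(sctx);
 *	...
 *	scrub_free_ctx(sctx);
 */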
508
509 static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
510                                      void *warn_ctx)
511 {
512         u64 isize;
513         u32 nlink;
514         int ret;
515         int i;
516         struct extent_buffer *eb;
517         struct btrfs_inode_item *inode_item;
518         struct scrub_warning *swarn = warn_ctx;
519         struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
520         struct inode_fs_paths *ipath = NULL;
521         struct btrfs_root *local_root;
522         struct btrfs_key root_key;
523
524         root_key.objectid = root;
525         root_key.type = BTRFS_ROOT_ITEM_KEY;
526         root_key.offset = (u64)-1;
527         local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
528         if (IS_ERR(local_root)) {
529                 ret = PTR_ERR(local_root);
530                 goto err;
531         }
532
533         ret = inode_item_info(inum, 0, local_root, swarn->path);
534         if (ret) {
535                 btrfs_release_path(swarn->path);
536                 goto err;
537         }
538
539         eb = swarn->path->nodes[0];
540         inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
541                                         struct btrfs_inode_item);
542         isize = btrfs_inode_size(eb, inode_item);
543         nlink = btrfs_inode_nlink(eb, inode_item);
544         btrfs_release_path(swarn->path);
545
546         ipath = init_ipath(4096, local_root, swarn->path);
547         if (IS_ERR(ipath)) {
548                 ret = PTR_ERR(ipath);
549                 ipath = NULL;
550                 goto err;
551         }
552         ret = paths_from_inode(inum, ipath);
553
554         if (ret < 0)
555                 goto err;
556
557         /*
558          * we deliberately ignore the fact that ipath might have been too
559          * small to hold all of the paths here
560          */
561         for (i = 0; i < ipath->fspath->elem_cnt; ++i)
562                 printk_in_rcu(KERN_WARNING "BTRFS: %s at logical %llu on dev "
563                         "%s, sector %llu, root %llu, inode %llu, offset %llu, "
564                         "length %llu, links %u (path: %s)\n", swarn->errstr,
565                         swarn->logical, rcu_str_deref(swarn->dev->name),
566                         (unsigned long long)swarn->sector, root, inum, offset,
567                         min(isize - offset, (u64)PAGE_SIZE), nlink,
568                         (char *)(unsigned long)ipath->fspath->val[i]);
569
570         free_ipath(ipath);
571         return 0;
572
573 err:
574         printk_in_rcu(KERN_WARNING "BTRFS: %s at logical %llu on dev "
575                 "%s, sector %llu, root %llu, inode %llu, offset %llu: path "
576                 "resolving failed with ret=%d\n", swarn->errstr,
577                 swarn->logical, rcu_str_deref(swarn->dev->name),
578                 (unsigned long long)swarn->sector, root, inum, offset, ret);
579
580         free_ipath(ipath);
581         return 0;
582 }
583
584 static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
585 {
586         struct btrfs_device *dev;
587         struct btrfs_fs_info *fs_info;
588         struct btrfs_path *path;
589         struct btrfs_key found_key;
590         struct extent_buffer *eb;
591         struct btrfs_extent_item *ei;
592         struct scrub_warning swarn;
593         unsigned long ptr = 0;
594         u64 extent_item_pos;
595         u64 flags = 0;
596         u64 ref_root;
597         u32 item_size;
598         u8 ref_level;
599         int ret;
600
601         WARN_ON(sblock->page_count < 1);
602         dev = sblock->pagev[0]->dev;
603         fs_info = sblock->sctx->dev_root->fs_info;
604
605         path = btrfs_alloc_path();
606         if (!path)
607                 return;
608
609         swarn.sector = (sblock->pagev[0]->physical) >> 9;
610         swarn.logical = sblock->pagev[0]->logical;
611         swarn.errstr = errstr;
612         swarn.dev = NULL;
613
614         ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
615                                   &flags);
616         if (ret < 0)
617                 goto out;
618
619         extent_item_pos = swarn.logical - found_key.objectid;
620         swarn.extent_item_size = found_key.offset;
621
622         eb = path->nodes[0];
623         ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
624         item_size = btrfs_item_size_nr(eb, path->slots[0]);
625
626         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
627                 do {
628                         ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
629                                                       item_size, &ref_root,
630                                                       &ref_level);
631                         printk_in_rcu(KERN_WARNING
632                                 "BTRFS: %s at logical %llu on dev %s, "
633                                 "sector %llu: metadata %s (level %d) in tree "
634                                 "%llu\n", errstr, swarn.logical,
635                                 rcu_str_deref(dev->name),
636                                 (unsigned long long)swarn.sector,
637                                 ref_level ? "node" : "leaf",
638                                 ret < 0 ? -1 : ref_level,
639                                 ret < 0 ? -1 : ref_root);
640                 } while (ret != 1);
641                 btrfs_release_path(path);
642         } else {
643                 btrfs_release_path(path);
644                 swarn.path = path;
645                 swarn.dev = dev;
646                 iterate_extent_inodes(fs_info, found_key.objectid,
647                                         extent_item_pos, 1,
648                                         scrub_print_warning_inode, &swarn);
649         }
650
651 out:
652         btrfs_free_path(path);
653 }
654
655 static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
656 {
657         struct page *page = NULL;
658         unsigned long index;
659         struct scrub_fixup_nodatasum *fixup = fixup_ctx;
660         int ret;
661         int corrected = 0;
662         struct btrfs_key key;
663         struct inode *inode = NULL;
664         struct btrfs_fs_info *fs_info;
665         u64 end = offset + PAGE_SIZE - 1;
666         struct btrfs_root *local_root;
667         int srcu_index;
668
669         key.objectid = root;
670         key.type = BTRFS_ROOT_ITEM_KEY;
671         key.offset = (u64)-1;
672
673         fs_info = fixup->root->fs_info;
674         srcu_index = srcu_read_lock(&fs_info->subvol_srcu);
675
676         local_root = btrfs_read_fs_root_no_name(fs_info, &key);
677         if (IS_ERR(local_root)) {
678                 srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
679                 return PTR_ERR(local_root);
680         }
681
682         key.type = BTRFS_INODE_ITEM_KEY;
683         key.objectid = inum;
684         key.offset = 0;
685         inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
686         srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
687         if (IS_ERR(inode))
688                 return PTR_ERR(inode);
689
690         index = offset >> PAGE_CACHE_SHIFT;
691
692         page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
693         if (!page) {
694                 ret = -ENOMEM;
695                 goto out;
696         }
697
698         if (PageUptodate(page)) {
699                 if (PageDirty(page)) {
700                         /*
701                          * we need to write the data to the defect sector. the
702                          * data that was in that sector is not in memory,
703                          * because the page was modified. we must not write the
704                          * modified page to that sector.
705                          *
706                          * TODO: what could be done here: wait for the delalloc
707                          *       runner to write out that page (might involve
708                          *       COW) and see whether the sector is still
709                          *       referenced afterwards.
710                          *
711                          * For the time being, we'll treat this error as
712                          * uncorrectable, although there is a chance that a
713                          * later scrub will find the bad sector again and that
714                          * there will be no dirty page in memory by then.
715                          */
716                         ret = -EIO;
717                         goto out;
718                 }
719                 ret = repair_io_failure(inode, offset, PAGE_SIZE,
720                                         fixup->logical, page,
721                                         offset - page_offset(page),
722                                         fixup->mirror_num);
723                 unlock_page(page);
724                 corrected = !ret;
725         } else {
726                 /*
727                  * we need to get good data first. the general readpage path
728                  * will call repair_io_failure for us, we just have to make
729                  * sure we read the bad mirror.
730                  */
731                 ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
732                                         EXTENT_DAMAGED, GFP_NOFS);
733                 if (ret) {
734                         /* set_extent_bits should give proper error */
735                         WARN_ON(ret > 0);
736                         if (ret > 0)
737                                 ret = -EFAULT;
738                         goto out;
739                 }
740
741                 ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
742                                                 btrfs_get_extent,
743                                                 fixup->mirror_num);
744                 wait_on_page_locked(page);
745
746                 corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
747                                                 end, EXTENT_DAMAGED, 0, NULL);
748                 if (!corrected)
749                         clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
750                                                 EXTENT_DAMAGED, GFP_NOFS);
751         }
752
753 out:
754         if (page)
755                 put_page(page);
756
757         iput(inode);
758
759         if (ret < 0)
760                 return ret;
761
762         if (ret == 0 && corrected) {
763                 /*
764                  * we only need to call readpage for one of the inodes belonging
765                  * to this extent. so make iterate_extent_inodes stop
766                  */
767                 return 1;
768         }
769
770         return -EIO;
771 }
772
773 static void scrub_fixup_nodatasum(struct btrfs_work *work)
774 {
775         int ret;
776         struct scrub_fixup_nodatasum *fixup;
777         struct scrub_ctx *sctx;
778         struct btrfs_trans_handle *trans = NULL;
779         struct btrfs_path *path;
780         int uncorrectable = 0;
781
782         fixup = container_of(work, struct scrub_fixup_nodatasum, work);
783         sctx = fixup->sctx;
784
785         path = btrfs_alloc_path();
786         if (!path) {
787                 spin_lock(&sctx->stat_lock);
788                 ++sctx->stat.malloc_errors;
789                 spin_unlock(&sctx->stat_lock);
790                 uncorrectable = 1;
791                 goto out;
792         }
793
794         trans = btrfs_join_transaction(fixup->root);
795         if (IS_ERR(trans)) {
796                 uncorrectable = 1;
797                 goto out;
798         }
799
800         /*
801          * the idea is to trigger a regular read through the standard path. we
802          * read a page from the (failed) logical address by specifying the
803          * corresponding copynum of the failed sector. thus, that readpage is
804          * expected to fail.
805          * that is the point where on-the-fly error correction will kick in
806          * (once it's finished) and rewrite the failed sector if a good copy
807          * can be found.
808          */
809         ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info,
810                                                 path, scrub_fixup_readpage,
811                                                 fixup);
812         if (ret < 0) {
813                 uncorrectable = 1;
814                 goto out;
815         }
816         WARN_ON(ret != 1);
817
818         spin_lock(&sctx->stat_lock);
819         ++sctx->stat.corrected_errors;
820         spin_unlock(&sctx->stat_lock);
821
822 out:
823         if (trans && !IS_ERR(trans))
824                 btrfs_end_transaction(trans, fixup->root);
825         if (uncorrectable) {
826                 spin_lock(&sctx->stat_lock);
827                 ++sctx->stat.uncorrectable_errors;
828                 spin_unlock(&sctx->stat_lock);
829                 btrfs_dev_replace_stats_inc(
830                         &sctx->dev_root->fs_info->dev_replace.
831                         num_uncorrectable_read_errors);
832                 printk_ratelimited_in_rcu(KERN_ERR "BTRFS: "
833                     "unable to fixup (nodatasum) error at logical %llu on dev %s\n",
834                         fixup->logical, rcu_str_deref(fixup->dev->name));
835         }
836
837         btrfs_free_path(path);
838         kfree(fixup);
839
840         scrub_pending_trans_workers_dec(sctx);
841 }
842
843 static inline void scrub_get_recover(struct scrub_recover *recover)
844 {
845         atomic_inc(&recover->refs);
846 }
847
848 static inline void scrub_put_recover(struct scrub_recover *recover)
849 {
850         if (atomic_dec_and_test(&recover->refs)) {
851                 kfree(recover->bbio);
852                 kfree(recover->raid_map);
853                 kfree(recover);
854         }
855 }
856
857 /*
858  * scrub_handle_errored_block gets called when either verification of the
859  * pages failed or the bio failed to read, e.g. with EIO. In the latter
860  * case, this function handles all pages in the bio, even though only one
861  * may be bad.
862  * The goal of this function is to repair the errored block by using the
863  * contents of one of the mirrors.
864  */
865 static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
866 {
867         struct scrub_ctx *sctx = sblock_to_check->sctx;
868         struct btrfs_device *dev;
869         struct btrfs_fs_info *fs_info;
870         u64 length;
871         u64 logical;
872         u64 generation;
873         unsigned int failed_mirror_index;
874         unsigned int is_metadata;
875         unsigned int have_csum;
876         u8 *csum;
877         struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
878         struct scrub_block *sblock_bad;
879         int ret;
880         int mirror_index;
881         int page_num;
882         int success;
883         static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
884                                       DEFAULT_RATELIMIT_BURST);
885
886         BUG_ON(sblock_to_check->page_count < 1);
887         fs_info = sctx->dev_root->fs_info;
888         if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
889                 /*
890                  * if we find an error in a super block, we just report it.
891                  * Super blocks get rewritten with the next transaction
892                  * commit anyway
893                  */
894                 spin_lock(&sctx->stat_lock);
895                 ++sctx->stat.super_errors;
896                 spin_unlock(&sctx->stat_lock);
897                 return 0;
898         }
899         length = sblock_to_check->page_count * PAGE_SIZE;
900         logical = sblock_to_check->pagev[0]->logical;
901         generation = sblock_to_check->pagev[0]->generation;
902         BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
903         failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
904         is_metadata = !(sblock_to_check->pagev[0]->flags &
905                         BTRFS_EXTENT_FLAG_DATA);
906         have_csum = sblock_to_check->pagev[0]->have_csum;
907         csum = sblock_to_check->pagev[0]->csum;
908         dev = sblock_to_check->pagev[0]->dev;
909
910         if (sctx->is_dev_replace && !is_metadata && !have_csum) {
911                 sblocks_for_recheck = NULL;
912                 goto nodatasum_case;
913         }
914
915         /*
916          * read all mirrors one after the other. This includes re-reading
917          * the extent or metadata block that failed (which is the reason
918          * this fixup code is called), this time page by page, in order
919          * to know which pages caused I/O errors and which ones are good
920          * (for all mirrors).
921          * The goal is to handle the situation where more than one
922          * mirror contains I/O errors, but the errors do not
923          * overlap, i.e. the data can be repaired by selecting the
924          * pages from those mirrors without I/O error on the
925          * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
926          * would be that mirror #1 has an I/O error on the first page,
927          * the second page is good, and mirror #2 has an I/O error on
928          * the second page, but the first page is good.
929          * Then the first page of the first mirror can be repaired by
930          * taking the first page of the second mirror, and the
931          * second page of the second mirror can be repaired by
932          * copying the contents of the 2nd page of the 1st mirror.
933          * One more note: if the pages of one mirror contain I/O
934          * errors, the checksum cannot be verified. In order to get
935          * the best data for repairing, the first attempt is to find
936          * a mirror without I/O errors and with a validated checksum.
937          * Only if this is not possible, the pages are picked from
938          * mirrors with I/O errors without considering the checksum.
939          * If the latter is the case, at the end, the checksum of the
940          * repaired area is verified in order to correctly maintain
941          * the statistics.
942          */
943
944         sblocks_for_recheck = kzalloc(BTRFS_MAX_MIRRORS *
945                                      sizeof(*sblocks_for_recheck),
946                                      GFP_NOFS);
947         if (!sblocks_for_recheck) {
948                 spin_lock(&sctx->stat_lock);
949                 sctx->stat.malloc_errors++;
950                 sctx->stat.read_errors++;
951                 sctx->stat.uncorrectable_errors++;
952                 spin_unlock(&sctx->stat_lock);
953                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
954                 goto out;
955         }
956
957         /* setup the context, map the logical blocks and alloc the pages */
958         ret = scrub_setup_recheck_block(sctx, fs_info, sblock_to_check, length,
959                                         logical, sblocks_for_recheck);
960         if (ret) {
961                 spin_lock(&sctx->stat_lock);
962                 sctx->stat.read_errors++;
963                 sctx->stat.uncorrectable_errors++;
964                 spin_unlock(&sctx->stat_lock);
965                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
966                 goto out;
967         }
968         BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
969         sblock_bad = sblocks_for_recheck + failed_mirror_index;
970
971         /* build and submit the bios for the failed mirror, check checksums */
972         scrub_recheck_block(fs_info, sblock_bad, is_metadata, have_csum,
973                             csum, generation, sctx->csum_size, 1);
974
975         if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
976             sblock_bad->no_io_error_seen) {
977                 /*
978                  * the error disappeared after reading page by page, or
979                  * the area was part of a huge bio and other parts of the
980                  * bio caused I/O errors, or the block layer merged several
981                  * read requests into one and the error is caused by a
982                  * different bio (usually one of the two latter cases is
983                  * the cause)
984                  */
985                 spin_lock(&sctx->stat_lock);
986                 sctx->stat.unverified_errors++;
987                 sblock_to_check->data_corrected = 1;
988                 spin_unlock(&sctx->stat_lock);
989
990                 if (sctx->is_dev_replace)
991                         scrub_write_block_to_dev_replace(sblock_bad);
992                 goto out;
993         }
994
995         if (!sblock_bad->no_io_error_seen) {
996                 spin_lock(&sctx->stat_lock);
997                 sctx->stat.read_errors++;
998                 spin_unlock(&sctx->stat_lock);
999                 if (__ratelimit(&_rs))
1000                         scrub_print_warning("i/o error", sblock_to_check);
1001                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
1002         } else if (sblock_bad->checksum_error) {
1003                 spin_lock(&sctx->stat_lock);
1004                 sctx->stat.csum_errors++;
1005                 spin_unlock(&sctx->stat_lock);
1006                 if (__ratelimit(&_rs))
1007                         scrub_print_warning("checksum error", sblock_to_check);
1008                 btrfs_dev_stat_inc_and_print(dev,
1009                                              BTRFS_DEV_STAT_CORRUPTION_ERRS);
1010         } else if (sblock_bad->header_error) {
1011                 spin_lock(&sctx->stat_lock);
1012                 sctx->stat.verify_errors++;
1013                 spin_unlock(&sctx->stat_lock);
1014                 if (__ratelimit(&_rs))
1015                         scrub_print_warning("checksum/header error",
1016                                             sblock_to_check);
1017                 if (sblock_bad->generation_error)
1018                         btrfs_dev_stat_inc_and_print(dev,
1019                                 BTRFS_DEV_STAT_GENERATION_ERRS);
1020                 else
1021                         btrfs_dev_stat_inc_and_print(dev,
1022                                 BTRFS_DEV_STAT_CORRUPTION_ERRS);
1023         }
1024
1025         if (sctx->readonly) {
1026                 ASSERT(!sctx->is_dev_replace);
1027                 goto out;
1028         }
1029
1030         if (!is_metadata && !have_csum) {
1031                 struct scrub_fixup_nodatasum *fixup_nodatasum;
1032
1033 nodatasum_case:
1034                 WARN_ON(sctx->is_dev_replace);
1035
1036                 /*
1037                  * !is_metadata and !have_csum mean that the data
1038                  * might not be COWed and might be modified
1039                  * concurrently. The general strategy of working on the
1040                  * commit root does not help in the case when COW is not
1041                  * used.
1042                  */
1043                 fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
1044                 if (!fixup_nodatasum)
1045                         goto did_not_correct_error;
1046                 fixup_nodatasum->sctx = sctx;
1047                 fixup_nodatasum->dev = dev;
1048                 fixup_nodatasum->logical = logical;
1049                 fixup_nodatasum->root = fs_info->extent_root;
1050                 fixup_nodatasum->mirror_num = failed_mirror_index + 1;
1051                 scrub_pending_trans_workers_inc(sctx);
1052                 btrfs_init_work(&fixup_nodatasum->work, btrfs_scrub_helper,
1053                                 scrub_fixup_nodatasum, NULL, NULL);
1054                 btrfs_queue_work(fs_info->scrub_workers,
1055                                  &fixup_nodatasum->work);
1056                 goto out;
1057         }
1058
1059         /*
1060          * now build and submit the bios for the other mirrors, check
1061          * checksums.
1062          * First try to pick the mirror which is completely without I/O
1063          * errors and also does not have a checksum error.
1064          * If one is found, and if a checksum is present, the full block
1065          * that is known to contain an error is rewritten. Afterwards
1066          * the block is known to be corrected.
1067          * If a mirror is found which is completely correct, and no
1068          * checksum is present, only those pages are rewritten that had
1069          * an I/O error in the block to be repaired, since it cannot be
1070          * determined, which copy of the other pages is better (and it
1071          * could happen otherwise that a correct page would be
1072          * overwritten by a bad one).
1073          */
1074         for (mirror_index = 0;
1075              mirror_index < BTRFS_MAX_MIRRORS &&
1076              sblocks_for_recheck[mirror_index].page_count > 0;
1077              mirror_index++) {
1078                 struct scrub_block *sblock_other;
1079
1080                 if (mirror_index == failed_mirror_index)
1081                         continue;
1082                 sblock_other = sblocks_for_recheck + mirror_index;
1083
1084                 /* build and submit the bios, check checksums */
1085                 scrub_recheck_block(fs_info, sblock_other, is_metadata,
1086                                     have_csum, csum, generation,
1087                                     sctx->csum_size, 0);
1088
1089                 if (!sblock_other->header_error &&
1090                     !sblock_other->checksum_error &&
1091                     sblock_other->no_io_error_seen) {
1092                         if (sctx->is_dev_replace) {
1093                                 scrub_write_block_to_dev_replace(sblock_other);
1094                         } else {
1095                                 int force_write = is_metadata || have_csum;
1096
1097                                 ret = scrub_repair_block_from_good_copy(
1098                                                 sblock_bad, sblock_other,
1099                                                 force_write);
1100                         }
1101                         if (0 == ret)
1102                                 goto corrected_error;
1103                 }
1104         }
1105
1106         /*
1107          * for dev_replace, pick good pages and write to the target device.
1108          */
1109         if (sctx->is_dev_replace) {
1110                 success = 1;
1111                 for (page_num = 0; page_num < sblock_bad->page_count;
1112                      page_num++) {
1113                         int sub_success;
1114
1115                         sub_success = 0;
1116                         for (mirror_index = 0;
1117                              mirror_index < BTRFS_MAX_MIRRORS &&
1118                              sblocks_for_recheck[mirror_index].page_count > 0;
1119                              mirror_index++) {
1120                                 struct scrub_block *sblock_other =
1121                                         sblocks_for_recheck + mirror_index;
1122                                 struct scrub_page *page_other =
1123                                         sblock_other->pagev[page_num];
1124
1125                                 if (!page_other->io_error) {
1126                                         ret = scrub_write_page_to_dev_replace(
1127                                                         sblock_other, page_num);
1128                                         if (ret == 0) {
1129                                                 /* succeeded for this page */
1130                                                 sub_success = 1;
1131                                                 break;
1132                                         } else {
1133                                                 btrfs_dev_replace_stats_inc(
1134                                                         &sctx->dev_root->
1135                                                         fs_info->dev_replace.
1136                                                         num_write_errors);
1137                                         }
1138                                 }
1139                         }
1140
1141                         if (!sub_success) {
1142                                 /*
1143                                  * did not find a mirror to fetch the page
1144                                  * from. scrub_write_page_to_dev_replace()
1145                                  * handles this case (page->io_error), by
1146                                  * filling the block with zeros before
1147                                  * submitting the write request
1148                                  */
1149                                 success = 0;
1150                                 ret = scrub_write_page_to_dev_replace(
1151                                                 sblock_bad, page_num);
1152                                 if (ret)
1153                                         btrfs_dev_replace_stats_inc(
1154                                                 &sctx->dev_root->fs_info->
1155                                                 dev_replace.num_write_errors);
1156                         }
1157                 }
1158
1159                 goto out;
1160         }
1161
1162         /*
1163          * for regular scrub, repair those pages that are errored.
1164          * In case of I/O errors in the area that is supposed to be
1165          * repaired, continue by picking good copies of those pages.
1166          * Select the good pages from mirrors to rewrite bad pages from
1167          * the area to fix. Afterwards verify the checksum of the block
1168          * that is supposed to be repaired. This verification step is
1169          * only done for the purpose of statistics counting and for the
1170          * final scrub report on whether errors remain.
1171          * A perfect algorithm could make use of the checksum and try
1172          * all possible combinations of pages from the different mirrors
1173          * until the checksum verification succeeds. For example, when
1174          * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
1175          * of mirror #2 is readable but the final checksum test fails,
1176          * then the 2nd page of mirror #3 could be tried, whether now
1177          * then the 2nd page of mirror #3 could be tried to see whether
1178          * the final checksum then succeeds. But this would be a rare
1179          * avoided that the good copy is overwritten.
1180          * A more useful improvement would be to pick the sectors
1181          * without I/O error based on sector sizes (512 bytes on legacy
1182          * disks) instead of on PAGE_SIZE. Then maybe 512 bytes of one
1183          * mirror could be repaired by taking 512 bytes of a different
1184          * mirror, even if other 512 byte sectors in the same PAGE_SIZE
1185          * area are unreadable.
1186          */
1187
1188         /* can only fix I/O errors from here on */
1189         if (sblock_bad->no_io_error_seen)
1190                 goto did_not_correct_error;
1191
1192         success = 1;
1193         for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
1194                 struct scrub_page *page_bad = sblock_bad->pagev[page_num];
1195
1196                 if (!page_bad->io_error)
1197                         continue;
1198
1199                 for (mirror_index = 0;
1200                      mirror_index < BTRFS_MAX_MIRRORS &&
1201                      sblocks_for_recheck[mirror_index].page_count > 0;
1202                      mirror_index++) {
1203                         struct scrub_block *sblock_other = sblocks_for_recheck +
1204                                                            mirror_index;
1205                         struct scrub_page *page_other = sblock_other->pagev[
1206                                                         page_num];
1207
1208                         if (!page_other->io_error) {
1209                                 ret = scrub_repair_page_from_good_copy(
1210                                         sblock_bad, sblock_other, page_num, 0);
1211                                 if (0 == ret) {
1212                                         page_bad->io_error = 0;
1213                                         break; /* succeeded for this page */
1214                                 }
1215                         }
1216                 }
1217
1218                 if (page_bad->io_error) {
1219                         /* did not find a mirror to copy the page from */
1220                         success = 0;
1221                 }
1222         }
1223
1224         if (success) {
1225                 if (is_metadata || have_csum) {
1226                         /*
1227                          * need to verify the checksum now that all
1228                          * sectors on disk are repaired (the write
1229                          * request for data to be repaired is on its way).
1230                          * Just be lazy and use scrub_recheck_block()
1231                          * which re-reads the data before the checksum
1232                          * is verified, but most likely the data comes out
1233                          * of the page cache.
1234                          */
1235                         scrub_recheck_block(fs_info, sblock_bad,
1236                                             is_metadata, have_csum, csum,
1237                                             generation, sctx->csum_size, 1);
1238                         if (!sblock_bad->header_error &&
1239                             !sblock_bad->checksum_error &&
1240                             sblock_bad->no_io_error_seen)
1241                                 goto corrected_error;
1242                         else
1243                                 goto did_not_correct_error;
1244                 } else {
1245 corrected_error:
1246                         spin_lock(&sctx->stat_lock);
1247                         sctx->stat.corrected_errors++;
1248                         sblock_to_check->data_corrected = 1;
1249                         spin_unlock(&sctx->stat_lock);
1250                         printk_ratelimited_in_rcu(KERN_ERR
1251                                 "BTRFS: fixed up error at logical %llu on dev %s\n",
1252                                 logical, rcu_str_deref(dev->name));
1253                 }
1254         } else {
1255 did_not_correct_error:
1256                 spin_lock(&sctx->stat_lock);
1257                 sctx->stat.uncorrectable_errors++;
1258                 spin_unlock(&sctx->stat_lock);
1259                 printk_ratelimited_in_rcu(KERN_ERR
1260                         "BTRFS: unable to fixup (regular) error at logical %llu on dev %s\n",
1261                         logical, rcu_str_deref(dev->name));
1262         }
1263
1264 out:
1265         if (sblocks_for_recheck) {
1266                 for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
1267                      mirror_index++) {
1268                         struct scrub_block *sblock = sblocks_for_recheck +
1269                                                      mirror_index;
1270                         struct scrub_recover *recover;
1271                         int page_index;
1272
1273                         for (page_index = 0; page_index < sblock->page_count;
1274                              page_index++) {
1275                                 sblock->pagev[page_index]->sblock = NULL;
1276                                 recover = sblock->pagev[page_index]->recover;
1277                                 if (recover) {
1278                                         scrub_put_recover(recover);
1279                                         sblock->pagev[page_index]->recover =
1280                                                                         NULL;
1281                                 }
1282                                 scrub_page_put(sblock->pagev[page_index]);
1283                         }
1284                 }
1285                 kfree(sblocks_for_recheck);
1286         }
1287
1288         return 0;
1289 }
1290
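/*
 * Number of copies that the recheck code can try for one block: for
 * RAID5/6 (raid_map != NULL) this is 3 when a Q stripe is present
 * (RAID6) and 2 otherwise (RAID5); for all other profiles it is the
 * number of stripes (i.e. mirrors) returned by the mapping code.
 */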
1291 static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio, u64 *raid_map)
1292 {
1293         if (raid_map) {
1294                 if (raid_map[bbio->num_stripes - 1] == RAID6_Q_STRIPE)
1295                         return 3;
1296                 else
1297                         return 2;
1298         } else {
1299                 return (int)bbio->num_stripes;
1300         }
1301 }
1302
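/*
 * Map a logical address to the stripe index and the offset within that
 * stripe. For RAID5/6 the raid_map is searched for the data stripe that
 * covers the logical address (P and Q stripes are skipped); for all
 * other profiles the mirror number selects the stripe directly and the
 * offset is 0.
 */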
1303 static inline void scrub_stripe_index_and_offset(u64 logical, u64 *raid_map,
1304                                                  u64 mapped_length,
1305                                                  int nstripes, int mirror,
1306                                                  int *stripe_index,
1307                                                  u64 *stripe_offset)
1308 {
1309         int i;
1310
1311         if (raid_map) {
1312                 /* RAID5/6 */
1313                 for (i = 0; i < nstripes; i++) {
1314                         if (raid_map[i] == RAID6_Q_STRIPE ||
1315                             raid_map[i] == RAID5_P_STRIPE)
1316                                 continue;
1317
1318                         if (logical >= raid_map[i] &&
1319                             logical < raid_map[i] + mapped_length)
1320                                 break;
1321                 }
1322
1323                 *stripe_index = i;
1324                 *stripe_offset = logical - raid_map[i];
1325         } else {
1326                 /* The other RAID type */
1327                 *stripe_index = mirror;
1328                 *stripe_offset = 0;
1329         }
1330 }
1331
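/*
 * Build one scrub_block per mirror for the given logical range so that
 * the recheck code can read every copy of the data independently. Each
 * page remembers the device and physical address it maps to; the
 * scrub_recover structure keeps the block mapping alive for RAID5/6
 * reconstruction.
 */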
1332 static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
1333                                      struct btrfs_fs_info *fs_info,
1334                                      struct scrub_block *original_sblock,
1335                                      u64 length, u64 logical,
1336                                      struct scrub_block *sblocks_for_recheck)
1337 {
1338         struct scrub_recover *recover;
1339         struct btrfs_bio *bbio;
1340         u64 *raid_map;
1341         u64 sublen;
1342         u64 mapped_length;
1343         u64 stripe_offset;
1344         int stripe_index;
1345         int page_index;
1346         int mirror_index;
1347         int nmirrors;
1348         int ret;
1349
1350         /*
1351          * note: the two members ref_count and outstanding_pages
1352          * are not used (and not set) in the blocks that are used for
1353          * the recheck procedure
1354          */
1355
1356         page_index = 0;
1357         while (length > 0) {
1358                 sublen = min_t(u64, length, PAGE_SIZE);
1359                 mapped_length = sublen;
1360                 bbio = NULL;
1361                 raid_map = NULL;
1362
1363                 /*
1364                  * with a length of PAGE_SIZE, each returned stripe
1365                  * represents one mirror
1366                  */
1367                 ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical,
1368                                        &mapped_length, &bbio, 0, &raid_map);
1369                 if (ret || !bbio || mapped_length < sublen) {
1370                         kfree(bbio);
1371                         kfree(raid_map);
1372                         return -EIO;
1373                 }
1374
1375                 recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
1376                 if (!recover) {
1377                         kfree(bbio);
1378                         kfree(raid_map);
1379                         return -ENOMEM;
1380                 }
1381
1382                 atomic_set(&recover->refs, 1);
1383                 recover->bbio = bbio;
1384                 recover->raid_map = raid_map;
1385                 recover->map_length = mapped_length;
1386
1387                 BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO);
1388
1389                 nmirrors = scrub_nr_raid_mirrors(bbio, raid_map);
1390                 for (mirror_index = 0; mirror_index < nmirrors;
1391                      mirror_index++) {
1392                         struct scrub_block *sblock;
1393                         struct scrub_page *page;
1394
1395                         if (mirror_index >= BTRFS_MAX_MIRRORS)
1396                                 continue;
1397
1398                         sblock = sblocks_for_recheck + mirror_index;
1399                         sblock->sctx = sctx;
1400                         page = kzalloc(sizeof(*page), GFP_NOFS);
1401                         if (!page) {
1402 leave_nomem:
1403                                 spin_lock(&sctx->stat_lock);
1404                                 sctx->stat.malloc_errors++;
1405                                 spin_unlock(&sctx->stat_lock);
1406                                 scrub_put_recover(recover);
1407                                 return -ENOMEM;
1408                         }
1409                         scrub_page_get(page);
1410                         sblock->pagev[page_index] = page;
1411                         page->logical = logical;
1412
1413                         scrub_stripe_index_and_offset(logical, raid_map,
1414                                                       mapped_length,
1415                                                       bbio->num_stripes,
1416                                                       mirror_index,
1417                                                       &stripe_index,
1418                                                       &stripe_offset);
1419                         page->physical = bbio->stripes[stripe_index].physical +
1420                                          stripe_offset;
1421                         page->dev = bbio->stripes[stripe_index].dev;
1422
1423                         BUG_ON(page_index >= original_sblock->page_count);
1424                         page->physical_for_dev_replace =
1425                                 original_sblock->pagev[page_index]->
1426                                 physical_for_dev_replace;
1427                         /* for missing devices, dev->bdev is NULL */
1428                         page->mirror_num = mirror_index + 1;
1429                         sblock->page_count++;
1430                         page->page = alloc_page(GFP_NOFS);
1431                         if (!page->page)
1432                                 goto leave_nomem;
1433
1434                         scrub_get_recover(recover);
1435                         page->recover = recover;
1436                 }
1437                 scrub_put_recover(recover);
1438                 length -= sublen;
1439                 logical += sublen;
1440                 page_index++;
1441         }
1442
1443         return 0;
1444 }
1445
1446 struct scrub_bio_ret {
1447         struct completion event;
1448         int error;
1449 };
1450
1451 static void scrub_bio_wait_endio(struct bio *bio, int error)
1452 {
1453         struct scrub_bio_ret *ret = bio->bi_private;
1454
1455         ret->error = error;
1456         complete(&ret->event);
1457 }
1458
1459 static inline int scrub_is_page_on_raid56(struct scrub_page *page)
1460 {
1461         return page->recover && page->recover->raid_map;
1462 }
1463
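/*
 * Synchronous read of one page through the RAID5/6 recovery code: the
 * bio completion is signalled via a completion object, the function
 * waits for the recovery to finish and returns -EIO if it failed.
 */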
1464 static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
1465                                         struct bio *bio,
1466                                         struct scrub_page *page)
1467 {
1468         struct scrub_bio_ret done;
1469         int ret;
1470
1471         init_completion(&done.event);
1472         done.error = 0;
1473         bio->bi_iter.bi_sector = page->logical >> 9;
1474         bio->bi_private = &done;
1475         bio->bi_end_io = scrub_bio_wait_endio;
1476
1477         ret = raid56_parity_recover(fs_info->fs_root, bio, page->recover->bbio,
1478                                     page->recover->raid_map,
1479                                     page->recover->map_length,
1480                                     page->mirror_num, 0);
1481         if (ret)
1482                 return ret;
1483
1484         wait_for_completion(&done.event);
1485         if (done.error)
1486                 return -EIO;
1487
1488         return 0;
1489 }
1490
1491 /*
1492  * this function checks the on-disk data for checksum errors, header
1493  * errors and read I/O errors. If any I/O errors happen, the exact pages
1494  * that encountered errors are marked as bad. The goal is to enable scrub
1495  * to take the non-errored pages from all the mirrors so that the pages
1496  * that are errored in the just handled mirror can be repaired.
1497  */
1498 static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
1499                                 struct scrub_block *sblock, int is_metadata,
1500                                 int have_csum, u8 *csum, u64 generation,
1501                                 u16 csum_size, int retry_failed_mirror)
1502 {
1503         int page_num;
1504
1505         sblock->no_io_error_seen = 1;
1506         sblock->header_error = 0;
1507         sblock->checksum_error = 0;
1508
1509         for (page_num = 0; page_num < sblock->page_count; page_num++) {
1510                 struct bio *bio;
1511                 struct scrub_page *page = sblock->pagev[page_num];
1512
1513                 if (page->dev->bdev == NULL) {
1514                         page->io_error = 1;
1515                         sblock->no_io_error_seen = 0;
1516                         continue;
1517                 }
1518
1519                 WARN_ON(!page->page);
1520                 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
1521                 if (!bio) {
1522                         page->io_error = 1;
1523                         sblock->no_io_error_seen = 0;
1524                         continue;
1525                 }
1526                 bio->bi_bdev = page->dev->bdev;
1527
1528                 bio_add_page(bio, page->page, PAGE_SIZE, 0);
1529                 if (!retry_failed_mirror && scrub_is_page_on_raid56(page)) {
1530                         if (scrub_submit_raid56_bio_wait(fs_info, bio, page))
1531                                 sblock->no_io_error_seen = 0;
1532                 } else {
1533                         bio->bi_iter.bi_sector = page->physical >> 9;
1534
1535                         if (btrfsic_submit_bio_wait(READ, bio))
1536                                 sblock->no_io_error_seen = 0;
1537                 }
1538
1539                 bio_put(bio);
1540         }
1541
1542         if (sblock->no_io_error_seen)
1543                 scrub_recheck_block_checksum(fs_info, sblock, is_metadata,
1544                                              have_csum, csum, generation,
1545                                              csum_size);
1546
1547         return;
1548 }
1549
1550 static inline int scrub_check_fsid(u8 fsid[],
1551                                    struct scrub_page *spage)
1552 {
1553         struct btrfs_fs_devices *fs_devices = spage->dev->fs_devices;
1554         int ret;
1555
1556         ret = memcmp(fsid, fs_devices->fsid, BTRFS_UUID_SIZE);
1557         return !ret;
1558 }
1559
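/*
 * Recompute the checksum over all pages of the block and compare it
 * against the expected value. For metadata the header fields (bytenr,
 * fsid, chunk tree uuid, generation) are verified as well and the
 * checksum embedded in the header is used for the comparison; data
 * blocks without a known checksum are skipped. The results are recorded
 * in the header_error, generation_error and checksum_error flags of the
 * scrub_block.
 */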
1560 static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
1561                                          struct scrub_block *sblock,
1562                                          int is_metadata, int have_csum,
1563                                          const u8 *csum, u64 generation,
1564                                          u16 csum_size)
1565 {
1566         int page_num;
1567         u8 calculated_csum[BTRFS_CSUM_SIZE];
1568         u32 crc = ~(u32)0;
1569         void *mapped_buffer;
1570
1571         WARN_ON(!sblock->pagev[0]->page);
1572         if (is_metadata) {
1573                 struct btrfs_header *h;
1574
1575                 mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
1576                 h = (struct btrfs_header *)mapped_buffer;
1577
1578                 if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h) ||
1579                     !scrub_check_fsid(h->fsid, sblock->pagev[0]) ||
1580                     memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
1581                            BTRFS_UUID_SIZE)) {
1582                         sblock->header_error = 1;
1583                 } else if (generation != btrfs_stack_header_generation(h)) {
1584                         sblock->header_error = 1;
1585                         sblock->generation_error = 1;
1586                 }
1587                 csum = h->csum;
1588         } else {
1589                 if (!have_csum)
1590                         return;
1591
1592                 mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
1593         }
1594
1595         for (page_num = 0;;) {
1596                 if (page_num == 0 && is_metadata)
1597                         crc = btrfs_csum_data(
1598                                 ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE,
1599                                 crc, PAGE_SIZE - BTRFS_CSUM_SIZE);
1600                 else
1601                         crc = btrfs_csum_data(mapped_buffer, crc, PAGE_SIZE);
1602
1603                 kunmap_atomic(mapped_buffer);
1604                 page_num++;
1605                 if (page_num >= sblock->page_count)
1606                         break;
1607                 WARN_ON(!sblock->pagev[page_num]->page);
1608
1609                 mapped_buffer = kmap_atomic(sblock->pagev[page_num]->page);
1610         }
1611
1612         btrfs_csum_final(crc, calculated_csum);
1613         if (memcmp(calculated_csum, csum, csum_size))
1614                 sblock->checksum_error = 1;
1615 }
1616
1617 static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
1618                                              struct scrub_block *sblock_good,
1619                                              int force_write)
1620 {
1621         int page_num;
1622         int ret = 0;
1623
1624         for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
1625                 int ret_sub;
1626
1627                 ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
1628                                                            sblock_good,
1629                                                            page_num,
1630                                                            force_write);
1631                 if (ret_sub)
1632                         ret = ret_sub;
1633         }
1634
1635         return ret;
1636 }
1637
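/*
 * Rewrite a single page in place on the bad mirror, using the content of
 * the corresponding page from the good mirror. The write is only issued
 * when forced or when the bad block shows header, checksum or I/O
 * errors; it is submitted synchronously, and the write error statistics
 * of the device and of the running dev-replace operation are updated on
 * failure.
 */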
1638 static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
1639                                             struct scrub_block *sblock_good,
1640                                             int page_num, int force_write)
1641 {
1642         struct scrub_page *page_bad = sblock_bad->pagev[page_num];
1643         struct scrub_page *page_good = sblock_good->pagev[page_num];
1644
1645         BUG_ON(page_bad->page == NULL);
1646         BUG_ON(page_good->page == NULL);
1647         if (force_write || sblock_bad->header_error ||
1648             sblock_bad->checksum_error || page_bad->io_error) {
1649                 struct bio *bio;
1650                 int ret;
1651
1652                 if (!page_bad->dev->bdev) {
1653                         printk_ratelimited(KERN_WARNING "BTRFS: "
1654                                 "scrub_repair_page_from_good_copy(bdev == NULL) "
1655                                 "is unexpected!\n");
1656                         return -EIO;
1657                 }
1658
1659                 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
1660                 if (!bio)
1661                         return -EIO;
1662                 bio->bi_bdev = page_bad->dev->bdev;
1663                 bio->bi_iter.bi_sector = page_bad->physical >> 9;
1664
1665                 ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
1666                 if (PAGE_SIZE != ret) {
1667                         bio_put(bio);
1668                         return -EIO;
1669                 }
1670
1671                 if (btrfsic_submit_bio_wait(WRITE, bio)) {
1672                         btrfs_dev_stat_inc_and_print(page_bad->dev,
1673                                 BTRFS_DEV_STAT_WRITE_ERRS);
1674                         btrfs_dev_replace_stats_inc(
1675                                 &sblock_bad->sctx->dev_root->fs_info->
1676                                 dev_replace.num_write_errors);
1677                         bio_put(bio);
1678                         return -EIO;
1679                 }
1680                 bio_put(bio);
1681         }
1682
1683         return 0;
1684 }
1685
1686 static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
1687 {
1688         int page_num;
1689
1690         /*
1691          * This block is used for checking the parity on the source device,
1692          * so the data does not need to be written to the destination device.
1693          */
1694         if (sblock->sparity)
1695                 return;
1696
1697         for (page_num = 0; page_num < sblock->page_count; page_num++) {
1698                 int ret;
1699
1700                 ret = scrub_write_page_to_dev_replace(sblock, page_num);
1701                 if (ret)
1702                         btrfs_dev_replace_stats_inc(
1703                                 &sblock->sctx->dev_root->fs_info->dev_replace.
1704                                 num_write_errors);
1705         }
1706 }
1707
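/*
 * Queue one page for writing to the dev-replace target. Pages that had a
 * read error are zeroed out first so that no stale data ends up on the
 * target device.
 */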
1708 static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
1709                                            int page_num)
1710 {
1711         struct scrub_page *spage = sblock->pagev[page_num];
1712
1713         BUG_ON(spage->page == NULL);
1714         if (spage->io_error) {
1715                 void *mapped_buffer = kmap_atomic(spage->page);
1716
1717                 memset(mapped_buffer, 0, PAGE_CACHE_SIZE);
1718                 flush_dcache_page(spage->page);
1719                 kunmap_atomic(mapped_buffer);
1720         }
1721         return scrub_add_page_to_wr_bio(sblock->sctx, spage);
1722 }
1723
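/*
 * Append the page to the current write bio for the dev-replace target.
 * The bio is submitted early when the new page is not physically or
 * logically contiguous with the pages already collected, and always when
 * the bio is full.
 */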
1724 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
1725                                     struct scrub_page *spage)
1726 {
1727         struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
1728         struct scrub_bio *sbio;
1729         int ret;
1730
1731         mutex_lock(&wr_ctx->wr_lock);
1732 again:
1733         if (!wr_ctx->wr_curr_bio) {
1734                 wr_ctx->wr_curr_bio = kzalloc(sizeof(*wr_ctx->wr_curr_bio),
1735                                               GFP_NOFS);
1736                 if (!wr_ctx->wr_curr_bio) {
1737                         mutex_unlock(&wr_ctx->wr_lock);
1738                         return -ENOMEM;
1739                 }
1740                 wr_ctx->wr_curr_bio->sctx = sctx;
1741                 wr_ctx->wr_curr_bio->page_count = 0;
1742         }
1743         sbio = wr_ctx->wr_curr_bio;
1744         if (sbio->page_count == 0) {
1745                 struct bio *bio;
1746
1747                 sbio->physical = spage->physical_for_dev_replace;
1748                 sbio->logical = spage->logical;
1749                 sbio->dev = wr_ctx->tgtdev;
1750                 bio = sbio->bio;
1751                 if (!bio) {
1752                         bio = btrfs_io_bio_alloc(GFP_NOFS, wr_ctx->pages_per_wr_bio);
1753                         if (!bio) {
1754                                 mutex_unlock(&wr_ctx->wr_lock);
1755                                 return -ENOMEM;
1756                         }
1757                         sbio->bio = bio;
1758                 }
1759
1760                 bio->bi_private = sbio;
1761                 bio->bi_end_io = scrub_wr_bio_end_io;
1762                 bio->bi_bdev = sbio->dev->bdev;
1763                 bio->bi_iter.bi_sector = sbio->physical >> 9;
1764                 sbio->err = 0;
1765         } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
1766                    spage->physical_for_dev_replace ||
1767                    sbio->logical + sbio->page_count * PAGE_SIZE !=
1768                    spage->logical) {
1769                 scrub_wr_submit(sctx);
1770                 goto again;
1771         }
1772
1773         ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
1774         if (ret != PAGE_SIZE) {
1775                 if (sbio->page_count < 1) {
1776                         bio_put(sbio->bio);
1777                         sbio->bio = NULL;
1778                         mutex_unlock(&wr_ctx->wr_lock);
1779                         return -EIO;
1780                 }
1781                 scrub_wr_submit(sctx);
1782                 goto again;
1783         }
1784
1785         sbio->pagev[sbio->page_count] = spage;
1786         scrub_page_get(spage);
1787         sbio->page_count++;
1788         if (sbio->page_count == wr_ctx->pages_per_wr_bio)
1789                 scrub_wr_submit(sctx);
1790         mutex_unlock(&wr_ctx->wr_lock);
1791
1792         return 0;
1793 }
1794
1795 static void scrub_wr_submit(struct scrub_ctx *sctx)
1796 {
1797         struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
1798         struct scrub_bio *sbio;
1799
1800         if (!wr_ctx->wr_curr_bio)
1801                 return;
1802
1803         sbio = wr_ctx->wr_curr_bio;
1804         wr_ctx->wr_curr_bio = NULL;
1805         WARN_ON(!sbio->bio->bi_bdev);
1806         scrub_pending_bio_inc(sctx);
1807         /* process all writes in a single worker thread so that the block
1808          * layer can order the requests before sending them to the driver;
1809          * this doubled the write performance on spinning disks when
1810          * measured with Linux 3.5 */
1811         btrfsic_submit_bio(WRITE, sbio->bio);
1812 }
1813
1814 static void scrub_wr_bio_end_io(struct bio *bio, int err)
1815 {
1816         struct scrub_bio *sbio = bio->bi_private;
1817         struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
1818
1819         sbio->err = err;
1820         sbio->bio = bio;
1821
1822         btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
1823                          scrub_wr_bio_end_io_worker, NULL, NULL);
1824         btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
1825 }
1826
1827 static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
1828 {
1829         struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
1830         struct scrub_ctx *sctx = sbio->sctx;
1831         int i;
1832
1833         WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
1834         if (sbio->err) {
1835                 struct btrfs_dev_replace *dev_replace =
1836                         &sbio->sctx->dev_root->fs_info->dev_replace;
1837
1838                 for (i = 0; i < sbio->page_count; i++) {
1839                         struct scrub_page *spage = sbio->pagev[i];
1840
1841                         spage->io_error = 1;
1842                         btrfs_dev_replace_stats_inc(&dev_replace->
1843                                                     num_write_errors);
1844                 }
1845         }
1846
1847         for (i = 0; i < sbio->page_count; i++)
1848                 scrub_page_put(sbio->pagev[i]);
1849
1850         bio_put(sbio->bio);
1851         kfree(sbio);
1852         scrub_pending_bio_dec(sctx);
1853 }
1854
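/*
 * Dispatch the checksum verification according to the extent flags of
 * the block (data, tree block or super block). Super block errors are
 * only counted, not repaired. A non-zero return value means the block is
 * corrupted and triggers the error handling and repair path.
 */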
1855 static int scrub_checksum(struct scrub_block *sblock)
1856 {
1857         u64 flags;
1858         int ret;
1859
1860         WARN_ON(sblock->page_count < 1);
1861         flags = sblock->pagev[0]->flags;
1862         ret = 0;
1863         if (flags & BTRFS_EXTENT_FLAG_DATA)
1864                 ret = scrub_checksum_data(sblock);
1865         else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1866                 ret = scrub_checksum_tree_block(sblock);
1867         else if (flags & BTRFS_EXTENT_FLAG_SUPER)
1868                 (void)scrub_checksum_super(sblock);
1869         else
1870                 WARN_ON(1);
1871         if (ret)
1872                 scrub_handle_errored_block(sblock);
1873
1874         return ret;
1875 }
1876
1877 static int scrub_checksum_data(struct scrub_block *sblock)
1878 {
1879         struct scrub_ctx *sctx = sblock->sctx;
1880         u8 csum[BTRFS_CSUM_SIZE];
1881         u8 *on_disk_csum;
1882         struct page *page;
1883         void *buffer;
1884         u32 crc = ~(u32)0;
1885         int fail = 0;
1886         u64 len;
1887         int index;
1888
1889         BUG_ON(sblock->page_count < 1);
1890         if (!sblock->pagev[0]->have_csum)
1891                 return 0;
1892
1893         on_disk_csum = sblock->pagev[0]->csum;
1894         page = sblock->pagev[0]->page;
1895         buffer = kmap_atomic(page);
1896
1897         len = sctx->sectorsize;
1898         index = 0;
1899         for (;;) {
1900                 u64 l = min_t(u64, len, PAGE_SIZE);
1901
1902                 crc = btrfs_csum_data(buffer, crc, l);
1903                 kunmap_atomic(buffer);
1904                 len -= l;
1905                 if (len == 0)
1906                         break;
1907                 index++;
1908                 BUG_ON(index >= sblock->page_count);
1909                 BUG_ON(!sblock->pagev[index]->page);
1910                 page = sblock->pagev[index]->page;
1911                 buffer = kmap_atomic(page);
1912         }
1913
1914         btrfs_csum_final(crc, csum);
1915         if (memcmp(csum, on_disk_csum, sctx->csum_size))
1916                 fail = 1;
1917
1918         return fail;
1919 }
1920
1921 static int scrub_checksum_tree_block(struct scrub_block *sblock)
1922 {
1923         struct scrub_ctx *sctx = sblock->sctx;
1924         struct btrfs_header *h;
1925         struct btrfs_root *root = sctx->dev_root;
1926         struct btrfs_fs_info *fs_info = root->fs_info;
1927         u8 calculated_csum[BTRFS_CSUM_SIZE];
1928         u8 on_disk_csum[BTRFS_CSUM_SIZE];
1929         struct page *page;
1930         void *mapped_buffer;
1931         u64 mapped_size;
1932         void *p;
1933         u32 crc = ~(u32)0;
1934         int fail = 0;
1935         int crc_fail = 0;
1936         u64 len;
1937         int index;
1938
1939         BUG_ON(sblock->page_count < 1);
1940         page = sblock->pagev[0]->page;
1941         mapped_buffer = kmap_atomic(page);
1942         h = (struct btrfs_header *)mapped_buffer;
1943         memcpy(on_disk_csum, h->csum, sctx->csum_size);
1944
1945         /*
1946          * we don't use the getter functions here, as we
1947          * a) don't have an extent buffer and
1948          * b) the page is already kmapped
1949          */
1950
1951         if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h))
1952                 ++fail;
1953
1954         if (sblock->pagev[0]->generation != btrfs_stack_header_generation(h))
1955                 ++fail;
1956
1957         if (!scrub_check_fsid(h->fsid, sblock->pagev[0]))
1958                 ++fail;
1959
1960         if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
1961                    BTRFS_UUID_SIZE))
1962                 ++fail;
1963
1964         len = sctx->nodesize - BTRFS_CSUM_SIZE;
1965         mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
1966         p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
1967         index = 0;
1968         for (;;) {
1969                 u64 l = min_t(u64, len, mapped_size);
1970
1971                 crc = btrfs_csum_data(p, crc, l);
1972                 kunmap_atomic(mapped_buffer);
1973                 len -= l;
1974                 if (len == 0)
1975                         break;
1976                 index++;
1977                 BUG_ON(index >= sblock->page_count);
1978                 BUG_ON(!sblock->pagev[index]->page);
1979                 page = sblock->pagev[index]->page;
1980                 mapped_buffer = kmap_atomic(page);
1981                 mapped_size = PAGE_SIZE;
1982                 p = mapped_buffer;
1983         }
1984
1985         btrfs_csum_final(crc, calculated_csum);
1986         if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
1987                 ++crc_fail;
1988
1989         return fail || crc_fail;
1990 }
1991
1992 static int scrub_checksum_super(struct scrub_block *sblock)
1993 {
1994         struct btrfs_super_block *s;
1995         struct scrub_ctx *sctx = sblock->sctx;
1996         u8 calculated_csum[BTRFS_CSUM_SIZE];
1997         u8 on_disk_csum[BTRFS_CSUM_SIZE];
1998         struct page *page;
1999         void *mapped_buffer;
2000         u64 mapped_size;
2001         void *p;
2002         u32 crc = ~(u32)0;
2003         int fail_gen = 0;
2004         int fail_cor = 0;
2005         u64 len;
2006         int index;
2007
2008         BUG_ON(sblock->page_count < 1);
2009         page = sblock->pagev[0]->page;
2010         mapped_buffer = kmap_atomic(page);
2011         s = (struct btrfs_super_block *)mapped_buffer;
2012         memcpy(on_disk_csum, s->csum, sctx->csum_size);
2013
2014         if (sblock->pagev[0]->logical != btrfs_super_bytenr(s))
2015                 ++fail_cor;
2016
2017         if (sblock->pagev[0]->generation != btrfs_super_generation(s))
2018                 ++fail_gen;
2019
2020         if (!scrub_check_fsid(s->fsid, sblock->pagev[0]))
2021                 ++fail_cor;
2022
2023         len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
2024         mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
2025         p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
2026         index = 0;
2027         for (;;) {
2028                 u64 l = min_t(u64, len, mapped_size);
2029
2030                 crc = btrfs_csum_data(p, crc, l);
2031                 kunmap_atomic(mapped_buffer);
2032                 len -= l;
2033                 if (len == 0)
2034                         break;
2035                 index++;
2036                 BUG_ON(index >= sblock->page_count);
2037                 BUG_ON(!sblock->pagev[index]->page);
2038                 page = sblock->pagev[index]->page;
2039                 mapped_buffer = kmap_atomic(page);
2040                 mapped_size = PAGE_SIZE;
2041                 p = mapped_buffer;
2042         }
2043
2044         btrfs_csum_final(crc, calculated_csum);
2045         if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
2046                 ++fail_cor;
2047
2048         if (fail_cor + fail_gen) {
2049                 /*
2050                  * if we find an error in a super block, we just report it;
2051                  * the super blocks will be rewritten with the next
2052                  * transaction commit anyway
2053                  */
2054                 spin_lock(&sctx->stat_lock);
2055                 ++sctx->stat.super_errors;
2056                 spin_unlock(&sctx->stat_lock);
2057                 if (fail_cor)
2058                         btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
2059                                 BTRFS_DEV_STAT_CORRUPTION_ERRS);
2060                 else
2061                         btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
2062                                 BTRFS_DEV_STAT_GENERATION_ERRS);
2063         }
2064
2065         return fail_cor + fail_gen;
2066 }
2067
2068 static void scrub_block_get(struct scrub_block *sblock)
2069 {
2070         atomic_inc(&sblock->ref_count);
2071 }
2072
2073 static void scrub_block_put(struct scrub_block *sblock)
2074 {
2075         if (atomic_dec_and_test(&sblock->ref_count)) {
2076                 int i;
2077
2078                 if (sblock->sparity)
2079                         scrub_parity_put(sblock->sparity);
2080
2081                 for (i = 0; i < sblock->page_count; i++)
2082                         scrub_page_put(sblock->pagev[i]);
2083                 kfree(sblock);
2084         }
2085 }
2086
2087 static void scrub_page_get(struct scrub_page *spage)
2088 {
2089         atomic_inc(&spage->ref_count);
2090 }
2091
2092 static void scrub_page_put(struct scrub_page *spage)
2093 {
2094         if (atomic_dec_and_test(&spage->ref_count)) {
2095                 if (spage->page)
2096                         __free_page(spage->page);
2097                 kfree(spage);
2098         }
2099 }
2100
2101 static void scrub_submit(struct scrub_ctx *sctx)
2102 {
2103         struct scrub_bio *sbio;
2104
2105         if (sctx->curr == -1)
2106                 return;
2107
2108         sbio = sctx->bios[sctx->curr];
2109         sctx->curr = -1;
2110         scrub_pending_bio_inc(sctx);
2111
2112         if (!sbio->bio->bi_bdev) {
2113                 /*
2114                  * this case should not happen. If btrfs_map_block() is
2115                  * wrong, it could happen for dev-replace operations on
2116                  * missing devices when no mirrors are available, but in
2117                  * this case it should already fail the mount.
2118                  * This case is handled correctly (but _very_ slowly).
2119                  */
2120                 printk_ratelimited(KERN_WARNING
2121                         "BTRFS: scrub_submit(bio bdev == NULL) is unexpected!\n");
2122                 bio_endio(sbio->bio, -EIO);
2123         } else {
2124                 btrfsic_submit_bio(READ, sbio->bio);
2125         }
2126 }
2127
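/*
 * Append the page to the current read bio, waiting for a free bio slot
 * if necessary. The bio is submitted early when the page is not
 * physically or logically contiguous with the previous one or belongs to
 * a different device, and always when the bio is full.
 */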
2128 static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
2129                                     struct scrub_page *spage)
2130 {
2131         struct scrub_block *sblock = spage->sblock;
2132         struct scrub_bio *sbio;
2133         int ret;
2134
2135 again:
2136         /*
2137          * grab a fresh bio or wait for one to become available
2138          */
2139         while (sctx->curr == -1) {
2140                 spin_lock(&sctx->list_lock);
2141                 sctx->curr = sctx->first_free;
2142                 if (sctx->curr != -1) {
2143                         sctx->first_free = sctx->bios[sctx->curr]->next_free;
2144                         sctx->bios[sctx->curr]->next_free = -1;
2145                         sctx->bios[sctx->curr]->page_count = 0;
2146                         spin_unlock(&sctx->list_lock);
2147                 } else {
2148                         spin_unlock(&sctx->list_lock);
2149                         wait_event(sctx->list_wait, sctx->first_free != -1);
2150                 }
2151         }
2152         sbio = sctx->bios[sctx->curr];
2153         if (sbio->page_count == 0) {
2154                 struct bio *bio;
2155
2156                 sbio->physical = spage->physical;
2157                 sbio->logical = spage->logical;
2158                 sbio->dev = spage->dev;
2159                 bio = sbio->bio;
2160                 if (!bio) {
2161                         bio = btrfs_io_bio_alloc(GFP_NOFS, sctx->pages_per_rd_bio);
2162                         if (!bio)
2163                                 return -ENOMEM;
2164                         sbio->bio = bio;
2165                 }
2166
2167                 bio->bi_private = sbio;
2168                 bio->bi_end_io = scrub_bio_end_io;
2169                 bio->bi_bdev = sbio->dev->bdev;
2170                 bio->bi_iter.bi_sector = sbio->physical >> 9;
2171                 sbio->err = 0;
2172         } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
2173                    spage->physical ||
2174                    sbio->logical + sbio->page_count * PAGE_SIZE !=
2175                    spage->logical ||
2176                    sbio->dev != spage->dev) {
2177                 scrub_submit(sctx);
2178                 goto again;
2179         }
2180
2181         sbio->pagev[sbio->page_count] = spage;
2182         ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
2183         if (ret != PAGE_SIZE) {
2184                 if (sbio->page_count < 1) {
2185                         bio_put(sbio->bio);
2186                         sbio->bio = NULL;
2187                         return -EIO;
2188                 }
2189                 scrub_submit(sctx);
2190                 goto again;
2191         }
2192
2193         scrub_block_get(sblock); /* one for the page added to the bio */
2194         atomic_inc(&sblock->outstanding_pages);
2195         sbio->page_count++;
2196         if (sbio->page_count == sctx->pages_per_rd_bio)
2197                 scrub_submit(sctx);
2198
2199         return 0;
2200 }
2201
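/*
 * Split the range [logical, logical + len) into PAGE_SIZE pieces, collect
 * them in a freshly allocated scrub_block and queue every page into a
 * read bio. With 'force' set the bio is submitted immediately instead of
 * waiting for it to fill up.
 */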
2202 static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
2203                        u64 physical, struct btrfs_device *dev, u64 flags,
2204                        u64 gen, int mirror_num, u8 *csum, int force,
2205                        u64 physical_for_dev_replace)
2206 {
2207         struct scrub_block *sblock;
2208         int index;
2209
2210         sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
2211         if (!sblock) {
2212                 spin_lock(&sctx->stat_lock);
2213                 sctx->stat.malloc_errors++;
2214                 spin_unlock(&sctx->stat_lock);
2215                 return -ENOMEM;
2216         }
2217
2218         /* one ref inside this function, plus one for each page added to
2219          * a bio later on */
2220         atomic_set(&sblock->ref_count, 1);
2221         sblock->sctx = sctx;
2222         sblock->no_io_error_seen = 1;
2223
2224         for (index = 0; len > 0; index++) {
2225                 struct scrub_page *spage;
2226                 u64 l = min_t(u64, len, PAGE_SIZE);
2227
2228                 spage = kzalloc(sizeof(*spage), GFP_NOFS);
2229                 if (!spage) {
2230 leave_nomem:
2231                         spin_lock(&sctx->stat_lock);
2232                         sctx->stat.malloc_errors++;
2233                         spin_unlock(&sctx->stat_lock);
2234                         scrub_block_put(sblock);
2235                         return -ENOMEM;
2236                 }
2237                 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2238                 scrub_page_get(spage);
2239                 sblock->pagev[index] = spage;
2240                 spage->sblock = sblock;
2241                 spage->dev = dev;
2242                 spage->flags = flags;
2243                 spage->generation = gen;
2244                 spage->logical = logical;
2245                 spage->physical = physical;
2246                 spage->physical_for_dev_replace = physical_for_dev_replace;
2247                 spage->mirror_num = mirror_num;
2248                 if (csum) {
2249                         spage->have_csum = 1;
2250                         memcpy(spage->csum, csum, sctx->csum_size);
2251                 } else {
2252                         spage->have_csum = 0;
2253                 }
2254                 sblock->page_count++;
2255                 spage->page = alloc_page(GFP_NOFS);
2256                 if (!spage->page)
2257                         goto leave_nomem;
2258                 len -= l;
2259                 logical += l;
2260                 physical += l;
2261                 physical_for_dev_replace += l;
2262         }
2263
2264         WARN_ON(sblock->page_count == 0);
2265         for (index = 0; index < sblock->page_count; index++) {
2266                 struct scrub_page *spage = sblock->pagev[index];
2267                 int ret;
2268
2269                 ret = scrub_add_page_to_rd_bio(sctx, spage);
2270                 if (ret) {
2271                         scrub_block_put(sblock);
2272                         return ret;
2273                 }
2274         }
2275
2276         if (force)
2277                 scrub_submit(sctx);
2278
2279         /* last one frees, either here or in bio completion for last page */
2280         scrub_block_put(sblock);
2281         return 0;
2282 }
2283
2284 static void scrub_bio_end_io(struct bio *bio, int err)
2285 {
2286         struct scrub_bio *sbio = bio->bi_private;
2287         struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
2288
2289         sbio->err = err;
2290         sbio->bio = bio;
2291
2292         btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
2293 }
2294
2295 static void scrub_bio_end_io_worker(struct btrfs_work *work)
2296 {
2297         struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
2298         struct scrub_ctx *sctx = sbio->sctx;
2299         int i;
2300
2301         BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
2302         if (sbio->err) {
2303                 for (i = 0; i < sbio->page_count; i++) {
2304                         struct scrub_page *spage = sbio->pagev[i];
2305
2306                         spage->io_error = 1;
2307                         spage->sblock->no_io_error_seen = 0;
2308                 }
2309         }
2310
2311         /* now complete the scrub_block items that have all pages completed */
2312         for (i = 0; i < sbio->page_count; i++) {
2313                 struct scrub_page *spage = sbio->pagev[i];
2314                 struct scrub_block *sblock = spage->sblock;
2315
2316                 if (atomic_dec_and_test(&sblock->outstanding_pages))
2317                         scrub_block_complete(sblock);
2318                 scrub_block_put(sblock);
2319         }
2320
2321         bio_put(sbio->bio);
2322         sbio->bio = NULL;
2323         spin_lock(&sctx->list_lock);
2324         sbio->next_free = sctx->first_free;
2325         sctx->first_free = sbio->index;
2326         spin_unlock(&sctx->list_lock);
2327
2328         if (sctx->is_dev_replace &&
2329             atomic_read(&sctx->wr_ctx.flush_all_writes)) {
2330                 mutex_lock(&sctx->wr_ctx.wr_lock);
2331                 scrub_wr_submit(sctx);
2332                 mutex_unlock(&sctx->wr_ctx.wr_lock);
2333         }
2334
2335         scrub_pending_bio_dec(sctx);
2336 }
2337
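/*
 * Mark the sectors covered by [start, start + len) in the given parity
 * bitmap. The start is translated into a sector offset within the
 * stripe; a range that spans at least a full stripe sets every bit, and
 * ranges that reach beyond the end of the stripe wrap around to the
 * beginning of the bitmap.
 */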
2338 static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
2339                                        unsigned long *bitmap,
2340                                        u64 start, u64 len)
2341 {
2342         int offset;
2343         int nsectors;
2344         int sectorsize = sparity->sctx->dev_root->sectorsize;
2345
2346         if (len >= sparity->stripe_len) {
2347                 bitmap_set(bitmap, 0, sparity->nsectors);
2348                 return;
2349         }
2350
2351         start -= sparity->logic_start;
2352         offset = (int)do_div(start, sparity->stripe_len);
2353         offset /= sectorsize;
2354         nsectors = (int)len / sectorsize;
2355
2356         if (offset + nsectors <= sparity->nsectors) {
2357                 bitmap_set(bitmap, offset, nsectors);
2358                 return;
2359         }
2360
2361         bitmap_set(bitmap, offset, sparity->nsectors - offset);
2362         bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
2363 }
2364
2365 static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
2366                                                    u64 start, u64 len)
2367 {
2368         __scrub_mark_bitmap(sparity, sparity->ebitmap, start, len);
2369 }
2370
2371 static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
2372                                                   u64 start, u64 len)
2373 {
2374         __scrub_mark_bitmap(sparity, sparity->dbitmap, start, len);
2375 }
2376
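/*
 * Called once all pages of a block have completed their I/O. Blocks with
 * read errors go straight to the repair code; all others get their
 * checksums verified and, in the dev-replace case, clean blocks are
 * copied to the target device. For parity scrubs, the sectors of
 * corrupted blocks that could not be repaired are marked in the error
 * bitmap.
 */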
2377 static void scrub_block_complete(struct scrub_block *sblock)
2378 {
2379         int corrupted = 0;
2380
2381         if (!sblock->no_io_error_seen) {
2382                 corrupted = 1;
2383                 scrub_handle_errored_block(sblock);
2384         } else {
2385                 /*
2386                  * in the dev-replace case, a block with a checksum error
2387                  * is written to the target via the repair mechanism,
2388                  * otherwise it is written to the target right here.
2389                  */
2390                 corrupted = scrub_checksum(sblock);
2391                 if (!corrupted && sblock->sctx->is_dev_replace)
2392                         scrub_write_block_to_dev_replace(sblock);
2393         }
2394
2395         if (sblock->sparity && corrupted && !sblock->data_corrected) {
2396                 u64 start = sblock->pagev[0]->logical;
2397                 u64 end = sblock->pagev[sblock->page_count - 1]->logical +
2398                           PAGE_SIZE;
2399
2400                 scrub_parity_mark_sectors_error(sblock->sparity,
2401                                                 start, end - start);
2402         }
2403 }
2404
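/*
 * Look up the checksum for the given logical address in the list of
 * checksums that was pre-loaded for the current chunk. Stale entries
 * that end before the logical address are dropped on the way. Returns 1
 * and copies the checksum into 'csum' when a matching entry is found,
 * 0 otherwise.
 */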
2405 static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u64 len,
2406                            u8 *csum)
2407 {
2408         struct btrfs_ordered_sum *sum = NULL;
2409         unsigned long index;
2410         unsigned long num_sectors;
2411
2412         while (!list_empty(&sctx->csum_list)) {
2413                 sum = list_first_entry(&sctx->csum_list,
2414                                        struct btrfs_ordered_sum, list);
2415                 if (sum->bytenr > logical)
2416                         return 0;
2417                 if (sum->bytenr + sum->len > logical)
2418                         break;
2419
2420                 ++sctx->stat.csum_discards;
2421                 list_del(&sum->list);
2422                 kfree(sum);
2423                 sum = NULL;
2424         }
2425         if (!sum)
2426                 return 0;
2427
2428         index = ((u32)(logical - sum->bytenr)) / sctx->sectorsize;
2429         num_sectors = sum->len / sctx->sectorsize;
2430         memcpy(csum, sum->sums + index, sctx->csum_size);
2431         if (index == num_sectors - 1) {
2432                 list_del(&sum->list);
2433                 kfree(sum);
2434         }
2435         return 1;
2436 }
2437
2438 /* scrub extent tries to collect up to 64 kB for each bio */
2439 static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
2440                         u64 physical, struct btrfs_device *dev, u64 flags,
2441                         u64 gen, int mirror_num, u64 physical_for_dev_replace)
2442 {
2443         int ret;
2444         u8 csum[BTRFS_CSUM_SIZE];
2445         u32 blocksize;
2446
2447         if (flags & BTRFS_EXTENT_FLAG_DATA) {
2448                 blocksize = sctx->sectorsize;
2449                 spin_lock(&sctx->stat_lock);
2450                 sctx->stat.data_extents_scrubbed++;
2451                 sctx->stat.data_bytes_scrubbed += len;
2452                 spin_unlock(&sctx->stat_lock);
2453         } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2454                 blocksize = sctx->nodesize;
2455                 spin_lock(&sctx->stat_lock);
2456                 sctx->stat.tree_extents_scrubbed++;
2457                 sctx->stat.tree_bytes_scrubbed += len;
2458                 spin_unlock(&sctx->stat_lock);
2459         } else {
2460                 blocksize = sctx->sectorsize;
2461                 WARN_ON(1);
2462         }
2463
2464         while (len) {
2465                 u64 l = min_t(u64, len, blocksize);
2466                 int have_csum = 0;
2467
2468                 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2469                         /* push csums to sbio */
2470                         have_csum = scrub_find_csum(sctx, logical, l, csum);
2471                         if (have_csum == 0)
2472                                 ++sctx->stat.no_csum;
2473                         if (sctx->is_dev_replace && !have_csum) {
2474                                 ret = copy_nocow_pages(sctx, logical, l,
2475                                                        mirror_num,
2476                                                       physical_for_dev_replace);
2477                                 goto behind_scrub_pages;
2478                         }
2479                 }
2480                 ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
2481                                   mirror_num, have_csum ? csum : NULL, 0,
2482                                   physical_for_dev_replace);
2483 behind_scrub_pages:
2484                 if (ret)
2485                         return ret;
2486                 len -= l;
2487                 logical += l;
2488                 physical += l;
2489                 physical_for_dev_replace += l;
2490         }
2491         return 0;
2492 }
2493
2494 static int scrub_pages_for_parity(struct scrub_parity *sparity,
2495                                   u64 logical, u64 len,
2496                                   u64 physical, struct btrfs_device *dev,
2497                                   u64 flags, u64 gen, int mirror_num, u8 *csum)
2498 {
2499         struct scrub_ctx *sctx = sparity->sctx;
2500         struct scrub_block *sblock;
2501         int index;
2502
2503         sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
2504         if (!sblock) {
2505                 spin_lock(&sctx->stat_lock);
2506                 sctx->stat.malloc_errors++;
2507                 spin_unlock(&sctx->stat_lock);
2508                 return -ENOMEM;
2509         }
2510
2511         /* one ref inside this function, plus one for each page added to
2512          * a bio later on */
2513         atomic_set(&sblock->ref_count, 1);
2514         sblock->sctx = sctx;
2515         sblock->no_io_error_seen = 1;
2516         sblock->sparity = sparity;
2517         scrub_parity_get(sparity);
2518
2519         for (index = 0; len > 0; index++) {
2520                 struct scrub_page *spage;
2521                 u64 l = min_t(u64, len, PAGE_SIZE);
2522
2523                 spage = kzalloc(sizeof(*spage), GFP_NOFS);
2524                 if (!spage) {
2525 leave_nomem:
2526                         spin_lock(&sctx->stat_lock);
2527                         sctx->stat.malloc_errors++;
2528                         spin_unlock(&sctx->stat_lock);
2529                         scrub_block_put(sblock);
2530                         return -ENOMEM;
2531                 }
2532                 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2533                 /* For scrub block */
2534                 scrub_page_get(spage);
2535                 sblock->pagev[index] = spage;
2536                 /* For scrub parity */
2537                 scrub_page_get(spage);
2538                 list_add_tail(&spage->list, &sparity->spages);
2539                 spage->sblock = sblock;
2540                 spage->dev = dev;
2541                 spage->flags = flags;
2542                 spage->generation = gen;
2543                 spage->logical = logical;
2544                 spage->physical = physical;
2545                 spage->mirror_num = mirror_num;
2546                 if (csum) {
2547                         spage->have_csum = 1;
2548                         memcpy(spage->csum, csum, sctx->csum_size);
2549                 } else {
2550                         spage->have_csum = 0;
2551                 }
2552                 sblock->page_count++;
2553                 spage->page = alloc_page(GFP_NOFS);
2554                 if (!spage->page)
2555                         goto leave_nomem;
2556                 len -= l;
2557                 logical += l;
2558                 physical += l;
2559         }
2560
2561         WARN_ON(sblock->page_count == 0);
2562         for (index = 0; index < sblock->page_count; index++) {
2563                 struct scrub_page *spage = sblock->pagev[index];
2564                 int ret;
2565
2566                 ret = scrub_add_page_to_rd_bio(sctx, spage);
2567                 if (ret) {
2568                         scrub_block_put(sblock);
2569                         return ret;
2570                 }
2571         }
2572
2573         /* last one frees, either here or in bio completion for last page */
2574         scrub_block_put(sblock);
2575         return 0;
2576 }
2577
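/*
 * Like scrub_extent(), but for the data stripes of a RAID5/6 parity
 * stripe: data blocks without a checksum are skipped, and every page is
 * additionally tracked in the scrub_parity structure so that it can be
 * used for the parity check later.
 */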
2578 static int scrub_extent_for_parity(struct scrub_parity *sparity,
2579                                    u64 logical, u64 len,
2580                                    u64 physical, struct btrfs_device *dev,
2581                                    u64 flags, u64 gen, int mirror_num)
2582 {
2583         struct scrub_ctx *sctx = sparity->sctx;
2584         int ret;
2585         u8 csum[BTRFS_CSUM_SIZE];
2586         u32 blocksize;
2587
2588         if (flags & BTRFS_EXTENT_FLAG_DATA) {
2589                 blocksize = sctx->sectorsize;
2590         } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2591                 blocksize = sctx->nodesize;
2592         } else {
2593                 blocksize = sctx->sectorsize;
2594                 WARN_ON(1);
2595         }
2596
2597         while (len) {
2598                 u64 l = min_t(u64, len, blocksize);
2599                 int have_csum = 0;
2600
2601                 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2602                         /* push csums to sbio */
2603                         have_csum = scrub_find_csum(sctx, logical, l, csum);
2604                         if (have_csum == 0)
2605                                 goto skip;
2606                 }
2607                 ret = scrub_pages_for_parity(sparity, logical, l, physical, dev,
2608                                              flags, gen, mirror_num,
2609                                              have_csum ? csum : NULL);
2610                 if (ret)
2611                         return ret;
2612 skip:
2613                 len -= l;
2614                 logical += l;
2615                 physical += l;
2616         }
2617         return 0;
2618 }
2619
2620 /*
2621  * Given a physical address, this will calculate its
2622  * logical offset. If this is a parity stripe, it will return
2623  * the leftmost data stripe's logical offset.
2624  *
2625  * Returns 0 if it is a data stripe, 1 if it is a parity stripe.
2626  */
2627 static int get_raid56_logic_offset(u64 physical, int num,
2628                                    struct map_lookup *map, u64 *offset,
2629                                    u64 *stripe_start)
2630 {
2631         int i;
2632         int j = 0;
2633         u64 stripe_nr;
2634         u64 last_offset;
2635         int stripe_index;
2636         int rot;
2637
2638         last_offset = (physical - map->stripes[num].physical) *
2639                       nr_data_stripes(map);
2640         if (stripe_start)
2641                 *stripe_start = last_offset;
2642
2643         *offset = last_offset;
2644         for (i = 0; i < nr_data_stripes(map); i++) {
2645                 *offset = last_offset + i * map->stripe_len;
2646
2647                 stripe_nr = *offset;
2648                 do_div(stripe_nr, map->stripe_len);
2649                 do_div(stripe_nr, nr_data_stripes(map));
2650
2651                 /* Work out the disk rotation on this stripe-set */
2652                 rot = do_div(stripe_nr, map->num_stripes);
2653                 /* calculate which stripe this data is located on */
2654                 rot += i;
2655                 stripe_index = rot % map->num_stripes;
2656                 if (stripe_index == num)
2657                         return 0;
2658                 if (stripe_index < num)
2659                         j++;
2660         }
2661         *offset = last_offset + j * map->stripe_len;
2662         return 1;
2663 }
2664
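/*
 * Release a scrub_parity structure. Sectors that are still marked in the
 * error bitmap at this point could not be repaired and are accounted as
 * read errors and uncorrectable errors before the pages held for the
 * parity check are dropped.
 */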
2665 static void scrub_free_parity(struct scrub_parity *sparity)
2666 {
2667         struct scrub_ctx *sctx = sparity->sctx;
2668         struct scrub_page *curr, *next;
2669         int nbits;
2670
2671         nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors);
2672         if (nbits) {
2673                 spin_lock(&sctx->stat_lock);
2674                 sctx->stat.read_errors += nbits;
2675                 sctx->stat.uncorrectable_errors += nbits;
2676                 spin_unlock(&sctx->stat_lock);
2677         }
2678
2679         list_for_each_entry_safe(curr, next, &sparity->spages, list) {
2680                 list_del_init(&curr->list);
2681                 scrub_page_put(curr);
2682         }
2683
2684         kfree(sparity);
2685 }
2686
2687 static void scrub_parity_bio_endio(struct bio *bio, int error)
2688 {
2689         struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
2690         struct scrub_ctx *sctx = sparity->sctx;
2691
2692         if (error)
2693                 bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
2694                           sparity->nsectors);
2695
2696         scrub_free_parity(sparity);
2697         scrub_pending_bio_dec(sctx);
2698         bio_put(bio);
2699 }
2700
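/*
 * Kick off the actual parity check/repair for an assembled scrub_parity:
 * sectors that hit read errors are removed from the data bitmap, and if any
 * data sectors remain, a scrub rbio covering [logic_start, logic_end] is
 * built and submitted to the raid56 layer.  On mapping or allocation
 * failure everything is marked as an error and a malloc error is accounted
 * before the scrub_parity is freed.
 */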
2701 static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
2702 {
2703         struct scrub_ctx *sctx = sparity->sctx;
2704         struct bio *bio;
2705         struct btrfs_raid_bio *rbio;
2706         struct scrub_page *spage;
2707         struct btrfs_bio *bbio = NULL;
2708         u64 *raid_map = NULL;
2709         u64 length;
2710         int ret;
2711
2712         if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap, sparity->ebitmap,
2713                            sparity->nsectors))
2714                 goto out;
2715
2716         length = sparity->logic_end - sparity->logic_start + 1;
2717         ret = btrfs_map_sblock(sctx->dev_root->fs_info, WRITE,
2718                                sparity->logic_start,
2719                                &length, &bbio, 0, &raid_map);
2720         if (ret || !bbio || !raid_map)
2721                 goto bbio_out;
2722
2723         bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
2724         if (!bio)
2725                 goto bbio_out;
2726
2727         bio->bi_iter.bi_sector = sparity->logic_start >> 9;
2728         bio->bi_private = sparity;
2729         bio->bi_end_io = scrub_parity_bio_endio;
2730
2731         rbio = raid56_parity_alloc_scrub_rbio(sctx->dev_root, bio, bbio,
2732                                               raid_map, length,
2733                                               sparity->scrub_dev,
2734                                               sparity->dbitmap,
2735                                               sparity->nsectors);
2736         if (!rbio)
2737                 goto rbio_out;
2738
2739         list_for_each_entry(spage, &sparity->spages, list)
2740                 raid56_parity_add_scrub_pages(rbio, spage->page,
2741                                               spage->logical);
2742
2743         scrub_pending_bio_inc(sctx);
2744         raid56_parity_submit_scrub_rbio(rbio);
2745         return;
2746
2747 rbio_out:
2748         bio_put(bio);
2749 bbio_out:
2750         kfree(bbio);
2751         kfree(raid_map);
2752         bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
2753                   sparity->nsectors);
2754         spin_lock(&sctx->stat_lock);
2755         sctx->stat.malloc_errors++;
2756         spin_unlock(&sctx->stat_lock);
2757 out:
2758         scrub_free_parity(sparity);
2759 }
2760
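/*
 * Size of one per-stripe sector bitmap in bytes, rounded up to whole longs
 * so that dbitmap and ebitmap can be laid out back to back behind the
 * scrub_parity structure.
 */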
2761 static inline int scrub_calc_parity_bitmap_len(int nsectors)
2762 {
2763         return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * (BITS_PER_LONG / 8);
2764 }
2765
2766 static void scrub_parity_get(struct scrub_parity *sparity)
2767 {
2768         atomic_inc(&sparity->ref_count);
2769 }
2770
2771 static void scrub_parity_put(struct scrub_parity *sparity)
2772 {
2773         if (!atomic_dec_and_test(&sparity->ref_count))
2774                 return;
2775
2776         scrub_parity_check_and_repair(sparity);
2777 }
2778
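/*
 * Scrub the data stripes that feed one raid56 parity stripe, covering the
 * logical range [logic_start, logic_end].  The extent tree is walked for
 * extents inside the range, their sectors are marked in the data bitmap and
 * read through scrub_extent_for_parity().  The parity itself is checked
 * and, if necessary, repaired once the last reference is dropped via
 * scrub_parity_put().
 */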
2779 static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
2780                                                   struct map_lookup *map,
2781                                                   struct btrfs_device *sdev,
2782                                                   struct btrfs_path *path,
2783                                                   u64 logic_start,
2784                                                   u64 logic_end)
2785 {
2786         struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
2787         struct btrfs_root *root = fs_info->extent_root;
2788         struct btrfs_root *csum_root = fs_info->csum_root;
2789         struct btrfs_extent_item *extent;
2790         u64 flags;
2791         int ret;
2792         int slot;
2793         struct extent_buffer *l;
2794         struct btrfs_key key;
2795         u64 generation;
2796         u64 extent_logical;
2797         u64 extent_physical;
2798         u64 extent_len;
2799         struct btrfs_device *extent_dev;
2800         struct scrub_parity *sparity;
2801         int nsectors;
2802         int bitmap_len;
2803         int extent_mirror_num;
2804         int stop_loop = 0;
2805
2806         nsectors = map->stripe_len / root->sectorsize;
2807         bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
2808         sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
2809                           GFP_NOFS);
2810         if (!sparity) {
2811                 spin_lock(&sctx->stat_lock);
2812                 sctx->stat.malloc_errors++;
2813                 spin_unlock(&sctx->stat_lock);
2814                 return -ENOMEM;
2815         }
2816
2817         sparity->stripe_len = map->stripe_len;
2818         sparity->nsectors = nsectors;
2819         sparity->sctx = sctx;
2820         sparity->scrub_dev = sdev;
2821         sparity->logic_start = logic_start;
2822         sparity->logic_end = logic_end;
2823         atomic_set(&sparity->ref_count, 1);
2824         INIT_LIST_HEAD(&sparity->spages);
2825         sparity->dbitmap = sparity->bitmap;
2826         sparity->ebitmap = (void *)sparity->bitmap + bitmap_len;
2827
2828         ret = 0;
2829         while (logic_start < logic_end) {
2830                 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
2831                         key.type = BTRFS_METADATA_ITEM_KEY;
2832                 else
2833                         key.type = BTRFS_EXTENT_ITEM_KEY;
2834                 key.objectid = logic_start;
2835                 key.offset = (u64)-1;
2836
2837                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2838                 if (ret < 0)
2839                         goto out;
2840
2841                 if (ret > 0) {
2842                         ret = btrfs_previous_extent_item(root, path, 0);
2843                         if (ret < 0)
2844                                 goto out;
2845                         if (ret > 0) {
2846                                 btrfs_release_path(path);
2847                                 ret = btrfs_search_slot(NULL, root, &key,
2848                                                         path, 0, 0);
2849                                 if (ret < 0)
2850                                         goto out;
2851                         }
2852                 }
2853
2854                 stop_loop = 0;
2855                 while (1) {
2856                         u64 bytes;
2857
2858                         l = path->nodes[0];
2859                         slot = path->slots[0];
2860                         if (slot >= btrfs_header_nritems(l)) {
2861                                 ret = btrfs_next_leaf(root, path);
2862                                 if (ret == 0)
2863                                         continue;
2864                                 if (ret < 0)
2865                                         goto out;
2866
2867                                 stop_loop = 1;
2868                                 break;
2869                         }
2870                         btrfs_item_key_to_cpu(l, &key, slot);
2871
2872                         if (key.type == BTRFS_METADATA_ITEM_KEY)
2873                                 bytes = root->nodesize;
2874                         else
2875                                 bytes = key.offset;
2876
2877                         if (key.objectid + bytes <= logic_start)
2878                                 goto next;
2879
2880                         if (key.type != BTRFS_EXTENT_ITEM_KEY &&
2881                             key.type != BTRFS_METADATA_ITEM_KEY)
2882                                 goto next;
2883
2884                         if (key.objectid > logic_end) {
2885                                 stop_loop = 1;
2886                                 break;
2887                         }
2888
2889                         while (key.objectid >= logic_start + map->stripe_len)
2890                                 logic_start += map->stripe_len;
2891
2892                         extent = btrfs_item_ptr(l, slot,
2893                                                 struct btrfs_extent_item);
2894                         flags = btrfs_extent_flags(l, extent);
2895                         generation = btrfs_extent_generation(l, extent);
2896
2897                         if (key.objectid < logic_start &&
2898                             (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
2899                                 btrfs_err(fs_info,
2900                                           "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
2901                                            key.objectid, logic_start);
2902                                 goto next;
2903                         }
2904 again:
2905                         extent_logical = key.objectid;
2906                         extent_len = bytes;
2907
2908                         if (extent_logical < logic_start) {
2909                                 extent_len -= logic_start - extent_logical;
2910                                 extent_logical = logic_start;
2911                         }
2912
2913                         if (extent_logical + extent_len >
2914                             logic_start + map->stripe_len)
2915                                 extent_len = logic_start + map->stripe_len -
2916                                              extent_logical;
2917
2918                         scrub_parity_mark_sectors_data(sparity, extent_logical,
2919                                                        extent_len);
2920
2921                         scrub_remap_extent(fs_info, extent_logical,
2922                                            extent_len, &extent_physical,
2923                                            &extent_dev,
2924                                            &extent_mirror_num);
2925
2926                         ret = btrfs_lookup_csums_range(csum_root,
2927                                                 extent_logical,
2928                                                 extent_logical + extent_len - 1,
2929                                                 &sctx->csum_list, 1);
2930                         if (ret)
2931                                 goto out;
2932
2933                         ret = scrub_extent_for_parity(sparity, extent_logical,
2934                                                       extent_len,
2935                                                       extent_physical,
2936                                                       extent_dev, flags,
2937                                                       generation,
2938                                                       extent_mirror_num);
2939                         if (ret)
2940                                 goto out;
2941
2942                         scrub_free_csums(sctx);
2943                         if (extent_logical + extent_len <
2944                             key.objectid + bytes) {
2945                                 logic_start += map->stripe_len;
2946
2947                                 if (logic_start >= logic_end) {
2948                                         stop_loop = 1;
2949                                         break;
2950                                 }
2951
2952                                 if (logic_start < key.objectid + bytes) {
2953                                         cond_resched();
2954                                         goto again;
2955                                 }
2956                         }
2957 next:
2958                         path->slots[0]++;
2959                 }
2960
2961                 btrfs_release_path(path);
2962
2963                 if (stop_loop)
2964                         break;
2965
2966                 logic_start += map->stripe_len;
2967         }
2968 out:
2969         if (ret < 0)
2970                 scrub_parity_mark_sectors_error(sparity, logic_start,
2971                                                 logic_end - logic_start + 1);
2972         scrub_parity_put(sparity);
2973         scrub_submit(sctx);
2974         mutex_lock(&sctx->wr_ctx.wr_lock);
2975         scrub_wr_submit(sctx);
2976         mutex_unlock(&sctx->wr_ctx.wr_lock);
2977
2978         btrfs_release_path(path);
2979         return ret < 0 ? ret : 0;
2980 }
2981
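/*
 * Scrub one device stripe of a chunk.  The logical<->physical mapping
 * parameters (offset, increment, mirror_num) are derived from the raid
 * profile, then the extent tree is walked stripe by stripe: regular extents
 * are checksummed via scrub_extent(), while raid5/6 parity stripes are
 * handed off to scrub_raid56_parity().
 */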
2982 static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2983                                            struct map_lookup *map,
2984                                            struct btrfs_device *scrub_dev,
2985                                            int num, u64 base, u64 length,
2986                                            int is_dev_replace)
2987 {
2988         struct btrfs_path *path, *ppath;
2989         struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
2990         struct btrfs_root *root = fs_info->extent_root;
2991         struct btrfs_root *csum_root = fs_info->csum_root;
2992         struct btrfs_extent_item *extent;
2993         struct blk_plug plug;
2994         u64 flags;
2995         int ret;
2996         int slot;
2997         u64 nstripes;
2998         struct extent_buffer *l;
2999         struct btrfs_key key;
3000         u64 physical;
3001         u64 logical;
3002         u64 logic_end;
3003         u64 physical_end;
3004         u64 generation;
3005         int mirror_num;
3006         struct reada_control *reada1;
3007         struct reada_control *reada2;
3008         struct btrfs_key key_start;
3009         struct btrfs_key key_end;
3010         u64 increment = map->stripe_len;
3011         u64 offset;
3012         u64 extent_logical;
3013         u64 extent_physical;
3014         u64 extent_len;
3015         u64 stripe_logical;
3016         u64 stripe_end;
3017         struct btrfs_device *extent_dev;
3018         int extent_mirror_num;
3019         int stop_loop = 0;
3020
3021         nstripes = length;
3022         physical = map->stripes[num].physical;
3023         offset = 0;
3024         do_div(nstripes, map->stripe_len);
3025         if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3026                 offset = map->stripe_len * num;
3027                 increment = map->stripe_len * map->num_stripes;
3028                 mirror_num = 1;
3029         } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3030                 int factor = map->num_stripes / map->sub_stripes;
3031                 offset = map->stripe_len * (num / map->sub_stripes);
3032                 increment = map->stripe_len * factor;
3033                 mirror_num = num % map->sub_stripes + 1;
3034         } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
3035                 increment = map->stripe_len;
3036                 mirror_num = num % map->num_stripes + 1;
3037         } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
3038                 increment = map->stripe_len;
3039                 mirror_num = num % map->num_stripes + 1;
3040         } else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
3041                                 BTRFS_BLOCK_GROUP_RAID6)) {
3042                 get_raid56_logic_offset(physical, num, map, &offset, NULL);
3043                 increment = map->stripe_len * nr_data_stripes(map);
3044                 mirror_num = 1;
3045         } else {
3046                 increment = map->stripe_len;
3047                 mirror_num = 1;
3048         }
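/*
 * Illustrative example (added): RAID10 with num_stripes == 4 and
 * sub_stripes == 2 gives factor == 2.  Scrubbing device num == 3 then
 * starts at offset == stripe_len, advances by increment == 2 * stripe_len
 * and uses mirror_num == 2 (the second copy of its sub-stripe pair).
 */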
3049
3050         path = btrfs_alloc_path();
3051         if (!path)
3052                 return -ENOMEM;
3053
3054         ppath = btrfs_alloc_path();
3055         if (!ppath) {
3056                 btrfs_free_path(path);
3057                 return -ENOMEM;
3058         }
3059
3060         /*
3061          * work on commit root. The related disk blocks are static as
3062          * long as COW is applied. This means it is safe to rewrite
3063          * them to repair disk errors without any race conditions
3064          */
3065         path->search_commit_root = 1;
3066         path->skip_locking = 1;
3067
3068         ppath->search_commit_root = 1;
3069         ppath->skip_locking = 1;
3070         /*
3071          * trigger the readahead for the extent tree and csum tree and wait for
3072          * completion. During readahead, the scrub is officially paused
3073          * to not hold off transaction commits
3074          */
3075         logical = base + offset;
3076         physical_end = physical + nstripes * map->stripe_len;
3077         if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
3078                          BTRFS_BLOCK_GROUP_RAID6)) {
3079                 get_raid56_logic_offset(physical_end, num,
3080                                         map, &logic_end, NULL);
3081                 logic_end += base;
3082         } else {
3083                 logic_end = logical + increment * nstripes;
3084         }
3085         wait_event(sctx->list_wait,
3086                    atomic_read(&sctx->bios_in_flight) == 0);
3087         scrub_blocked_if_needed(fs_info);
3088
3089         /* FIXME it might be better to start readahead at commit root */
3090         key_start.objectid = logical;
3091         key_start.type = BTRFS_EXTENT_ITEM_KEY;
3092         key_start.offset = (u64)0;
3093         key_end.objectid = logic_end;
3094         key_end.type = BTRFS_METADATA_ITEM_KEY;
3095         key_end.offset = (u64)-1;
3096         reada1 = btrfs_reada_add(root, &key_start, &key_end);
3097
3098         key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3099         key_start.type = BTRFS_EXTENT_CSUM_KEY;
3100         key_start.offset = logical;
3101         key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3102         key_end.type = BTRFS_EXTENT_CSUM_KEY;
3103         key_end.offset = logic_end;
3104         reada2 = btrfs_reada_add(csum_root, &key_start, &key_end);
3105
3106         if (!IS_ERR(reada1))
3107                 btrfs_reada_wait(reada1);
3108         if (!IS_ERR(reada2))
3109                 btrfs_reada_wait(reada2);
3110
3111
3112         /*
3113          * collect all data csums for the stripe to avoid seeking during
3114          * the scrub. This might currently (crc32) end up being about 1MB
3115          */
3116         blk_start_plug(&plug);
3117
3118         /*
3119          * now find all extents for each stripe and scrub them
3120          */
3121         ret = 0;
3122         while (physical < physical_end) {
3123                 /* for raid56, we skip parity stripe */
3124                 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
3125                                 BTRFS_BLOCK_GROUP_RAID6)) {
3126                         ret = get_raid56_logic_offset(physical, num,
3127                                         map, &logical, &stripe_logical);
3128                         logical += base;
3129                         if (ret) {
3130                                 stripe_logical += base;
3131                                 stripe_end = stripe_logical + increment - 1;
3132                                 ret = scrub_raid56_parity(sctx, map, scrub_dev,
3133                                                 ppath, stripe_logical,
3134                                                 stripe_end);
3135                                 if (ret)
3136                                         goto out;
3137                                 goto skip;
3138                         }
3139                 }
3140                 /*
3141                  * canceled?
3142                  */
3143                 if (atomic_read(&fs_info->scrub_cancel_req) ||
3144                     atomic_read(&sctx->cancel_req)) {
3145                         ret = -ECANCELED;
3146                         goto out;
3147                 }
3148                 /*
3149                  * check to see if we have to pause
3150                  */
3151                 if (atomic_read(&fs_info->scrub_pause_req)) {
3152                         /* push queued extents */
3153                         atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
3154                         scrub_submit(sctx);
3155                         mutex_lock(&sctx->wr_ctx.wr_lock);
3156                         scrub_wr_submit(sctx);
3157                         mutex_unlock(&sctx->wr_ctx.wr_lock);
3158                         wait_event(sctx->list_wait,
3159                                    atomic_read(&sctx->bios_in_flight) == 0);
3160                         atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
3161                         scrub_blocked_if_needed(fs_info);
3162                 }
3163
3164                 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
3165                         key.type = BTRFS_METADATA_ITEM_KEY;
3166                 else
3167                         key.type = BTRFS_EXTENT_ITEM_KEY;
3168                 key.objectid = logical;
3169                 key.offset = (u64)-1;
3170
3171                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3172                 if (ret < 0)
3173                         goto out;
3174
3175                 if (ret > 0) {
3176                         ret = btrfs_previous_extent_item(root, path, 0);
3177                         if (ret < 0)
3178                                 goto out;
3179                         if (ret > 0) {
3180                                 /* there's no smaller item, so stick with the
3181                                  * larger one */
3182                                 btrfs_release_path(path);
3183                                 ret = btrfs_search_slot(NULL, root, &key,
3184                                                         path, 0, 0);
3185                                 if (ret < 0)
3186                                         goto out;
3187                         }
3188                 }
3189
3190                 stop_loop = 0;
3191                 while (1) {
3192                         u64 bytes;
3193
3194                         l = path->nodes[0];
3195                         slot = path->slots[0];
3196                         if (slot >= btrfs_header_nritems(l)) {
3197                                 ret = btrfs_next_leaf(root, path);
3198                                 if (ret == 0)
3199                                         continue;
3200                                 if (ret < 0)
3201                                         goto out;
3202
3203                                 stop_loop = 1;
3204                                 break;
3205                         }
3206                         btrfs_item_key_to_cpu(l, &key, slot);
3207
3208                         if (key.type == BTRFS_METADATA_ITEM_KEY)
3209                                 bytes = root->nodesize;
3210                         else
3211                                 bytes = key.offset;
3212
3213                         if (key.objectid + bytes <= logical)
3214                                 goto next;
3215
3216                         if (key.type != BTRFS_EXTENT_ITEM_KEY &&
3217                             key.type != BTRFS_METADATA_ITEM_KEY)
3218                                 goto next;
3219
3220                         if (key.objectid >= logical + map->stripe_len) {
3221                                 /* out of this device extent */
3222                                 if (key.objectid >= logic_end)
3223                                         stop_loop = 1;
3224                                 break;
3225                         }
3226
3227                         extent = btrfs_item_ptr(l, slot,
3228                                                 struct btrfs_extent_item);
3229                         flags = btrfs_extent_flags(l, extent);
3230                         generation = btrfs_extent_generation(l, extent);
3231
3232                         if (key.objectid < logical &&
3233                             (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
3234                                 btrfs_err(fs_info,
3235                                            "scrub: tree block %llu spanning "
3236                                            "stripes, ignored. logical=%llu",
3237                                        key.objectid, logical);
3238                                 goto next;
3239                         }
3240
3241 again:
3242                         extent_logical = key.objectid;
3243                         extent_len = bytes;
3244
3245                         /*
3246                          * trim extent to this stripe
3247                          */
3248                         if (extent_logical < logical) {
3249                                 extent_len -= logical - extent_logical;
3250                                 extent_logical = logical;
3251                         }
3252                         if (extent_logical + extent_len >
3253                             logical + map->stripe_len) {
3254                                 extent_len = logical + map->stripe_len -
3255                                              extent_logical;
3256                         }
3257
3258                         extent_physical = extent_logical - logical + physical;
3259                         extent_dev = scrub_dev;
3260                         extent_mirror_num = mirror_num;
3261                         if (is_dev_replace)
3262                                 scrub_remap_extent(fs_info, extent_logical,
3263                                                    extent_len, &extent_physical,
3264                                                    &extent_dev,
3265                                                    &extent_mirror_num);
3266
3267                         ret = btrfs_lookup_csums_range(csum_root, logical,
3268                                                 logical + map->stripe_len - 1,
3269                                                 &sctx->csum_list, 1);
3270                         if (ret)
3271                                 goto out;
3272
3273                         ret = scrub_extent(sctx, extent_logical, extent_len,
3274                                            extent_physical, extent_dev, flags,
3275                                            generation, extent_mirror_num,
3276                                            extent_logical - logical + physical);
3277                         if (ret)
3278                                 goto out;
3279
3280                         scrub_free_csums(sctx);
3281                         if (extent_logical + extent_len <
3282                             key.objectid + bytes) {
3283                                 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
3284                                         BTRFS_BLOCK_GROUP_RAID6)) {
3285                                         /*
3286                                          * loop until we find the next data stripe
3287                                          * or we have finished all stripes.
3288                                          */
3289 loop:
3290                                         physical += map->stripe_len;
3291                                         ret = get_raid56_logic_offset(physical,
3292                                                         num, map, &logical,
3293                                                         &stripe_logical);
3294                                         logical += base;
3295
3296                                         if (ret && physical < physical_end) {
3297                                                 stripe_logical += base;
3298                                                 stripe_end = stripe_logical +
3299                                                                 increment - 1;
3300                                                 ret = scrub_raid56_parity(sctx,
3301                                                         map, scrub_dev, ppath,
3302                                                         stripe_logical,
3303                                                         stripe_end);
3304                                                 if (ret)
3305                                                         goto out;
3306                                                 goto loop;
3307                                         }
3308                                 } else {
3309                                         physical += map->stripe_len;
3310                                         logical += increment;
3311                                 }
3312                                 if (logical < key.objectid + bytes) {
3313                                         cond_resched();
3314                                         goto again;
3315                                 }
3316
3317                                 if (physical >= physical_end) {
3318                                         stop_loop = 1;
3319                                         break;
3320                                 }
3321                         }
3322 next:
3323                         path->slots[0]++;
3324                 }
3325                 btrfs_release_path(path);
3326 skip:
3327                 logical += increment;
3328                 physical += map->stripe_len;
3329                 spin_lock(&sctx->stat_lock);
3330                 if (stop_loop)
3331                         sctx->stat.last_physical = map->stripes[num].physical +
3332                                                    length;
3333                 else
3334                         sctx->stat.last_physical = physical;
3335                 spin_unlock(&sctx->stat_lock);
3336                 if (stop_loop)
3337                         break;
3338         }
3339 out:
3340         /* push queued extents */
3341         scrub_submit(sctx);
3342         mutex_lock(&sctx->wr_ctx.wr_lock);
3343         scrub_wr_submit(sctx);
3344         mutex_unlock(&sctx->wr_ctx.wr_lock);
3345
3346         blk_finish_plug(&plug);
3347         btrfs_free_path(path);
3348         btrfs_free_path(ppath);
3349         return ret < 0 ? ret : 0;
3350 }
3351
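/*
 * Look up the chunk mapping for @chunk_offset and scrub every stripe of
 * that chunk which lives on @scrub_dev at @dev_offset.
 */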
3352 static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
3353                                           struct btrfs_device *scrub_dev,
3354                                           u64 chunk_tree, u64 chunk_objectid,
3355                                           u64 chunk_offset, u64 length,
3356                                           u64 dev_offset, int is_dev_replace)
3357 {
3358         struct btrfs_mapping_tree *map_tree =
3359                 &sctx->dev_root->fs_info->mapping_tree;
3360         struct map_lookup *map;
3361         struct extent_map *em;
3362         int i;
3363         int ret = 0;
3364
3365         read_lock(&map_tree->map_tree.lock);
3366         em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
3367         read_unlock(&map_tree->map_tree.lock);
3368
3369         if (!em)
3370                 return -EINVAL;
3371
3372         map = (struct map_lookup *)em->bdev;
3373         if (em->start != chunk_offset)
3374                 goto out;
3375
3376         if (em->len < length)
3377                 goto out;
3378
3379         for (i = 0; i < map->num_stripes; ++i) {
3380                 if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
3381                     map->stripes[i].physical == dev_offset) {
3382                         ret = scrub_stripe(sctx, map, scrub_dev, i,
3383                                            chunk_offset, length,
3384                                            is_dev_replace);
3385                         if (ret)
3386                                 goto out;
3387                 }
3388         }
3389 out:
3390         free_extent_map(em);
3391
3392         return ret;
3393 }
3394
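/*
 * Walk all DEV_EXTENT items of @scrub_dev in the range [start, end) and
 * scrub the chunk behind each of them.  Between chunks all in-flight bios
 * are flushed and the scrub briefly counts itself as paused so that
 * transaction commits are not held off; in the dev-replace case the cursor
 * in the dev_replace item is advanced as chunks complete.
 */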
3395 static noinline_for_stack
3396 int scrub_enumerate_chunks(struct scrub_ctx *sctx,
3397                            struct btrfs_device *scrub_dev, u64 start, u64 end,
3398                            int is_dev_replace)
3399 {
3400         struct btrfs_dev_extent *dev_extent = NULL;
3401         struct btrfs_path *path;
3402         struct btrfs_root *root = sctx->dev_root;
3403         struct btrfs_fs_info *fs_info = root->fs_info;
3404         u64 length;
3405         u64 chunk_tree;
3406         u64 chunk_objectid;
3407         u64 chunk_offset;
3408         int ret;
3409         int slot;
3410         struct extent_buffer *l;
3411         struct btrfs_key key;
3412         struct btrfs_key found_key;
3413         struct btrfs_block_group_cache *cache;
3414         struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
3415
3416         path = btrfs_alloc_path();
3417         if (!path)
3418                 return -ENOMEM;
3419
3420         path->reada = 2;
3421         path->search_commit_root = 1;
3422         path->skip_locking = 1;
3423
3424         key.objectid = scrub_dev->devid;
3425         key.offset = 0ull;
3426         key.type = BTRFS_DEV_EXTENT_KEY;
3427
3428         while (1) {
3429                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3430                 if (ret < 0)
3431                         break;
3432                 if (ret > 0) {
3433                         if (path->slots[0] >=
3434                             btrfs_header_nritems(path->nodes[0])) {
3435                                 ret = btrfs_next_leaf(root, path);
3436                                 if (ret)
3437                                         break;
3438                         }
3439                 }
3440
3441                 l = path->nodes[0];
3442                 slot = path->slots[0];
3443
3444                 btrfs_item_key_to_cpu(l, &found_key, slot);
3445
3446                 if (found_key.objectid != scrub_dev->devid)
3447                         break;
3448
3449                 if (found_key.type != BTRFS_DEV_EXTENT_KEY)
3450                         break;
3451
3452                 if (found_key.offset >= end)
3453                         break;
3454
3455                 if (found_key.offset < key.offset)
3456                         break;
3457
3458                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3459                 length = btrfs_dev_extent_length(l, dev_extent);
3460
3461                 if (found_key.offset + length <= start)
3462                         goto skip;
3463
3464                 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
3465                 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
3466                 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3467
3468                 /*
3469                  * get a reference on the corresponding block group to prevent
3470                  * the chunk from going away while we scrub it
3471                  */
3472                 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3473
3474                 /* some chunks are removed but not committed to disk yet,
3475                  * continue scrubbing */
3476                 if (!cache)
3477                         goto skip;
3478
3479                 dev_replace->cursor_right = found_key.offset + length;
3480                 dev_replace->cursor_left = found_key.offset;
3481                 dev_replace->item_needs_writeback = 1;
3482                 ret = scrub_chunk(sctx, scrub_dev, chunk_tree, chunk_objectid,
3483                                   chunk_offset, length, found_key.offset,
3484                                   is_dev_replace);
3485
3486                 /*
3487                  * flush, submit all pending read and write bios, afterwards
3488                  * wait for them.
3489                  * Note that in the dev replace case, a read request causes
3490                  * write requests that are submitted in the read completion
3491                  * worker. Therefore in the current situation, it is required
3492                  * that all write requests are flushed, so that all read and
3493                  * write requests are really completed when bios_in_flight
3494                  * changes to 0.
3495                  */
3496                 atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
3497                 scrub_submit(sctx);
3498                 mutex_lock(&sctx->wr_ctx.wr_lock);
3499                 scrub_wr_submit(sctx);
3500                 mutex_unlock(&sctx->wr_ctx.wr_lock);
3501
3502                 wait_event(sctx->list_wait,
3503                            atomic_read(&sctx->bios_in_flight) == 0);
3504                 atomic_inc(&fs_info->scrubs_paused);
3505                 wake_up(&fs_info->scrub_pause_wait);
3506
3507                 /*
3508                  * must be called before we decrease @scrubs_paused.
3509                  * make sure we don't block transaction commit while
3510                  * we are waiting for pending workers to finish.
3511                  */
3512                 wait_event(sctx->list_wait,
3513                            atomic_read(&sctx->workers_pending) == 0);
3514                 atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
3515
3516                 mutex_lock(&fs_info->scrub_lock);
3517                 __scrub_blocked_if_needed(fs_info);
3518                 atomic_dec(&fs_info->scrubs_paused);
3519                 mutex_unlock(&fs_info->scrub_lock);
3520                 wake_up(&fs_info->scrub_pause_wait);
3521
3522                 btrfs_put_block_group(cache);
3523                 if (ret)
3524                         break;
3525                 if (is_dev_replace &&
3526                     atomic64_read(&dev_replace->num_write_errors) > 0) {
3527                         ret = -EIO;
3528                         break;
3529                 }
3530                 if (sctx->stat.malloc_errors > 0) {
3531                         ret = -ENOMEM;
3532                         break;
3533                 }
3534
3535                 dev_replace->cursor_left = dev_replace->cursor_right;
3536                 dev_replace->item_needs_writeback = 1;
3537 skip:
3538                 key.offset = found_key.offset + length;
3539                 btrfs_release_path(path);
3540         }
3541
3542         btrfs_free_path(path);
3543
3544         /*
3545          * ret can still be 1 from search_slot or next_leaf,
3546          * that's not an error
3547          */
3548         return ret < 0 ? ret : 0;
3549 }
3550
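/*
 * Scrub all super block copies on @scrub_dev that fit below
 * commit_total_bytes.  Seed devices carry their own generation; everything
 * else is checked against the last committed transaction.
 */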
3551 static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
3552                                            struct btrfs_device *scrub_dev)
3553 {
3554         int     i;
3555         u64     bytenr;
3556         u64     gen;
3557         int     ret;
3558         struct btrfs_root *root = sctx->dev_root;
3559
3560         if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
3561                 return -EIO;
3562
3563         /* Seed devices of a new filesystem have their own generation. */
3564         if (scrub_dev->fs_devices != root->fs_info->fs_devices)
3565                 gen = scrub_dev->generation;
3566         else
3567                 gen = root->fs_info->last_trans_committed;
3568
3569         for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
3570                 bytenr = btrfs_sb_offset(i);
3571                 if (bytenr + BTRFS_SUPER_INFO_SIZE >
3572                     scrub_dev->commit_total_bytes)
3573                         break;
3574
3575                 ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
3576                                   scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
3577                                   NULL, 1, bytenr);
3578                 if (ret)
3579                         return ret;
3580         }
3581         wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
3582
3583         return 0;
3584 }
3585
3586 /*
3587  * get a reference count on fs_info->scrub_workers. Start workers if necessary.
3588  */
3589 static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
3590                                                 int is_dev_replace)
3591 {
3592         int ret = 0;
3593         int flags = WQ_FREEZABLE | WQ_UNBOUND;
3594         int max_active = fs_info->thread_pool_size;
3595
3596         if (fs_info->scrub_workers_refcnt == 0) {
3597                 if (is_dev_replace)
3598                         fs_info->scrub_workers =
3599                                 btrfs_alloc_workqueue("btrfs-scrub", flags,
3600                                                       1, 4);
3601                 else
3602                         fs_info->scrub_workers =
3603                                 btrfs_alloc_workqueue("btrfs-scrub", flags,
3604                                                       max_active, 4);
3605                 if (!fs_info->scrub_workers) {
3606                         ret = -ENOMEM;
3607                         goto out;
3608                 }
3609                 fs_info->scrub_wr_completion_workers =
3610                         btrfs_alloc_workqueue("btrfs-scrubwrc", flags,
3611                                               max_active, 2);
3612                 if (!fs_info->scrub_wr_completion_workers) {
3613                         ret = -ENOMEM;
3614                         goto out;
3615                 }
3616                 fs_info->scrub_nocow_workers =
3617                         btrfs_alloc_workqueue("btrfs-scrubnc", flags, 1, 0);
3618                 if (!fs_info->scrub_nocow_workers) {
3619                         ret = -ENOMEM;
3620                         goto out;
3621                 }
3622         }
3623         ++fs_info->scrub_workers_refcnt;
3624 out:
3625         return ret;
3626 }
3627
3628 static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
3629 {
3630         if (--fs_info->scrub_workers_refcnt == 0) {
3631                 btrfs_destroy_workqueue(fs_info->scrub_workers);
3632                 btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
3633                 btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
3634         }
3635         WARN_ON(fs_info->scrub_workers_refcnt < 0);
3636 }
3637
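/*
 * Entry point for both scrub and dev-replace.  After validating the
 * node/sector size assumptions and the state of the target device, a scrub
 * context and the scrub workqueues are set up, the super blocks and all
 * chunks on the device are scrubbed, and the accumulated statistics are
 * copied to @progress before the context is torn down again.
 */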
3638 int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
3639                     u64 end, struct btrfs_scrub_progress *progress,
3640                     int readonly, int is_dev_replace)
3641 {
3642         struct scrub_ctx *sctx;
3643         int ret;
3644         struct btrfs_device *dev;
3645         struct rcu_string *name;
3646
3647         if (btrfs_fs_closing(fs_info))
3648                 return -EINVAL;
3649
3650         if (fs_info->chunk_root->nodesize > BTRFS_STRIPE_LEN) {
3651                 /*
3652                  * in this case scrub is unable to calculate the checksum
3653                  * the way it is implemented. Do not handle this
3654                  * situation at all because it won't ever happen.
3655                  */
3656                 btrfs_err(fs_info,
3657                            "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
3658                        fs_info->chunk_root->nodesize, BTRFS_STRIPE_LEN);
3659                 return -EINVAL;
3660         }
3661
3662         if (fs_info->chunk_root->sectorsize != PAGE_SIZE) {
3663                 /* not supported for data w/o checksums */
3664                 btrfs_err(fs_info,
3665                            "scrub: size assumption sectorsize != PAGE_SIZE "
3666                            "(%d != %lu) fails",
3667                        fs_info->chunk_root->sectorsize, PAGE_SIZE);
3668                 return -EINVAL;
3669         }
3670
3671         if (fs_info->chunk_root->nodesize >
3672             PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
3673             fs_info->chunk_root->sectorsize >
3674             PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
3675                 /*
3676                  * would exhaust the array bounds of pagev member in
3677                  * struct scrub_block
3678                  */
3679                 btrfs_err(fs_info, "scrub: size assumption nodesize and sectorsize "
3680                            "<= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
3681                        fs_info->chunk_root->nodesize,
3682                        SCRUB_MAX_PAGES_PER_BLOCK,
3683                        fs_info->chunk_root->sectorsize,
3684                        SCRUB_MAX_PAGES_PER_BLOCK);
3685                 return -EINVAL;
3686         }
3687
3688
3689         mutex_lock(&fs_info->fs_devices->device_list_mutex);
3690         dev = btrfs_find_device(fs_info, devid, NULL, NULL);
3691         if (!dev || (dev->missing && !is_dev_replace)) {
3692                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3693                 return -ENODEV;
3694         }
3695
3696         if (!is_dev_replace && !readonly && !dev->writeable) {
3697                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3698                 rcu_read_lock();
3699                 name = rcu_dereference(dev->name);
3700                 btrfs_err(fs_info, "scrub: device %s is not writable",
3701                           name->str);
3702                 rcu_read_unlock();
3703                 return -EROFS;
3704         }
3705
3706         mutex_lock(&fs_info->scrub_lock);
3707         if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) {
3708                 mutex_unlock(&fs_info->scrub_lock);
3709                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3710                 return -EIO;
3711         }
3712
3713         btrfs_dev_replace_lock(&fs_info->dev_replace);
3714         if (dev->scrub_device ||
3715             (!is_dev_replace &&
3716              btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
3717                 btrfs_dev_replace_unlock(&fs_info->dev_replace);
3718                 mutex_unlock(&fs_info->scrub_lock);
3719                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3720                 return -EINPROGRESS;
3721         }
3722         btrfs_dev_replace_unlock(&fs_info->dev_replace);
3723
3724         ret = scrub_workers_get(fs_info, is_dev_replace);
3725         if (ret) {
3726                 mutex_unlock(&fs_info->scrub_lock);
3727                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3728                 return ret;
3729         }
3730
3731         sctx = scrub_setup_ctx(dev, is_dev_replace);
3732         if (IS_ERR(sctx)) {
3733                 mutex_unlock(&fs_info->scrub_lock);
3734                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3735                 scrub_workers_put(fs_info);
3736                 return PTR_ERR(sctx);
3737         }
3738         sctx->readonly = readonly;
3739         dev->scrub_device = sctx;
3740         mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3741
3742         /*
3743          * by checking @scrub_pause_req here, we can avoid
3744          * a race between committing a transaction and scrubbing.
3745          */
3746         __scrub_blocked_if_needed(fs_info);
3747         atomic_inc(&fs_info->scrubs_running);
3748         mutex_unlock(&fs_info->scrub_lock);
3749
3750         if (!is_dev_replace) {
3751                 /*
3752                  * by holding device list mutex, we can
3753                  * kick off writing super in log tree sync.
3754                  */
3755                 mutex_lock(&fs_info->fs_devices->device_list_mutex);
3756                 ret = scrub_supers(sctx, dev);
3757                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3758         }
3759
3760         if (!ret)
3761                 ret = scrub_enumerate_chunks(sctx, dev, start, end,
3762                                              is_dev_replace);
3763
3764         wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
3765         atomic_dec(&fs_info->scrubs_running);
3766         wake_up(&fs_info->scrub_pause_wait);
3767
3768         wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);
3769
3770         if (progress)
3771                 memcpy(progress, &sctx->stat, sizeof(*progress));
3772
3773         mutex_lock(&fs_info->scrub_lock);
3774         dev->scrub_device = NULL;
3775         scrub_workers_put(fs_info);
3776         mutex_unlock(&fs_info->scrub_lock);
3777
3778         scrub_free_ctx(sctx);
3779
3780         return ret;
3781 }
3782
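/*
 * Ask all running scrubs to pause and wait until every one of them has
 * parked itself (scrubs_paused == scrubs_running).  Balanced by
 * btrfs_scrub_continue() below.
 */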
3783 void btrfs_scrub_pause(struct btrfs_root *root)
3784 {
3785         struct btrfs_fs_info *fs_info = root->fs_info;
3786
3787         mutex_lock(&fs_info->scrub_lock);
3788         atomic_inc(&fs_info->scrub_pause_req);
3789         while (atomic_read(&fs_info->scrubs_paused) !=
3790                atomic_read(&fs_info->scrubs_running)) {
3791                 mutex_unlock(&fs_info->scrub_lock);
3792                 wait_event(fs_info->scrub_pause_wait,
3793                            atomic_read(&fs_info->scrubs_paused) ==
3794                            atomic_read(&fs_info->scrubs_running));
3795                 mutex_lock(&fs_info->scrub_lock);
3796         }
3797         mutex_unlock(&fs_info->scrub_lock);
3798 }
3799
3800 void btrfs_scrub_continue(struct btrfs_root *root)
3801 {
3802         struct btrfs_fs_info *fs_info = root->fs_info;
3803
3804         atomic_dec(&fs_info->scrub_pause_req);
3805         wake_up(&fs_info->scrub_pause_wait);
3806 }
3807
3808 int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
3809 {
3810         mutex_lock(&fs_info->scrub_lock);
3811         if (!atomic_read(&fs_info->scrubs_running)) {
3812                 mutex_unlock(&fs_info->scrub_lock);
3813                 return -ENOTCONN;
3814         }
3815
3816         atomic_inc(&fs_info->scrub_cancel_req);
3817         while (atomic_read(&fs_info->scrubs_running)) {
3818                 mutex_unlock(&fs_info->scrub_lock);
3819                 wait_event(fs_info->scrub_pause_wait,
3820                            atomic_read(&fs_info->scrubs_running) == 0);
3821                 mutex_lock(&fs_info->scrub_lock);
3822         }
3823         atomic_dec(&fs_info->scrub_cancel_req);
3824         mutex_unlock(&fs_info->scrub_lock);
3825
3826         return 0;
3827 }
3828
3829 int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
3830                            struct btrfs_device *dev)
3831 {
3832         struct scrub_ctx *sctx;
3833
3834         mutex_lock(&fs_info->scrub_lock);
3835         sctx = dev->scrub_device;
3836         if (!sctx) {
3837                 mutex_unlock(&fs_info->scrub_lock);
3838                 return -ENOTCONN;
3839         }
3840         atomic_inc(&sctx->cancel_req);
3841         while (dev->scrub_device) {
3842                 mutex_unlock(&fs_info->scrub_lock);
3843                 wait_event(fs_info->scrub_pause_wait,
3844                            dev->scrub_device == NULL);
3845                 mutex_lock(&fs_info->scrub_lock);
3846         }
3847         mutex_unlock(&fs_info->scrub_lock);
3848
3849         return 0;
3850 }
3851
3852 int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
3853                          struct btrfs_scrub_progress *progress)
3854 {
3855         struct btrfs_device *dev;
3856         struct scrub_ctx *sctx = NULL;
3857
3858         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
3859         dev = btrfs_find_device(root->fs_info, devid, NULL, NULL);
3860         if (dev)
3861                 sctx = dev->scrub_device;
3862         if (sctx)
3863                 memcpy(progress, &sctx->stat, sizeof(*progress));
3864         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
3865
3866         return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
3867 }
3868
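/*
 * Map @extent_logical to the first stripe returned by btrfs_map_block() and
 * report its physical offset, device and mirror number.  If the mapping
 * fails or does not cover the whole extent, the output parameters are left
 * untouched.
 */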
3869 static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
3870                                u64 extent_logical, u64 extent_len,
3871                                u64 *extent_physical,
3872                                struct btrfs_device **extent_dev,
3873                                int *extent_mirror_num)
3874 {
3875         u64 mapped_length;
3876         struct btrfs_bio *bbio = NULL;
3877         int ret;
3878
3879         mapped_length = extent_len;
3880         ret = btrfs_map_block(fs_info, READ, extent_logical,
3881                               &mapped_length, &bbio, 0);
3882         if (ret || !bbio || mapped_length < extent_len ||
3883             !bbio->stripes[0].dev->bdev) {
3884                 kfree(bbio);
3885                 return;
3886         }
3887
3888         *extent_physical = bbio->stripes[0].physical;
3889         *extent_mirror_num = bbio->mirror_num;
3890         *extent_dev = bbio->stripes[0].dev;
3891         kfree(bbio);
3892 }
3893
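/*
 * Initialize the write context used to send repaired/copied data to the
 * dev-replace target device; apart from the mutex this is a no-op unless
 * @is_dev_replace is set.
 */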
3894 static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
3895                               struct scrub_wr_ctx *wr_ctx,
3896                               struct btrfs_fs_info *fs_info,
3897                               struct btrfs_device *dev,
3898                               int is_dev_replace)
3899 {
3900         WARN_ON(wr_ctx->wr_curr_bio != NULL);
3901
3902         mutex_init(&wr_ctx->wr_lock);
3903         wr_ctx->wr_curr_bio = NULL;
3904         if (!is_dev_replace)
3905                 return 0;
3906
3907         WARN_ON(!dev->bdev);
3908         wr_ctx->pages_per_wr_bio = min_t(int, SCRUB_PAGES_PER_WR_BIO,
3909                                          bio_get_nr_vecs(dev->bdev));
3910         wr_ctx->tgtdev = dev;
3911         atomic_set(&wr_ctx->flush_all_writes, 0);
3912         return 0;
3913 }
3914
3915 static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx)
3916 {
3917         mutex_lock(&wr_ctx->wr_lock);
3918         kfree(wr_ctx->wr_curr_bio);
3919         wr_ctx->wr_curr_bio = NULL;
3920         mutex_unlock(&wr_ctx->wr_lock);
3921 }
3922
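/*
 * Handle an extent that is not protected by checksums during dev-replace:
 * queue copy_nocow_pages_worker(), which looks up every inode referencing
 * @logical and hands each of them to copy_nocow_pages_for_inode().
 */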
3923 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
3924                             int mirror_num, u64 physical_for_dev_replace)
3925 {
3926         struct scrub_copy_nocow_ctx *nocow_ctx;
3927         struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
3928
3929         nocow_ctx = kzalloc(sizeof(*nocow_ctx), GFP_NOFS);
3930         if (!nocow_ctx) {
3931                 spin_lock(&sctx->stat_lock);
3932                 sctx->stat.malloc_errors++;
3933                 spin_unlock(&sctx->stat_lock);
3934                 return -ENOMEM;
3935         }
3936
3937         scrub_pending_trans_workers_inc(sctx);
3938
3939         nocow_ctx->sctx = sctx;
3940         nocow_ctx->logical = logical;
3941         nocow_ctx->len = len;
3942         nocow_ctx->mirror_num = mirror_num;
3943         nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
3944         btrfs_init_work(&nocow_ctx->work, btrfs_scrubnc_helper,
3945                         copy_nocow_pages_worker, NULL, NULL);
3946         INIT_LIST_HEAD(&nocow_ctx->inodes);
3947         btrfs_queue_work(fs_info->scrub_nocow_workers,
3948                          &nocow_ctx->work);
3949
3950         return 0;
3951 }
3952
3953 static int record_inode_for_nocow(u64 inum, u64 offset, u64 root, void *ctx)
3954 {
3955         struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
3956         struct scrub_nocow_inode *nocow_inode;
3957
3958         nocow_inode = kzalloc(sizeof(*nocow_inode), GFP_NOFS);
3959         if (!nocow_inode)
3960                 return -ENOMEM;
3961         nocow_inode->inum = inum;
3962         nocow_inode->offset = offset;
3963         nocow_inode->root = root;
3964         list_add_tail(&nocow_inode->list, &nocow_ctx->inodes);
3965         return 0;
3966 }
3967
3968 #define COPY_COMPLETE 1
3969
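/*
 * Worker side of copy_nocow_pages(): join a transaction to collect all
 * inodes that reference the logical range, then copy the pages of each
 * inode in turn.  If the copy could not be carried out at all, the range is
 * accounted as an uncorrectable read error in the dev-replace statistics.
 */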
static void copy_nocow_pages_worker(struct btrfs_work *work)
{
        struct scrub_copy_nocow_ctx *nocow_ctx =
                container_of(work, struct scrub_copy_nocow_ctx, work);
        struct scrub_ctx *sctx = nocow_ctx->sctx;
        u64 logical = nocow_ctx->logical;
        u64 len = nocow_ctx->len;
        int mirror_num = nocow_ctx->mirror_num;
        u64 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
        int ret;
        struct btrfs_trans_handle *trans = NULL;
        struct btrfs_fs_info *fs_info;
        struct btrfs_path *path;
        struct btrfs_root *root;
        int not_written = 0;

        fs_info = sctx->dev_root->fs_info;
        root = fs_info->extent_root;

        path = btrfs_alloc_path();
        if (!path) {
                spin_lock(&sctx->stat_lock);
                sctx->stat.malloc_errors++;
                spin_unlock(&sctx->stat_lock);
                not_written = 1;
                goto out;
        }

        trans = btrfs_join_transaction(root);
        if (IS_ERR(trans)) {
                not_written = 1;
                goto out;
        }

        ret = iterate_inodes_from_logical(logical, fs_info, path,
                                          record_inode_for_nocow, nocow_ctx);
        if (ret != 0 && ret != -ENOENT) {
                btrfs_warn(fs_info, "iterate_inodes_from_logical() failed: log %llu, "
                        "phys %llu, len %llu, mir %u, ret %d",
                        logical, physical_for_dev_replace, len, mirror_num,
                        ret);
                not_written = 1;
                goto out;
        }

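        /*
         * Done with the backref walk, end the transaction before copying the
         * pages.
         */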
        btrfs_end_transaction(trans, root);
        trans = NULL;
        while (!list_empty(&nocow_ctx->inodes)) {
                struct scrub_nocow_inode *entry;
                entry = list_first_entry(&nocow_ctx->inodes,
                                         struct scrub_nocow_inode,
                                         list);
                list_del_init(&entry->list);
                ret = copy_nocow_pages_for_inode(entry->inum, entry->offset,
                                                 entry->root, nocow_ctx);
                kfree(entry);
                if (ret == COPY_COMPLETE) {
                        ret = 0;
                        break;
                } else if (ret) {
                        break;
                }
        }
out:
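        /* Free any inode records that have not been processed. */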
        while (!list_empty(&nocow_ctx->inodes)) {
                struct scrub_nocow_inode *entry;
                entry = list_first_entry(&nocow_ctx->inodes,
                                         struct scrub_nocow_inode,
                                         list);
                list_del_init(&entry->list);
                kfree(entry);
        }
        if (trans && !IS_ERR(trans))
                btrfs_end_transaction(trans, root);
        if (not_written)
                btrfs_dev_replace_stats_inc(&fs_info->dev_replace.
                                            num_uncorrectable_read_errors);

        btrfs_free_path(path);
        kfree(nocow_ctx);

        scrub_pending_trans_workers_dec(sctx);
}

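/*
 * Lock the range [start, start + len) in the inode's io tree and check that
 * it is neither covered by an ordered extent nor mapped to an extent that no
 * longer covers [logical, logical + len). Returns 0 if the copy may proceed,
 * 1 if the caller should skip this inode, or a negative error.
 */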
static int check_extent_to_block(struct inode *inode, u64 start, u64 len,
                                 u64 logical)
{
        struct extent_state *cached_state = NULL;
        struct btrfs_ordered_extent *ordered;
        struct extent_io_tree *io_tree;
        struct extent_map *em;
        u64 lockstart = start, lockend = start + len - 1;
        int ret = 0;

        io_tree = &BTRFS_I(inode)->io_tree;

        lock_extent_bits(io_tree, lockstart, lockend, 0, &cached_state);
        ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
        if (ordered) {
                btrfs_put_ordered_extent(ordered);
                ret = 1;
                goto out_unlock;
        }

        em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
        if (IS_ERR(em)) {
                ret = PTR_ERR(em);
                goto out_unlock;
        }

        /*
         * This extent does not actually cover the logical extent anymore,
         * move on to the next inode.
         */
        if (em->block_start > logical ||
            em->block_start + em->block_len < logical + len) {
                free_extent_map(em);
                ret = 1;
                goto out_unlock;
        }
        free_extent_map(em);

out_unlock:
        unlock_extent_cached(io_tree, lockstart, lockend, &cached_state,
                             GFP_NOFS);
        return ret;
}

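/*
 * Copy nocow_ctx->len bytes starting at 'offset' of the given inode to the
 * dev-replace target, reading the data through the page cache so that the
 * most recent version is written. Returns COPY_COMPLETE when the whole range
 * has been copied, 0 when this inode no longer covers the range (try the
 * next one), or a negative error.
 */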
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
                                      struct scrub_copy_nocow_ctx *nocow_ctx)
{
        struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info;
        struct btrfs_key key;
        struct inode *inode;
        struct page *page;
        struct btrfs_root *local_root;
        struct extent_io_tree *io_tree;
        u64 physical_for_dev_replace;
        u64 nocow_ctx_logical;
        u64 len = nocow_ctx->len;
        unsigned long index;
        int srcu_index;
        int ret = 0;
        int err = 0;

        key.objectid = root;
        key.type = BTRFS_ROOT_ITEM_KEY;
        key.offset = (u64)-1;

        srcu_index = srcu_read_lock(&fs_info->subvol_srcu);

        local_root = btrfs_read_fs_root_no_name(fs_info, &key);
        if (IS_ERR(local_root)) {
                srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
                return PTR_ERR(local_root);
        }

        key.type = BTRFS_INODE_ITEM_KEY;
        key.objectid = inum;
        key.offset = 0;
        inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
        srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
        if (IS_ERR(inode))
                return PTR_ERR(inode);

        /* Avoid truncate/dio/punch hole.. */
        mutex_lock(&inode->i_mutex);
        inode_dio_wait(inode);

        physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
        io_tree = &BTRFS_I(inode)->io_tree;
        nocow_ctx_logical = nocow_ctx->logical;

        ret = check_extent_to_block(inode, offset, len, nocow_ctx_logical);
        if (ret) {
                ret = ret > 0 ? 0 : ret;
                goto out;
        }

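        /*
         * Copy the range page by page, re-checking before every write that
         * the extent still covers the block being copied.
         */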
        while (len >= PAGE_CACHE_SIZE) {
                index = offset >> PAGE_CACHE_SHIFT;
again:
                page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
                if (!page) {
                        btrfs_err(fs_info, "find_or_create_page() failed");
                        ret = -ENOMEM;
                        goto out;
                }

                if (PageUptodate(page)) {
                        if (PageDirty(page))
                                goto next_page;
                } else {
                        ClearPageError(page);
                        err = extent_read_full_page(io_tree, page,
                                                    btrfs_get_extent,
                                                    nocow_ctx->mirror_num);
                        if (err) {
                                ret = err;
                                goto next_page;
                        }

                        lock_page(page);
                        /*
                         * If the page has been removed from the page cache,
                         * the data on it is meaningless: it may be stale,
                         * and the new data may have been written to a new
                         * page in the page cache.
                         */
                        if (page->mapping != inode->i_mapping) {
                                unlock_page(page);
                                page_cache_release(page);
                                goto again;
                        }
                        if (!PageUptodate(page)) {
                                ret = -EIO;
                                goto next_page;
                        }
                }

                ret = check_extent_to_block(inode, offset, len,
                                            nocow_ctx_logical);
                if (ret) {
                        ret = ret > 0 ? 0 : ret;
                        goto next_page;
                }

                err = write_page_nocow(nocow_ctx->sctx,
                                       physical_for_dev_replace, page);
                if (err)
                        ret = err;
next_page:
                unlock_page(page);
                page_cache_release(page);

                if (ret)
                        break;

                offset += PAGE_CACHE_SIZE;
                physical_for_dev_replace += PAGE_CACHE_SIZE;
                nocow_ctx_logical += PAGE_CACHE_SIZE;
                len -= PAGE_CACHE_SIZE;
        }
        ret = COPY_COMPLETE;
out:
        mutex_unlock(&inode->i_mutex);
        iput(inode);
        return ret;
}

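/*
 * Synchronously write a single page to the dev-replace target device at the
 * given physical offset. Returns 0 on success and a negative error if no
 * target device is available or the write fails.
 */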
static int write_page_nocow(struct scrub_ctx *sctx,
                            u64 physical_for_dev_replace, struct page *page)
{
        struct bio *bio;
        struct btrfs_device *dev;
        int ret;

        dev = sctx->wr_ctx.tgtdev;
        if (!dev)
                return -EIO;
        if (!dev->bdev) {
                printk_ratelimited(KERN_WARNING
                        "BTRFS: scrub write_page_nocow(bdev == NULL) is unexpected!\n");
                return -EIO;
        }
        bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
        if (!bio) {
                spin_lock(&sctx->stat_lock);
                sctx->stat.malloc_errors++;
                spin_unlock(&sctx->stat_lock);
                return -ENOMEM;
        }
        bio->bi_iter.bi_size = 0;
        bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
        bio->bi_bdev = dev->bdev;
        ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
        if (ret != PAGE_CACHE_SIZE) {
leave_with_eio:
                bio_put(bio);
                btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
                return -EIO;
        }

        if (btrfsic_submit_bio_wait(WRITE_SYNC, bio))
                goto leave_with_eio;

        bio_put(bio);
        return 0;
}