Btrfs: fix compile warnings on avr32 platform
[linux.git] / fs / btrfs / scrub.c
index 1fd3f33c330abe930fbd03de1deb5968e32fd7b5..68a5a26997bf57caebe9efc3a2d26da4a773c77e 100644
@@ -256,6 +256,8 @@ static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
                            int mirror_num, u64 physical_for_dev_replace);
 static void copy_nocow_pages_worker(struct btrfs_work *work);
+static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
+static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
 
 
 static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
@@ -269,6 +271,29 @@ static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
        wake_up(&sctx->list_wait);
 }
 
+static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
+{
+       while (atomic_read(&fs_info->scrub_pause_req)) {
+               mutex_unlock(&fs_info->scrub_lock);
+               wait_event(fs_info->scrub_pause_wait,
+                  atomic_read(&fs_info->scrub_pause_req) == 0);
+               mutex_lock(&fs_info->scrub_lock);
+       }
+}
+
+static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
+{
+       atomic_inc(&fs_info->scrubs_paused);
+       wake_up(&fs_info->scrub_pause_wait);
+
+       mutex_lock(&fs_info->scrub_lock);
+       __scrub_blocked_if_needed(fs_info);
+       atomic_dec(&fs_info->scrubs_paused);
+       mutex_unlock(&fs_info->scrub_lock);
+
+       wake_up(&fs_info->scrub_pause_wait);
+}
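
These two helpers are the scrub side of a pause handshake with the transaction-commit path: a scrub thread first advertises itself as paused (scrubs_paused++ plus a wake_up), then parks in __scrub_blocked_if_needed() until scrub_pause_req drops back to zero. The commit side is not part of this diff; a rough sketch of it, modeled on btrfs_scrub_pause() and offered only for orientation, would be:

	/* illustrative counterpart of the handshake; not part of this patch */
	void scrub_pause_sketch(struct btrfs_fs_info *fs_info)
	{
		mutex_lock(&fs_info->scrub_lock);
		atomic_inc(&fs_info->scrub_pause_req);
		/* wait until every running scrub has parked itself */
		while (atomic_read(&fs_info->scrubs_paused) !=
		       atomic_read(&fs_info->scrubs_running)) {
			mutex_unlock(&fs_info->scrub_lock);
			wait_event(fs_info->scrub_pause_wait,
				   atomic_read(&fs_info->scrubs_paused) ==
				   atomic_read(&fs_info->scrubs_running));
			mutex_lock(&fs_info->scrub_lock);
		}
		mutex_unlock(&fs_info->scrub_lock);
	}
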
+
 /*
  * used for workers that require transaction commits (i.e., for the
  * NOCOW case)
@@ -290,6 +315,16 @@ static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
        atomic_inc(&fs_info->scrubs_running);
        atomic_inc(&fs_info->scrubs_paused);
        mutex_unlock(&fs_info->scrub_lock);
+
+       /*
+        * the check of @scrubs_running == @scrubs_paused inside
+        * wait_event() is not an atomic operation, which means we
+        * may inc/dec @scrubs_running/@scrubs_paused at any time.
+        * Let's wake up @scrub_pause_wait as often as we can so
+        * that the transaction commit is blocked as briefly as
+        * possible.
+        */
+       wake_up(&fs_info->scrub_pause_wait);
+
        atomic_inc(&sctx->workers_pending);
 }
 
@@ -393,7 +428,8 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
                sbio->index = i;
                sbio->sctx = sctx;
                sbio->page_count = 0;
-               sbio->work.func = scrub_bio_end_io_worker;
+               btrfs_init_work(&sbio->work, scrub_bio_end_io_worker,
+                               NULL, NULL);
 
                if (i != SCRUB_BIOS_PER_SCTX - 1)
                        sctx->bios[i]->next_free = i + 1;
@@ -480,7 +516,7 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
         * hold all of the paths here
         */
        for (i = 0; i < ipath->fspath->elem_cnt; ++i)
-               printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev "
+               printk_in_rcu(KERN_WARNING "BTRFS: %s at logical %llu on dev "
                        "%s, sector %llu, root %llu, inode %llu, offset %llu, "
                        "length %llu, links %u (path: %s)\n", swarn->errstr,
                        swarn->logical, rcu_str_deref(swarn->dev->name),
@@ -492,7 +528,7 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
        return 0;
 
 err:
-       printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev "
+       printk_in_rcu(KERN_WARNING "BTRFS: %s at logical %llu on dev "
                "%s, sector %llu, root %llu, inode %llu, offset %llu: path "
                "resolving failed with ret=%d\n", swarn->errstr,
                swarn->logical, rcu_str_deref(swarn->dev->name),
@@ -555,7 +591,7 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
                        ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
                                                        &ref_root, &ref_level);
                        printk_in_rcu(KERN_WARNING
-                               "btrfs: %s at logical %llu on dev %s, "
+                               "BTRFS: %s at logical %llu on dev %s, "
                                "sector %llu: metadata %s (level %d) in tree "
                                "%llu\n", errstr, swarn.logical,
                                rcu_str_deref(dev->name),
@@ -704,13 +740,11 @@ static void scrub_fixup_nodatasum(struct btrfs_work *work)
        struct scrub_fixup_nodatasum *fixup;
        struct scrub_ctx *sctx;
        struct btrfs_trans_handle *trans = NULL;
-       struct btrfs_fs_info *fs_info;
        struct btrfs_path *path;
        int uncorrectable = 0;
 
        fixup = container_of(work, struct scrub_fixup_nodatasum, work);
        sctx = fixup->sctx;
-       fs_info = fixup->root->fs_info;
 
        path = btrfs_alloc_path();
        if (!path) {
@@ -759,8 +793,8 @@ out:
                btrfs_dev_replace_stats_inc(
                        &sctx->dev_root->fs_info->dev_replace.
                        num_uncorrectable_read_errors);
-               printk_ratelimited_in_rcu(KERN_ERR
-                       "btrfs: unable to fixup (nodatasum) error at logical %llu on dev %s\n",
+               printk_ratelimited_in_rcu(KERN_ERR "BTRFS: "
+                   "unable to fixup (nodatasum) error at logical %llu on dev %s\n",
                        fixup->logical, rcu_str_deref(fixup->dev->name));
        }
 
@@ -964,9 +998,10 @@ nodatasum_case:
                fixup_nodatasum->root = fs_info->extent_root;
                fixup_nodatasum->mirror_num = failed_mirror_index + 1;
                scrub_pending_trans_workers_inc(sctx);
-               fixup_nodatasum->work.func = scrub_fixup_nodatasum;
-               btrfs_queue_worker(&fs_info->scrub_workers,
-                                  &fixup_nodatasum->work);
+               btrfs_init_work(&fixup_nodatasum->work, scrub_fixup_nodatasum,
+                               NULL, NULL);
+               btrfs_queue_work(fs_info->scrub_workers,
+                                &fixup_nodatasum->work);
                goto out;
        }
 
@@ -1161,7 +1196,7 @@ corrected_error:
                        sctx->stat.corrected_errors++;
                        spin_unlock(&sctx->stat_lock);
                        printk_ratelimited_in_rcu(KERN_ERR
-                               "btrfs: fixed up error at logical %llu on dev %s\n",
+                               "BTRFS: fixed up error at logical %llu on dev %s\n",
                                logical, rcu_str_deref(dev->name));
                }
        } else {
@@ -1170,7 +1205,7 @@ did_not_correct_error:
                sctx->stat.uncorrectable_errors++;
                spin_unlock(&sctx->stat_lock);
                printk_ratelimited_in_rcu(KERN_ERR
-                       "btrfs: unable to fixup (regular) error at logical %llu on dev %s\n",
+                       "BTRFS: unable to fixup (regular) error at logical %llu on dev %s\n",
                        logical, rcu_str_deref(dev->name));
        }
 
@@ -1418,8 +1453,9 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
                int ret;
 
                if (!page_bad->dev->bdev) {
-                       printk_ratelimited(KERN_WARNING
-                               "btrfs: scrub_repair_page_from_good_copy(bdev == NULL) is unexpected!\n");
+                       printk_ratelimited(KERN_WARNING "BTRFS: "
+                               "scrub_repair_page_from_good_copy(bdev == NULL) "
+                               "is unexpected!\n");
                        return -EIO;
                }
 
@@ -1579,8 +1615,8 @@ static void scrub_wr_bio_end_io(struct bio *bio, int err)
        sbio->err = err;
        sbio->bio = bio;
 
-       sbio->work.func = scrub_wr_bio_end_io_worker;
-       btrfs_queue_worker(&fs_info->scrub_wr_completion_workers, &sbio->work);
+       btrfs_init_work(&sbio->work, scrub_wr_bio_end_io_worker, NULL, NULL);
+       btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
 }
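
This hunk shows the recurring conversion in this diff in full: the old worker framework, where callers assigned work.func by hand and queued onto an embedded btrfs_workers pool, is replaced by btrfs_init_work()/btrfs_queue_work() on a workqueue pointer. Going by the call sites here, the pattern is (hypothetical ctx/my_func names; the two NULLs are the ordered-completion and free hooks, which scrub leaves unused):

	/* sketch of the new API as used in this diff */
	btrfs_init_work(&ctx->work, my_func, NULL, NULL);
	btrfs_queue_work(fs_info->scrub_workers, &ctx->work);
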
 
 static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
@@ -1877,7 +1913,7 @@ static void scrub_submit(struct scrub_ctx *sctx)
                 * This case is handled correctly (but _very_ slowly).
                 */
                printk_ratelimited(KERN_WARNING
-                       "btrfs: scrub_submit(bio bdev == NULL) is unexpected!\n");
+                       "BTRFS: scrub_submit(bio bdev == NULL) is unexpected!\n");
                bio_endio(sbio->bio, -EIO);
        } else {
                btrfsic_submit_bio(READ, sbio->bio);
@@ -2048,7 +2084,7 @@ static void scrub_bio_end_io(struct bio *bio, int err)
        sbio->err = err;
        sbio->bio = bio;
 
-       btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work);
+       btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
 }
 
 static void scrub_bio_end_io_worker(struct btrfs_work *work)
@@ -2199,6 +2235,47 @@ behind_scrub_pages:
        return 0;
 }
 
+/*
+ * Given a physical address, this will calculate its
+ * logical offset. If this is a parity stripe, it will return
+ * the leftmost data stripe's logical offset.
+ *
+ * Returns 0 if it is a data stripe, 1 if it is a parity stripe.
+ */
+static int get_raid56_logic_offset(u64 physical, int num,
+                                  struct map_lookup *map, u64 *offset)
+{
+       int i;
+       int j = 0;
+       u64 stripe_nr;
+       u64 last_offset;
+       int stripe_index;
+       int rot;
+
+       last_offset = (physical - map->stripes[num].physical) *
+                     nr_data_stripes(map);
+       *offset = last_offset;
+       for (i = 0; i < nr_data_stripes(map); i++) {
+               *offset = last_offset + i * map->stripe_len;
+
+               stripe_nr = *offset;
+               do_div(stripe_nr, map->stripe_len);
+               do_div(stripe_nr, nr_data_stripes(map));
+
+               /* Work out the disk rotation on this stripe-set */
+               rot = do_div(stripe_nr, map->num_stripes);
+               /* calculate which stripe this data is located on */
+               rot += i;
+               stripe_index = rot % map->num_stripes;
+               if (stripe_index == num)
+                       return 0;
+               if (stripe_index < num)
+                       j++;
+       }
+       *offset = last_offset + j * map->stripe_len;
+       return 1;
+}
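
Two notes on the helper above. First, do_div(n, base) from asm/div64.h divides its 64-bit first argument in place by a 32-bit divisor and evaluates to the remainder, which is why stripe_nr must be u64 and why misuse surfaces as compile warnings on 32-bit targets such as avr32 (presumably what the commit title refers to):

	u64 n = 1000;
	u32 rem = do_div(n, 7);	/* now n == 142, rem == 6 */

Second, a worked example with made-up numbers:

	/*
	 * Made-up example: RAID5 over 3 devices, so
	 * nr_data_stripes(map) == 2, stripe_len == 64K, num == 2,
	 * physical == map->stripes[2].physical + 128K:
	 *
	 *   last_offset = 128K * 2 = 256K
	 *   i = 0: *offset   = 256K
	 *          stripe_nr = 256K / 64K / 2 = 2
	 *          rot = 2 % 3 = 2; stripe_index = (2 + 0) % 3 = 2
	 *          stripe_index == num, so this is a data stripe:
	 *          return 0 with *offset == 256K
	 */
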
+
 static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
                                           struct map_lookup *map,
                                           struct btrfs_device *scrub_dev,
@@ -2220,6 +2297,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
        u64 physical;
        u64 logical;
        u64 logic_end;
+       u64 physical_end;
        u64 generation;
        int mirror_num;
        struct reada_control *reada1;
@@ -2233,16 +2311,10 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
        u64 extent_len;
        struct btrfs_device *extent_dev;
        int extent_mirror_num;
-       int stop_loop;
-
-       if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
-                        BTRFS_BLOCK_GROUP_RAID6)) {
-               if (num >= nr_data_stripes(map)) {
-                       return 0;
-               }
-       }
+       int stop_loop = 0;
 
        nstripes = length;
+       physical = map->stripes[num].physical;
        offset = 0;
        do_div(nstripes, map->stripe_len);
        if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
@@ -2260,6 +2332,11 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
        } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
                increment = map->stripe_len;
                mirror_num = num % map->num_stripes + 1;
+       } else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
+                               BTRFS_BLOCK_GROUP_RAID6)) {
+               get_raid56_logic_offset(physical, num, map, &offset);
+               increment = map->stripe_len * nr_data_stripes(map);
+               mirror_num = 1;
        } else {
                increment = map->stripe_len;
                mirror_num = 1;
@@ -2283,17 +2360,24 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
         * to not hold off transaction commits
         */
        logical = base + offset;
-
+       physical_end = physical + nstripes * map->stripe_len;
+       if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
+                        BTRFS_BLOCK_GROUP_RAID6)) {
+               get_raid56_logic_offset(physical_end, num,
+                                       map, &logic_end);
+               logic_end += base;
+       } else {
+               logic_end = logical + increment * nstripes;
+       }
        wait_event(sctx->list_wait,
                   atomic_read(&sctx->bios_in_flight) == 0);
-       atomic_inc(&fs_info->scrubs_paused);
-       wake_up(&fs_info->scrub_pause_wait);
+       scrub_blocked_if_needed(fs_info);
 
        /* FIXME it might be better to start readahead at commit root */
        key_start.objectid = logical;
        key_start.type = BTRFS_EXTENT_ITEM_KEY;
        key_start.offset = (u64)0;
-       key_end.objectid = base + offset + nstripes * increment;
+       key_end.objectid = logic_end;
        key_end.type = BTRFS_METADATA_ITEM_KEY;
        key_end.offset = (u64)-1;
        reada1 = btrfs_reada_add(root, &key_start, &key_end);
@@ -2303,7 +2387,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
        key_start.offset = logical;
        key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
        key_end.type = BTRFS_EXTENT_CSUM_KEY;
-       key_end.offset = base + offset + nstripes * increment;
+       key_end.offset = logic_end;
        reada2 = btrfs_reada_add(csum_root, &key_start, &key_end);
 
        if (!IS_ERR(reada1))
@@ -2311,16 +2395,6 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
        if (!IS_ERR(reada2))
                btrfs_reada_wait(reada2);
 
-       mutex_lock(&fs_info->scrub_lock);
-       while (atomic_read(&fs_info->scrub_pause_req)) {
-               mutex_unlock(&fs_info->scrub_lock);
-               wait_event(fs_info->scrub_pause_wait,
-                  atomic_read(&fs_info->scrub_pause_req) == 0);
-               mutex_lock(&fs_info->scrub_lock);
-       }
-       atomic_dec(&fs_info->scrubs_paused);
-       mutex_unlock(&fs_info->scrub_lock);
-       wake_up(&fs_info->scrub_pause_wait);
 
        /*
         * collect all data csums for the stripe to avoid seeking during
@@ -2331,11 +2405,17 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
        /*
         * now find all extents for each stripe and scrub them
         */
-       logical = base + offset;
-       physical = map->stripes[num].physical;
-       logic_end = logical + increment * nstripes;
        ret = 0;
-       while (logical < logic_end) {
+       while (physical < physical_end) {
+               /* for raid56, we skip the parity stripes */
+               if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
+                               BTRFS_BLOCK_GROUP_RAID6)) {
+                       ret = get_raid56_logic_offset(physical, num,
+                                       map, &logical);
+                       logical += base;
+                       if (ret)
+                               goto skip;
+               }
                /*
                 * canceled?
                 */
@@ -2357,22 +2437,14 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
                        wait_event(sctx->list_wait,
                                   atomic_read(&sctx->bios_in_flight) == 0);
                        atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
-                       atomic_inc(&fs_info->scrubs_paused);
-                       wake_up(&fs_info->scrub_pause_wait);
-                       mutex_lock(&fs_info->scrub_lock);
-                       while (atomic_read(&fs_info->scrub_pause_req)) {
-                               mutex_unlock(&fs_info->scrub_lock);
-                               wait_event(fs_info->scrub_pause_wait,
-                                  atomic_read(&fs_info->scrub_pause_req) == 0);
-                               mutex_lock(&fs_info->scrub_lock);
-                       }
-                       atomic_dec(&fs_info->scrubs_paused);
-                       mutex_unlock(&fs_info->scrub_lock);
-                       wake_up(&fs_info->scrub_pause_wait);
+                       scrub_blocked_if_needed(fs_info);
                }
 
+               if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
+                       key.type = BTRFS_METADATA_ITEM_KEY;
+               else
+                       key.type = BTRFS_EXTENT_ITEM_KEY;
                key.objectid = logical;
-               key.type = BTRFS_EXTENT_ITEM_KEY;
                key.offset = (u64)-1;
 
                ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
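
The key-type switch above matters because of how extent items are keyed on disk (a summary from the btrfs format, for orientation):

	/*
	 * (objectid, type, offset) layouts involved here:
	 *   EXTENT_ITEM:   (logical, BTRFS_EXTENT_ITEM_KEY,   byte length)
	 *   METADATA_ITEM: (logical, BTRFS_METADATA_ITEM_KEY, tree level)
	 * With the skinny-metadata incompat feature, tree blocks use the
	 * second, smaller form, so a search that starts at the higher key
	 * type and walks backwards (btrfs_previous_extent_item()) will
	 * find either kind.
	 */
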
@@ -2380,8 +2452,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
                        goto out;
 
                if (ret > 0) {
-                       ret = btrfs_previous_item(root, path, 0,
-                                                 BTRFS_EXTENT_ITEM_KEY);
+                       ret = btrfs_previous_extent_item(root, path, 0);
                        if (ret < 0)
                                goto out;
                        if (ret > 0) {
@@ -2439,9 +2510,9 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 
                        if (key.objectid < logical &&
                            (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
-                               printk(KERN_ERR
-                                      "btrfs scrub: tree block %llu spanning "
-                                      "stripes, ignored. logical=%llu\n",
+                               btrfs_err(fs_info,
+                                          "scrub: tree block %llu spanning "
+                                          "stripes, ignored. logical=%llu",
                                       key.objectid, logical);
                                goto next;
                        }
@@ -2488,15 +2559,29 @@ again:
                        scrub_free_csums(sctx);
                        if (extent_logical + extent_len <
                            key.objectid + bytes) {
-                               logical += increment;
-                               physical += map->stripe_len;
-
+                               if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
+                                       BTRFS_BLOCK_GROUP_RAID6)) {
+                                       /*
+                                        * loop until we find the next data
+                                        * stripe or we have finished all
+                                        * stripes.
+                                        */
+                                       do {
+                                               physical += map->stripe_len;
+                                               ret = get_raid56_logic_offset(
+                                                               physical, num,
+                                                               map, &logical);
+                                               logical += base;
+                                       } while (physical < physical_end && ret);
+                               } else {
+                                       physical += map->stripe_len;
+                                       logical += increment;
+                               }
                                if (logical < key.objectid + bytes) {
                                        cond_resched();
                                        goto again;
                                }
 
-                               if (logical >= logic_end) {
+                               if (physical >= physical_end) {
                                        stop_loop = 1;
                                        break;
                                }
@@ -2505,6 +2590,7 @@ next:
                        path->slots[0]++;
                }
                btrfs_release_path(path);
+skip:
                logical += increment;
                physical += map->stripe_len;
                spin_lock(&sctx->stat_lock);
@@ -2682,19 +2768,20 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 
                wait_event(sctx->list_wait,
                           atomic_read(&sctx->bios_in_flight) == 0);
-               atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
                atomic_inc(&fs_info->scrubs_paused);
                wake_up(&fs_info->scrub_pause_wait);
+
+               /*
+                * must be called before we decrease @scrubs_paused.
+                * make sure we don't block the transaction commit
+                * while we are waiting for pending workers to finish.
+                */
                wait_event(sctx->list_wait,
                           atomic_read(&sctx->workers_pending) == 0);
+               atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
 
                mutex_lock(&fs_info->scrub_lock);
-               while (atomic_read(&fs_info->scrub_pause_req)) {
-                       mutex_unlock(&fs_info->scrub_lock);
-                       wait_event(fs_info->scrub_pause_wait,
-                          atomic_read(&fs_info->scrub_pause_req) == 0);
-                       mutex_lock(&fs_info->scrub_lock);
-               }
+               __scrub_blocked_if_needed(fs_info);
                atomic_dec(&fs_info->scrubs_paused);
                mutex_unlock(&fs_info->scrub_lock);
                wake_up(&fs_info->scrub_pause_wait);
@@ -2765,33 +2852,35 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
                                                int is_dev_replace)
 {
        int ret = 0;
+       int flags = WQ_FREEZABLE | WQ_UNBOUND;
+       int max_active = fs_info->thread_pool_size;
 
        if (fs_info->scrub_workers_refcnt == 0) {
                if (is_dev_replace)
-                       btrfs_init_workers(&fs_info->scrub_workers, "scrub", 1,
-                                       &fs_info->generic_worker);
+                       fs_info->scrub_workers =
+                               btrfs_alloc_workqueue("btrfs-scrub", flags,
+                                                     1, 4);
                else
-                       btrfs_init_workers(&fs_info->scrub_workers, "scrub",
-                                       fs_info->thread_pool_size,
-                                       &fs_info->generic_worker);
-               fs_info->scrub_workers.idle_thresh = 4;
-               ret = btrfs_start_workers(&fs_info->scrub_workers);
-               if (ret)
+                       fs_info->scrub_workers =
+                               btrfs_alloc_workqueue("btrfs-scrub", flags,
+                                                     max_active, 4);
+               if (!fs_info->scrub_workers) {
+                       ret = -ENOMEM;
                        goto out;
-               btrfs_init_workers(&fs_info->scrub_wr_completion_workers,
-                                  "scrubwrc",
-                                  fs_info->thread_pool_size,
-                                  &fs_info->generic_worker);
-               fs_info->scrub_wr_completion_workers.idle_thresh = 2;
-               ret = btrfs_start_workers(
-                               &fs_info->scrub_wr_completion_workers);
-               if (ret)
+               }
+               fs_info->scrub_wr_completion_workers =
+                       btrfs_alloc_workqueue("btrfs-scrubwrc", flags,
+                                             max_active, 2);
+               if (!fs_info->scrub_wr_completion_workers) {
+                       ret = -ENOMEM;
                        goto out;
-               btrfs_init_workers(&fs_info->scrub_nocow_workers, "scrubnc", 1,
-                                  &fs_info->generic_worker);
-               ret = btrfs_start_workers(&fs_info->scrub_nocow_workers);
-               if (ret)
+               }
+               fs_info->scrub_nocow_workers =
+                       btrfs_alloc_workqueue("btrfs-scrubnc", flags, 1, 0);
+               if (!fs_info->scrub_nocow_workers) {
+                       ret = -ENOMEM;
                        goto out;
+               }
        }
        ++fs_info->scrub_workers_refcnt;
 out:
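
Judging from the call sites above, btrfs_alloc_workqueue() takes (name, flags, max_active, thresh), with thresh standing in for the old idle_thresh knob (4 for scrub_workers, 2 for write completion, 0 meaning no threshold for the nocow queue). A minimal error-handling sketch with a hypothetical queue name, assuming that signature:

	struct btrfs_workqueue *wq;	/* pointer type as used by fs_info */

	wq = btrfs_alloc_workqueue("btrfs-example",
				   WQ_FREEZABLE | WQ_UNBOUND,
				   fs_info->thread_pool_size, 4);
	if (!wq)
		return -ENOMEM;
	/* ... queue btrfs_work items, then tear down ... */
	btrfs_destroy_workqueue(wq);
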
@@ -2801,9 +2890,9 @@ out:
 static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
 {
        if (--fs_info->scrub_workers_refcnt == 0) {
-               btrfs_stop_workers(&fs_info->scrub_workers);
-               btrfs_stop_workers(&fs_info->scrub_wr_completion_workers);
-               btrfs_stop_workers(&fs_info->scrub_nocow_workers);
+               btrfs_destroy_workqueue(fs_info->scrub_workers);
+               btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
+               btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
        }
        WARN_ON(fs_info->scrub_workers_refcnt < 0);
 }
@@ -2823,8 +2912,8 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
         * check some assumptions
         */
        if (fs_info->chunk_root->nodesize != fs_info->chunk_root->leafsize) {
-               printk(KERN_ERR
-                      "btrfs_scrub: size assumption nodesize == leafsize (%d == %d) fails\n",
+               btrfs_err(fs_info,
+                          "scrub: size assumption nodesize == leafsize (%d == %d) fails",
                       fs_info->chunk_root->nodesize,
                       fs_info->chunk_root->leafsize);
                return -EINVAL;
@@ -2836,16 +2925,17 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
                 * the way scrub is implemented. Do not handle this
                 * situation at all because it won't ever happen.
                 */
-               printk(KERN_ERR
-                      "btrfs_scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails\n",
+               btrfs_err(fs_info,
+                          "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
                       fs_info->chunk_root->nodesize, BTRFS_STRIPE_LEN);
                return -EINVAL;
        }
 
        if (fs_info->chunk_root->sectorsize != PAGE_SIZE) {
                /* not supported for data w/o checksums */
-               printk(KERN_ERR
-                      "btrfs_scrub: size assumption sectorsize != PAGE_SIZE (%d != %lu) fails\n",
+               btrfs_err(fs_info,
+                          "scrub: size assumption sectorsize != PAGE_SIZE "
+                          "(%d != %lu) fails",
                       fs_info->chunk_root->sectorsize, PAGE_SIZE);
                return -EINVAL;
        }
@@ -2858,7 +2948,8 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
                 * would exhaust the array bounds of pagev member in
                 * struct scrub_block
                 */
-               pr_err("btrfs_scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails\n",
+               btrfs_err(fs_info, "scrub: size assumption nodesize and sectorsize "
+                          "<= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
                       fs_info->chunk_root->nodesize,
                       SCRUB_MAX_PAGES_PER_BLOCK,
                       fs_info->chunk_root->sectorsize,
@@ -2908,7 +2999,13 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
        }
        sctx->readonly = readonly;
        dev->scrub_device = sctx;
+       mutex_unlock(&fs_info->fs_devices->device_list_mutex);
 
+       /*
+        * by checking @scrub_pause_req here, we can avoid a
+        * race between committing a transaction and scrubbing.
+        */
+       __scrub_blocked_if_needed(fs_info);
        atomic_inc(&fs_info->scrubs_running);
        mutex_unlock(&fs_info->scrub_lock);
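
The reordering above (drop the device-list mutex early, park in __scrub_blocked_if_needed(), and only then bump scrubs_running) closes a window in which a new scrub could slip past a commit that was already waiting for scrubs to pause. A hedged reconstruction of the race being avoided:

	/*
	 * Hedged reconstruction of the race:
	 *
	 *   commit thread                 new scrub
	 *   ----------------------        ----------------------
	 *   scrub_pause_req++
	 *   wait: paused == running
	 *                                 scrubs_running++
	 *                                 starts scrubbing anyway
	 *
	 * Parking before the increment means a scrub started while
	 * a commit is pausing scrubs waits for that commit first.
	 */
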
 
@@ -2917,9 +3014,10 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
                 * by holding device list mutex, we can
                 * kick off writing super in log tree sync.
                 */
+               mutex_lock(&fs_info->fs_devices->device_list_mutex);
                ret = scrub_supers(sctx, dev);
+               mutex_unlock(&fs_info->fs_devices->device_list_mutex);
        }
-       mutex_unlock(&fs_info->fs_devices->device_list_mutex);
 
        if (!ret)
                ret = scrub_enumerate_chunks(sctx, dev, start, end,
@@ -3105,10 +3203,10 @@ static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
        nocow_ctx->len = len;
        nocow_ctx->mirror_num = mirror_num;
        nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
-       nocow_ctx->work.func = copy_nocow_pages_worker;
+       btrfs_init_work(&nocow_ctx->work, copy_nocow_pages_worker, NULL, NULL);
        INIT_LIST_HEAD(&nocow_ctx->inodes);
-       btrfs_queue_worker(&fs_info->scrub_nocow_workers,
-                          &nocow_ctx->work);
+       btrfs_queue_work(fs_info->scrub_nocow_workers,
+                        &nocow_ctx->work);
 
        return 0;
 }
@@ -3167,7 +3265,8 @@ static void copy_nocow_pages_worker(struct btrfs_work *work)
        ret = iterate_inodes_from_logical(logical, fs_info, path,
                                          record_inode_for_nocow, nocow_ctx);
        if (ret != 0 && ret != -ENOENT) {
-               pr_warn("iterate_inodes_from_logical() failed: log %llu, phys %llu, len %llu, mir %u, ret %d\n",
+               btrfs_warn(fs_info, "iterate_inodes_from_logical() failed: log %llu, "
+                       "phys %llu, len %llu, mir %u, ret %d",
                        logical, physical_for_dev_replace, len, mirror_num,
                        ret);
                not_written = 1;
@@ -3289,7 +3388,7 @@ static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
 again:
                page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
                if (!page) {
-                       pr_err("find_or_create_page() failed\n");
+                       btrfs_err(fs_info, "find_or_create_page() failed");
                        ret = -ENOMEM;
                        goto out;
                }
@@ -3361,7 +3460,7 @@ static int write_page_nocow(struct scrub_ctx *sctx,
                return -EIO;
        if (!dev->bdev) {
                printk_ratelimited(KERN_WARNING
-                       "btrfs: scrub write_page_nocow(bdev == NULL) is unexpected!\n");
+                       "BTRFS: scrub write_page_nocow(bdev == NULL) is unexpected!\n");
                return -EIO;
        }
        bio = btrfs_io_bio_alloc(GFP_NOFS, 1);