/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "inode-map.h"
#include "volumes.h"
#include "dev-replace.h"

#define BTRFS_ROOT_TRANS_TAG 0

static unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
        [TRANS_STATE_RUNNING]           = 0U,
        [TRANS_STATE_BLOCKED]           = (__TRANS_USERSPACE |
                                           __TRANS_START),
        [TRANS_STATE_COMMIT_START]      = (__TRANS_USERSPACE |
                                           __TRANS_START |
                                           __TRANS_ATTACH),
        [TRANS_STATE_COMMIT_DOING]      = (__TRANS_USERSPACE |
                                           __TRANS_START |
                                           __TRANS_ATTACH |
                                           __TRANS_JOIN),
        [TRANS_STATE_UNBLOCKED]         = (__TRANS_USERSPACE |
                                           __TRANS_START |
                                           __TRANS_ATTACH |
                                           __TRANS_JOIN |
                                           __TRANS_JOIN_NOLOCK),
        [TRANS_STATE_COMPLETED]         = (__TRANS_USERSPACE |
                                           __TRANS_START |
                                           __TRANS_ATTACH |
                                           __TRANS_JOIN |
                                           __TRANS_JOIN_NOLOCK),
};
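
/*
 * Each entry above names the handle types that may no longer join the
 * running transaction once it has reached that state (join_transaction()
 * returns -EBUSY for them).  For example, TRANS_START is refused from
 * TRANS_STATE_BLOCKED onwards, TRANS_JOIN is refused once the commit is
 * in TRANS_STATE_COMMIT_DOING, and TRANS_JOIN_NOLOCK (used by the commit
 * code itself) is only refused once the transaction is unblocked or
 * completed.
 */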

void btrfs_put_transaction(struct btrfs_transaction *transaction)
{
        WARN_ON(atomic_read(&transaction->use_count) == 0);
        if (atomic_dec_and_test(&transaction->use_count)) {
                BUG_ON(!list_empty(&transaction->list));
                WARN_ON(!RB_EMPTY_ROOT(&transaction->delayed_refs.href_root));
                while (!list_empty(&transaction->pending_chunks)) {
                        struct extent_map *em;

                        em = list_first_entry(&transaction->pending_chunks,
                                              struct extent_map, list);
                        list_del_init(&em->list);
                        free_extent_map(em);
                }
                kmem_cache_free(btrfs_transaction_cachep, transaction);
        }
}

static noinline void switch_commit_roots(struct btrfs_transaction *trans,
                                         struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *root, *tmp;

        down_write(&fs_info->commit_root_sem);
        list_for_each_entry_safe(root, tmp, &trans->switch_commits,
                                 dirty_list) {
                list_del_init(&root->dirty_list);
                free_extent_buffer(root->commit_root);
                root->commit_root = btrfs_root_node(root);
                if (is_fstree(root->objectid))
                        btrfs_unpin_free_ino(root);
        }
        up_write(&fs_info->commit_root_sem);
}

static inline void extwriter_counter_inc(struct btrfs_transaction *trans,
                                         unsigned int type)
{
        if (type & TRANS_EXTWRITERS)
                atomic_inc(&trans->num_extwriters);
}

static inline void extwriter_counter_dec(struct btrfs_transaction *trans,
                                         unsigned int type)
{
        if (type & TRANS_EXTWRITERS)
                atomic_dec(&trans->num_extwriters);
}

static inline void extwriter_counter_init(struct btrfs_transaction *trans,
                                          unsigned int type)
{
        atomic_set(&trans->num_extwriters, ((type & TRANS_EXTWRITERS) ? 1 : 0));
}

static inline int extwriter_counter_read(struct btrfs_transaction *trans)
{
        return atomic_read(&trans->num_extwriters);
}

/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_root *root, unsigned int type)
{
        struct btrfs_transaction *cur_trans;
        struct btrfs_fs_info *fs_info = root->fs_info;

        spin_lock(&fs_info->trans_lock);
loop:
        /* The file system has been taken offline. No new transactions. */
        if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
                spin_unlock(&fs_info->trans_lock);
                return -EROFS;
        }

        cur_trans = fs_info->running_transaction;
        if (cur_trans) {
                if (cur_trans->aborted) {
                        spin_unlock(&fs_info->trans_lock);
                        return cur_trans->aborted;
                }
                if (btrfs_blocked_trans_types[cur_trans->state] & type) {
                        spin_unlock(&fs_info->trans_lock);
                        return -EBUSY;
                }
                atomic_inc(&cur_trans->use_count);
                atomic_inc(&cur_trans->num_writers);
                extwriter_counter_inc(cur_trans, type);
                spin_unlock(&fs_info->trans_lock);
                return 0;
        }
        spin_unlock(&fs_info->trans_lock);

        /*
         * If we are ATTACH, we just want to catch the current transaction,
         * and commit it. If there is no running transaction, just
         * return -ENOENT.
         */
        if (type == TRANS_ATTACH)
                return -ENOENT;

        /*
         * JOIN_NOLOCK only happens during the transaction commit, so
         * it is impossible that ->running_transaction is NULL
         */
        BUG_ON(type == TRANS_JOIN_NOLOCK);

        cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
        if (!cur_trans)
                return -ENOMEM;

        spin_lock(&fs_info->trans_lock);
        if (fs_info->running_transaction) {
                /*
                 * someone started a transaction after we unlocked.  Make sure
                 * to redo the checks above
                 */
                kmem_cache_free(btrfs_transaction_cachep, cur_trans);
                goto loop;
        } else if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
                spin_unlock(&fs_info->trans_lock);
                kmem_cache_free(btrfs_transaction_cachep, cur_trans);
                return -EROFS;
        }

        atomic_set(&cur_trans->num_writers, 1);
        extwriter_counter_init(cur_trans, type);
        init_waitqueue_head(&cur_trans->writer_wait);
        init_waitqueue_head(&cur_trans->commit_wait);
        cur_trans->state = TRANS_STATE_RUNNING;
        /*
         * One for this trans handle, one so it will live on until we
         * commit the transaction.
         */
        atomic_set(&cur_trans->use_count, 2);
        cur_trans->start_time = get_seconds();

        cur_trans->delayed_refs.href_root = RB_ROOT;
        atomic_set(&cur_trans->delayed_refs.num_entries, 0);
        cur_trans->delayed_refs.num_heads_ready = 0;
        cur_trans->delayed_refs.num_heads = 0;
        cur_trans->delayed_refs.flushing = 0;
        cur_trans->delayed_refs.run_delayed_start = 0;

        /*
         * although the tree mod log is per file system and not per transaction,
         * the log must never go across transaction boundaries.
         */
        smp_mb();
        if (!list_empty(&fs_info->tree_mod_seq_list))
                WARN(1, KERN_ERR "BTRFS: tree_mod_seq_list not empty when "
                        "creating a fresh transaction\n");
        if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
                WARN(1, KERN_ERR "BTRFS: tree_mod_log rb tree not empty when "
                        "creating a fresh transaction\n");
        atomic64_set(&fs_info->tree_mod_seq, 0);

        spin_lock_init(&cur_trans->delayed_refs.lock);

        INIT_LIST_HEAD(&cur_trans->pending_snapshots);
        INIT_LIST_HEAD(&cur_trans->ordered_operations);
        INIT_LIST_HEAD(&cur_trans->pending_chunks);
        INIT_LIST_HEAD(&cur_trans->switch_commits);
        list_add_tail(&cur_trans->list, &fs_info->trans_list);
        extent_io_tree_init(&cur_trans->dirty_pages,
                             fs_info->btree_inode->i_mapping);
        fs_info->generation++;
        cur_trans->transid = fs_info->generation;
        fs_info->running_transaction = cur_trans;
        cur_trans->aborted = 0;
        spin_unlock(&fs_info->trans_lock);

        return 0;
}

/*
 * this does all the record keeping required to make sure that a reference
 * counted root is properly recorded in a given transaction.  This is required
 * to make sure the old root from before we joined the transaction is deleted
 * when the transaction commits
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)
{
        if (root->ref_cows && root->last_trans < trans->transid) {
                WARN_ON(root == root->fs_info->extent_root);
                WARN_ON(root->commit_root != root->node);

                /*
                 * see below for in_trans_setup usage rules
                 * we have the reloc mutex held now, so there
                 * is only one writer in this function
                 */
                root->in_trans_setup = 1;

                /* make sure readers find in_trans_setup before
                 * they find our root->last_trans update
                 */
                smp_wmb();

                spin_lock(&root->fs_info->fs_roots_radix_lock);
                if (root->last_trans == trans->transid) {
                        spin_unlock(&root->fs_info->fs_roots_radix_lock);
                        return 0;
                }
                radix_tree_tag_set(&root->fs_info->fs_roots_radix,
                           (unsigned long)root->root_key.objectid,
                           BTRFS_ROOT_TRANS_TAG);
                spin_unlock(&root->fs_info->fs_roots_radix_lock);
                root->last_trans = trans->transid;

                /* this is pretty tricky.  We don't want to
                 * take the relocation lock in btrfs_record_root_in_trans
                 * unless we're really doing the first setup for this root in
                 * this transaction.
                 *
                 * Normally we'd use root->last_trans as a flag to decide
                 * if we want to take the expensive mutex.
                 *
                 * But, we have to set root->last_trans before we
                 * init the relocation root, otherwise, we trip over warnings
                 * in ctree.c.  The solution used here is to flag ourselves
                 * with root->in_trans_setup.  When this is 1, we're still
                 * fixing up the reloc trees and everyone must wait.
                 *
                 * When this is zero, they can trust root->last_trans and fly
                 * through btrfs_record_root_in_trans without having to take the
                 * lock.  smp_wmb() makes sure that all the writes above are
                 * done before we pop in the zero below
                 */
                btrfs_init_reloc_root(trans, root);
                smp_wmb();
                root->in_trans_setup = 0;
        }
        return 0;
}


int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)
{
        if (!root->ref_cows)
                return 0;

        /*
         * see record_root_in_trans for comments about in_trans_setup usage
         * and barriers
         */
        smp_rmb();
        if (root->last_trans == trans->transid &&
            !root->in_trans_setup)
                return 0;

        mutex_lock(&root->fs_info->reloc_mutex);
        record_root_in_trans(trans, root);
        mutex_unlock(&root->fs_info->reloc_mutex);

        return 0;
}

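/*
 * A transaction counts as blocked from TRANS_STATE_BLOCKED until it
 * reaches TRANS_STATE_UNBLOCKED.  An aborted transaction is never
 * treated as blocked, so that waiters are not stuck on a commit that
 * will not move forward.
 */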
static inline int is_transaction_blocked(struct btrfs_transaction *trans)
{
        return (trans->state >= TRANS_STATE_BLOCKED &&
                trans->state < TRANS_STATE_UNBLOCKED &&
                !trans->aborted);
}

/* wait for a commit of the current transaction to become unblocked.
 * when this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_root *root)
{
        struct btrfs_transaction *cur_trans;

        spin_lock(&root->fs_info->trans_lock);
        cur_trans = root->fs_info->running_transaction;
        if (cur_trans && is_transaction_blocked(cur_trans)) {
                atomic_inc(&cur_trans->use_count);
                spin_unlock(&root->fs_info->trans_lock);

                wait_event(root->fs_info->transaction_wait,
                           cur_trans->state >= TRANS_STATE_UNBLOCKED ||
                           cur_trans->aborted);
                btrfs_put_transaction(cur_trans);
        } else {
                spin_unlock(&root->fs_info->trans_lock);
        }
}

static int may_wait_transaction(struct btrfs_root *root, int type)
{
        if (root->fs_info->log_root_recovering)
                return 0;

        if (type == TRANS_USERSPACE)
                return 1;

        if (type == TRANS_START &&
            !atomic_read(&root->fs_info->open_ioctl_trans))
                return 1;

        return 0;
}

static inline bool need_reserve_reloc_root(struct btrfs_root *root)
{
        if (!root->fs_info->reloc_ctl ||
            !root->ref_cows ||
            root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
            root->reloc_root)
                return false;

        return true;
}

static struct btrfs_trans_handle *
start_transaction(struct btrfs_root *root, u64 num_items, unsigned int type,
                  enum btrfs_reserve_flush_enum flush)
{
        struct btrfs_trans_handle *h;
        struct btrfs_transaction *cur_trans;
        u64 num_bytes = 0;
        u64 qgroup_reserved = 0;
        bool reloc_reserved = false;
        int ret;

        if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
                return ERR_PTR(-EROFS);

        if (current->journal_info &&
            current->journal_info != (void *)BTRFS_SEND_TRANS_STUB) {
                WARN_ON(type & TRANS_EXTWRITERS);
                h = current->journal_info;
                h->use_count++;
                WARN_ON(h->use_count > 2);
                h->orig_rsv = h->block_rsv;
                h->block_rsv = NULL;
                goto got_it;
        }

        /*
         * Do the reservation before we join the transaction so we can do all
         * the appropriate flushing if need be.
         */
        if (num_items > 0 && root != root->fs_info->chunk_root) {
                if (root->fs_info->quota_enabled &&
                    is_fstree(root->root_key.objectid)) {
                        qgroup_reserved = num_items * root->leafsize;
                        ret = btrfs_qgroup_reserve(root, qgroup_reserved);
                        if (ret)
                                return ERR_PTR(ret);
                }

                num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
                /*
                 * Do the reservation for the relocation root creation
                 */
                if (unlikely(need_reserve_reloc_root(root))) {
                        num_bytes += root->nodesize;
                        reloc_reserved = true;
                }

                ret = btrfs_block_rsv_add(root,
                                          &root->fs_info->trans_block_rsv,
                                          num_bytes, flush);
                if (ret)
                        goto reserve_fail;
        }
again:
        h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
        if (!h) {
                ret = -ENOMEM;
                goto alloc_fail;
        }

        /*
         * If we are JOIN_NOLOCK we're already committing a transaction and
         * waiting on this guy, so we don't need to do the sb_start_intwrite
         * because we're already holding a ref.  We need this because we could
         * have raced in and done an fsync() on a file which can kick a commit
         * and then we deadlock with somebody doing a freeze.
         *
         * If we are ATTACH, it means we just want to catch the current
         * transaction and commit it, so we needn't do sb_start_intwrite().
         */
        if (type & __TRANS_FREEZABLE)
                sb_start_intwrite(root->fs_info->sb);

        if (may_wait_transaction(root, type))
                wait_current_trans(root);

        do {
                ret = join_transaction(root, type);
                if (ret == -EBUSY) {
                        wait_current_trans(root);
                        if (unlikely(type == TRANS_ATTACH))
                                ret = -ENOENT;
                }
        } while (ret == -EBUSY);

        if (ret < 0) {
                /* We must get the transaction if we are JOIN_NOLOCK. */
                BUG_ON(type == TRANS_JOIN_NOLOCK);
                goto join_fail;
        }

        cur_trans = root->fs_info->running_transaction;

        h->transid = cur_trans->transid;
        h->transaction = cur_trans;
        h->blocks_used = 0;
        h->bytes_reserved = 0;
        h->root = root;
        h->delayed_ref_updates = 0;
        h->use_count = 1;
        h->adding_csums = 0;
        h->block_rsv = NULL;
        h->orig_rsv = NULL;
        h->aborted = 0;
        h->qgroup_reserved = 0;
        h->delayed_ref_elem.seq = 0;
        h->type = type;
        h->allocating_chunk = false;
        h->reloc_reserved = false;
        h->sync = false;
        INIT_LIST_HEAD(&h->qgroup_ref_list);
        INIT_LIST_HEAD(&h->new_bgs);

        smp_mb();
        if (cur_trans->state >= TRANS_STATE_BLOCKED &&
            may_wait_transaction(root, type)) {
                btrfs_commit_transaction(h, root);
                goto again;
        }

        if (num_bytes) {
                trace_btrfs_space_reservation(root->fs_info, "transaction",
                                              h->transid, num_bytes, 1);
                h->block_rsv = &root->fs_info->trans_block_rsv;
                h->bytes_reserved = num_bytes;
                h->reloc_reserved = reloc_reserved;
        }
        h->qgroup_reserved = qgroup_reserved;

got_it:
        btrfs_record_root_in_trans(h, root);

        if (!current->journal_info && type != TRANS_USERSPACE)
                current->journal_info = h;
        return h;

join_fail:
        if (type & __TRANS_FREEZABLE)
                sb_end_intwrite(root->fs_info->sb);
        kmem_cache_free(btrfs_trans_handle_cachep, h);
alloc_fail:
        if (num_bytes)
                btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
                                        num_bytes);
reserve_fail:
        if (qgroup_reserved)
                btrfs_qgroup_free(root, qgroup_reserved);
        return ERR_PTR(ret);
}

struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
                                                   int num_items)
{
        return start_transaction(root, num_items, TRANS_START,
                                 BTRFS_RESERVE_FLUSH_ALL);
}
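
/*
 * Example (sketch of a hypothetical caller): reserve space for touching
 * two tree items, do the modifications, then drop the handle.
 * do_the_modifications() is a placeholder, not a real helper.
 *
 *      struct btrfs_trans_handle *trans;
 *      int ret;
 *
 *      trans = btrfs_start_transaction(root, 2);
 *      if (IS_ERR(trans))
 *              return PTR_ERR(trans);
 *      ret = do_the_modifications(trans, root);
 *      btrfs_end_transaction(trans, root);
 *      return ret;
 */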

struct btrfs_trans_handle *btrfs_start_transaction_lflush(
                                        struct btrfs_root *root, int num_items)
{
        return start_transaction(root, num_items, TRANS_START,
                                 BTRFS_RESERVE_FLUSH_LIMIT);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_JOIN, 0);
}

struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_JOIN_NOLOCK, 0);
}

struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_USERSPACE, 0);
}

/*
 * btrfs_attach_transaction() - catch the running transaction
 *
 * It is used when we want to commit the current transaction, but
 * don't want to start a new one.
 *
 * Note: If this function returns -ENOENT, it just means there is no
 * running transaction. But it is possible that an inactive transaction
 * is still in memory, not fully on disk. If you need to make sure there
 * is no inactive transaction in the fs when -ENOENT is returned, you
 * should invoke
 *     btrfs_attach_transaction_barrier()
 */
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_ATTACH, 0);
}

/*
 * btrfs_attach_transaction_barrier() - catch the running transaction
 *
 * It is similar to the above function, the difference is that this one
 * will wait for all the inactive transactions until they fully
 * complete.
 */
struct btrfs_trans_handle *
btrfs_attach_transaction_barrier(struct btrfs_root *root)
{
        struct btrfs_trans_handle *trans;

        trans = start_transaction(root, 0, TRANS_ATTACH, 0);
        if (IS_ERR(trans) && PTR_ERR(trans) == -ENOENT)
                btrfs_wait_for_commit(root, 0);

        return trans;
}
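
/*
 * Example (sketch, hypothetical caller): make sure everything committed
 * so far is on disk without forcing a brand new commit.
 *
 *      trans = btrfs_attach_transaction_barrier(root);
 *      if (IS_ERR(trans)) {
 *              ret = PTR_ERR(trans);
 *              return (ret == -ENOENT) ? 0 : ret;
 *      }
 *      return btrfs_commit_transaction(trans, root);
 */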

/* wait for a transaction commit to be fully complete */
static noinline void wait_for_commit(struct btrfs_root *root,
                                    struct btrfs_transaction *commit)
{
        wait_event(commit->commit_wait, commit->state == TRANS_STATE_COMPLETED);
}

int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
{
        struct btrfs_transaction *cur_trans = NULL, *t;
        int ret = 0;

        if (transid) {
                if (transid <= root->fs_info->last_trans_committed)
                        goto out;

                ret = -EINVAL;
                /* find specified transaction */
                spin_lock(&root->fs_info->trans_lock);
                list_for_each_entry(t, &root->fs_info->trans_list, list) {
                        if (t->transid == transid) {
                                cur_trans = t;
                                atomic_inc(&cur_trans->use_count);
                                ret = 0;
                                break;
                        }
                        if (t->transid > transid) {
                                ret = 0;
                                break;
                        }
                }
                spin_unlock(&root->fs_info->trans_lock);
                /* The specified transaction doesn't exist */
                if (!cur_trans)
                        goto out;
        } else {
                /* find newest transaction that is committing | committed */
                spin_lock(&root->fs_info->trans_lock);
                list_for_each_entry_reverse(t, &root->fs_info->trans_list,
                                            list) {
                        if (t->state >= TRANS_STATE_COMMIT_START) {
                                if (t->state == TRANS_STATE_COMPLETED)
                                        break;
                                cur_trans = t;
                                atomic_inc(&cur_trans->use_count);
                                break;
                        }
                }
                spin_unlock(&root->fs_info->trans_lock);
                if (!cur_trans)
                        goto out;  /* nothing committing|committed */
        }

        wait_for_commit(root, cur_trans);
        btrfs_put_transaction(cur_trans);
out:
        return ret;
}

void btrfs_throttle(struct btrfs_root *root)
{
        if (!atomic_read(&root->fs_info->open_ioctl_trans))
                wait_current_trans(root);
}

static int should_end_transaction(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root)
{
        if (root->fs_info->global_block_rsv.space_info->full &&
            btrfs_check_space_for_delayed_refs(trans, root))
                return 1;

        return !!btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
}

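/*
 * Long-running operations are expected to poll this between units of
 * work and, when it returns nonzero, cycle their handle so the pending
 * commit can make progress.  A sketch of a hypothetical caller loop:
 *
 *      while (more_work) {
 *              do_one_unit(trans);
 *              if (btrfs_should_end_transaction(trans, root)) {
 *                      btrfs_end_transaction(trans, root);
 *                      trans = btrfs_start_transaction(root, num_items);
 *                      if (IS_ERR(trans))
 *                              return PTR_ERR(trans);
 *              }
 *      }
 */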
int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root)
{
        struct btrfs_transaction *cur_trans = trans->transaction;
        int updates;
        int err;

        smp_mb();
        if (cur_trans->state >= TRANS_STATE_BLOCKED ||
            cur_trans->delayed_refs.flushing)
                return 1;

        updates = trans->delayed_ref_updates;
        trans->delayed_ref_updates = 0;
        if (updates) {
                err = btrfs_run_delayed_refs(trans, root, updates);
                if (err) /* Error code will also evaluate as true */
                        return err;
        }

        return should_end_transaction(trans, root);
}

static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root, int throttle)
{
        struct btrfs_transaction *cur_trans = trans->transaction;
        struct btrfs_fs_info *info = root->fs_info;
        unsigned long cur = trans->delayed_ref_updates;
        int lock = (trans->type != TRANS_JOIN_NOLOCK);
        int err = 0;

        if (trans->use_count > 1) {
                trans->use_count--;
                trans->block_rsv = trans->orig_rsv;
                return 0;
        }

        /*
         * do the qgroup accounting as early as possible
         */
        err = btrfs_delayed_refs_qgroup_accounting(trans, info);

        btrfs_trans_release_metadata(trans, root);
        trans->block_rsv = NULL;

        if (trans->qgroup_reserved) {
                /*
                 * the same root has to be passed here between start_transaction
                 * and end_transaction. Subvolume quota depends on this.
                 */
                btrfs_qgroup_free(trans->root, trans->qgroup_reserved);
                trans->qgroup_reserved = 0;
        }

        if (!list_empty(&trans->new_bgs))
                btrfs_create_pending_block_groups(trans, root);

        trans->delayed_ref_updates = 0;
        if (!trans->sync && btrfs_should_throttle_delayed_refs(trans, root)) {
                cur = max_t(unsigned long, cur, 32);
                trans->delayed_ref_updates = 0;
                btrfs_run_delayed_refs(trans, root, cur);
        }

        btrfs_trans_release_metadata(trans, root);
        trans->block_rsv = NULL;

        if (!list_empty(&trans->new_bgs))
                btrfs_create_pending_block_groups(trans, root);

        if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
            should_end_transaction(trans, root) &&
            ACCESS_ONCE(cur_trans->state) == TRANS_STATE_RUNNING) {
                spin_lock(&info->trans_lock);
                if (cur_trans->state == TRANS_STATE_RUNNING)
                        cur_trans->state = TRANS_STATE_BLOCKED;
                spin_unlock(&info->trans_lock);
        }

        if (lock && ACCESS_ONCE(cur_trans->state) == TRANS_STATE_BLOCKED) {
                if (throttle)
                        return btrfs_commit_transaction(trans, root);
                else
                        wake_up_process(info->transaction_kthread);
        }

        if (trans->type & __TRANS_FREEZABLE)
                sb_end_intwrite(root->fs_info->sb);

        WARN_ON(cur_trans != info->running_transaction);
        WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
        atomic_dec(&cur_trans->num_writers);
        extwriter_counter_dec(cur_trans, trans->type);

        smp_mb();
        if (waitqueue_active(&cur_trans->writer_wait))
                wake_up(&cur_trans->writer_wait);
        btrfs_put_transaction(cur_trans);

        if (current->journal_info == trans)
                current->journal_info = NULL;

        if (throttle)
                btrfs_run_delayed_iputs(root);

        if (trans->aborted ||
            test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
                wake_up_process(info->transaction_kthread);
                err = -EIO;
        }
        assert_qgroups_uptodate(trans);

        kmem_cache_free(btrfs_trans_handle_cachep, trans);
        return err;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root)
{
        return __btrfs_end_transaction(trans, root, 0);
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root)
{
        return __btrfs_end_transaction(trans, root, 1);
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are sent to disk but does not wait on them
 */
int btrfs_write_marked_extents(struct btrfs_root *root,
                               struct extent_io_tree *dirty_pages, int mark)
{
        int err = 0;
        int werr = 0;
        struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
        struct extent_state *cached_state = NULL;
        u64 start = 0;
        u64 end;

        while (!find_first_extent_bit(dirty_pages, start, &start, &end,
                                      mark, &cached_state)) {
                convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
                                   mark, &cached_state, GFP_NOFS);
                cached_state = NULL;
                err = filemap_fdatawrite_range(mapping, start, end);
                if (err)
                        werr = err;
                cond_resched();
                start = end + 1;
        }
        if (err)
                werr = err;
        return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit.  We wait
 * on all the pages and clear them from the dirty pages state tree
 */
int btrfs_wait_marked_extents(struct btrfs_root *root,
                              struct extent_io_tree *dirty_pages, int mark)
{
        int err = 0;
        int werr = 0;
        struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
        struct extent_state *cached_state = NULL;
        u64 start = 0;
        u64 end;

        while (!find_first_extent_bit(dirty_pages, start, &start, &end,
                                      EXTENT_NEED_WAIT, &cached_state)) {
                clear_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
                                 0, 0, &cached_state, GFP_NOFS);
                err = filemap_fdatawait_range(mapping, start, end);
                if (err)
                        werr = err;
                cond_resched();
                start = end + 1;
        }
        if (err)
                werr = err;
        return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit
 */
static int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
                                struct extent_io_tree *dirty_pages, int mark)
{
        int ret;
        int ret2;
        struct blk_plug plug;

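        /*
         * plug the block layer so the writeback issued for each marked
         * range below can be merged into larger requests at submit time
         */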
        blk_start_plug(&plug);
        ret = btrfs_write_marked_extents(root, dirty_pages, mark);
        blk_finish_plug(&plug);
        ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);

        if (ret)
                return ret;
        if (ret2)
                return ret2;
        return 0;
}

int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root)
{
        if (!trans || !trans->transaction) {
                struct inode *btree_inode;
                btree_inode = root->fs_info->btree_inode;
                return filemap_write_and_wait(btree_inode->i_mapping);
        }
        return btrfs_write_and_wait_marked_extents(root,
                                           &trans->transaction->dirty_pages,
                                           EXTENT_DIRTY);
}

/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)
{
        int ret;
        u64 old_root_bytenr;
        u64 old_root_used;
        struct btrfs_root *tree_root = root->fs_info->tree_root;

        old_root_used = btrfs_root_used(&root->root_item);
        btrfs_write_dirty_block_groups(trans, root);

        while (1) {
                old_root_bytenr = btrfs_root_bytenr(&root->root_item);
                if (old_root_bytenr == root->node->start &&
                    old_root_used == btrfs_root_used(&root->root_item))
                        break;

                btrfs_set_root_node(&root->root_item, root->node);
                ret = btrfs_update_root(trans, tree_root,
                                        &root->root_key,
                                        &root->root_item);
                if (ret)
                        return ret;

                old_root_used = btrfs_root_used(&root->root_item);
                ret = btrfs_write_dirty_block_groups(trans, root);
                if (ret)
                        return ret;
        }

        return 0;
}

/*
 * update all the cowonly tree roots on disk
 *
 * The error handling in this function may not be obvious. Any of the
 * failures will cause the file system to go offline. We still need
 * to clean up the delayed refs.
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
                                         struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct list_head *next;
        struct extent_buffer *eb;
        int ret;

        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
        if (ret)
                return ret;

        eb = btrfs_lock_root_node(fs_info->tree_root);
        ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
                              0, &eb);
        btrfs_tree_unlock(eb);
        free_extent_buffer(eb);

        if (ret)
                return ret;

        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
        if (ret)
                return ret;

        ret = btrfs_run_dev_stats(trans, root->fs_info);
        if (ret)
                return ret;
        ret = btrfs_run_dev_replace(trans, root->fs_info);
        if (ret)
                return ret;
        ret = btrfs_run_qgroups(trans, root->fs_info);
        if (ret)
                return ret;

        /* run_qgroups might have added some more refs */
        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
        if (ret)
                return ret;

        while (!list_empty(&fs_info->dirty_cowonly_roots)) {
                next = fs_info->dirty_cowonly_roots.next;
                list_del_init(next);
                root = list_entry(next, struct btrfs_root, dirty_list);

                if (root != fs_info->extent_root)
                        list_add_tail(&root->dirty_list,
                                      &trans->transaction->switch_commits);
                ret = update_cowonly_root(trans, root);
                if (ret)
                        return ret;
        }

        list_add_tail(&fs_info->extent_root->dirty_list,
                      &trans->transaction->switch_commits);
        btrfs_after_dev_replace_commit(fs_info);

        return 0;
}

/*
 * dead roots are old snapshots that need to be deleted.  This allocates
 * a dirty root struct and adds it into the list of dead roots that need to
 * be deleted
 */
void btrfs_add_dead_root(struct btrfs_root *root)
{
        spin_lock(&root->fs_info->trans_lock);
        if (list_empty(&root->root_list))
                list_add_tail(&root->root_list, &root->fs_info->dead_roots);
        spin_unlock(&root->fs_info->trans_lock);
}

/*
 * update all the dirty fs-tree (subvolume) roots on disk
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *root)
{
        struct btrfs_root *gang[8];
        struct btrfs_fs_info *fs_info = root->fs_info;
        int i;
        int ret;
        int err = 0;

        spin_lock(&fs_info->fs_roots_radix_lock);
        while (1) {
                ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
                                                 (void **)gang, 0,
                                                 ARRAY_SIZE(gang),
                                                 BTRFS_ROOT_TRANS_TAG);
                if (ret == 0)
                        break;
                for (i = 0; i < ret; i++) {
                        root = gang[i];
                        radix_tree_tag_clear(&fs_info->fs_roots_radix,
                                        (unsigned long)root->root_key.objectid,
                                        BTRFS_ROOT_TRANS_TAG);
                        spin_unlock(&fs_info->fs_roots_radix_lock);

                        btrfs_free_log(trans, root);
                        btrfs_update_reloc_root(trans, root);
                        btrfs_orphan_commit_root(trans, root);

                        btrfs_save_ino_cache(root, trans);

                        /* see comments in should_cow_block() */
                        root->force_cow = 0;
                        smp_wmb();

                        if (root->commit_root != root->node) {
                                list_add_tail(&root->dirty_list,
                                        &trans->transaction->switch_commits);
                                btrfs_set_root_node(&root->root_item,
                                                    root->node);
                        }

                        err = btrfs_update_root(trans, fs_info->tree_root,
                                                &root->root_key,
                                                &root->root_item);
                        spin_lock(&fs_info->fs_roots_radix_lock);
                        if (err)
                                break;
                }
        }
        spin_unlock(&fs_info->fs_roots_radix_lock);
        return err;
}

/*
 * defrag a given btree.
 * Every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root)
{
        struct btrfs_fs_info *info = root->fs_info;
        struct btrfs_trans_handle *trans;
        int ret;

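        /* xchg() acts as a test-and-set: only one defrag per root at a time */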
        if (xchg(&root->defrag_running, 1))
                return 0;

        while (1) {
                trans = btrfs_start_transaction(root, 0);
                if (IS_ERR(trans))
                        return PTR_ERR(trans);

                ret = btrfs_defrag_leaves(trans, root);

                btrfs_end_transaction(trans, root);
                btrfs_btree_balance_dirty(info->tree_root);
                cond_resched();

                if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
                        break;

                if (btrfs_defrag_cancelled(root->fs_info)) {
                        pr_debug("BTRFS: defrag_root cancelled\n");
                        ret = -EAGAIN;
                        break;
                }
        }
        root->defrag_running = 0;
        return ret;
}

/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit.  This does the actual creation.
 *
 * Note:
 * If an error occurs that may affect the commit of the current transaction,
 * we return that error number.  If the error only affects the creation of
 * the pending snapshots, we store it in pending->error and return 0.
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
                                   struct btrfs_fs_info *fs_info,
                                   struct btrfs_pending_snapshot *pending)
{
        struct btrfs_key key;
        struct btrfs_root_item *new_root_item;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_root *root = pending->root;
        struct btrfs_root *parent_root;
        struct btrfs_block_rsv *rsv;
        struct inode *parent_inode;
        struct btrfs_path *path;
        struct btrfs_dir_item *dir_item;
        struct dentry *dentry;
        struct extent_buffer *tmp;
        struct extent_buffer *old;
        struct timespec cur_time = CURRENT_TIME;
        int ret = 0;
        u64 to_reserve = 0;
        u64 index = 0;
        u64 objectid;
        u64 root_flags;
        uuid_le new_uuid;

        path = btrfs_alloc_path();
        if (!path) {
                pending->error = -ENOMEM;
                return 0;
        }

        new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
        if (!new_root_item) {
                pending->error = -ENOMEM;
                goto root_item_alloc_fail;
        }

        pending->error = btrfs_find_free_objectid(tree_root, &objectid);
        if (pending->error)
                goto no_free_objectid;

        btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);

        if (to_reserve > 0) {
                pending->error = btrfs_block_rsv_add(root,
                                                     &pending->block_rsv,
                                                     to_reserve,
                                                     BTRFS_RESERVE_NO_FLUSH);
                if (pending->error)
                        goto no_free_objectid;
        }

        pending->error = btrfs_qgroup_inherit(trans, fs_info,
                                              root->root_key.objectid,
                                              objectid, pending->inherit);
        if (pending->error)
                goto no_free_objectid;

        key.objectid = objectid;
        key.offset = (u64)-1;
        key.type = BTRFS_ROOT_ITEM_KEY;

        rsv = trans->block_rsv;
        trans->block_rsv = &pending->block_rsv;
        trans->bytes_reserved = trans->block_rsv->reserved;

        dentry = pending->dentry;
        parent_inode = pending->dir;
        parent_root = BTRFS_I(parent_inode)->root;
        record_root_in_trans(trans, parent_root);

        /*
         * insert the directory item
         */
        ret = btrfs_set_inode_index(parent_inode, &index);
        BUG_ON(ret); /* -ENOMEM */

        /* check if there is a file/dir which has the same name. */
        dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
                                         btrfs_ino(parent_inode),
                                         dentry->d_name.name,
                                         dentry->d_name.len, 0);
        if (dir_item != NULL && !IS_ERR(dir_item)) {
                pending->error = -EEXIST;
                goto dir_item_existed;
        } else if (IS_ERR(dir_item)) {
                ret = PTR_ERR(dir_item);
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }
        btrfs_release_path(path);

        /*
         * pull in the delayed directory update and the delayed inode item;
         * otherwise we corrupt the FS during the snapshot
         */
        ret = btrfs_run_delayed_items(trans, root);
        if (ret) {      /* Transaction aborted */
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }

        record_root_in_trans(trans, root);
        btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
        memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
        btrfs_check_and_init_root_item(new_root_item);

        root_flags = btrfs_root_flags(new_root_item);
        if (pending->readonly)
                root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
        else
                root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
        btrfs_set_root_flags(new_root_item, root_flags);

        btrfs_set_root_generation_v2(new_root_item,
                        trans->transid);
        uuid_le_gen(&new_uuid);
        memcpy(new_root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
        memcpy(new_root_item->parent_uuid, root->root_item.uuid,
                        BTRFS_UUID_SIZE);
        if (!(root_flags & BTRFS_ROOT_SUBVOL_RDONLY)) {
                memset(new_root_item->received_uuid, 0,
                       sizeof(new_root_item->received_uuid));
                memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
                memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
                btrfs_set_root_stransid(new_root_item, 0);
                btrfs_set_root_rtransid(new_root_item, 0);
        }
        btrfs_set_stack_timespec_sec(&new_root_item->otime, cur_time.tv_sec);
        btrfs_set_stack_timespec_nsec(&new_root_item->otime, cur_time.tv_nsec);
        btrfs_set_root_otransid(new_root_item, trans->transid);

        old = btrfs_lock_root_node(root);
        ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
        if (ret) {
                btrfs_tree_unlock(old);
                free_extent_buffer(old);
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }

        btrfs_set_lock_blocking(old);

        ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
        /* clean up in any case */
        btrfs_tree_unlock(old);
        free_extent_buffer(old);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }

        /* see comments in should_cow_block() */
        root->force_cow = 1;
        smp_wmb();

        btrfs_set_root_node(new_root_item, tmp);
        /* record when the snapshot was created in key.offset */
        key.offset = trans->transid;
        ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
        btrfs_tree_unlock(tmp);
        free_extent_buffer(tmp);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }

        /*
         * insert root back/forward references
         */
        ret = btrfs_add_root_ref(trans, tree_root, objectid,
                                 parent_root->root_key.objectid,
                                 btrfs_ino(parent_inode), index,
                                 dentry->d_name.name, dentry->d_name.len);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }

        key.offset = (u64)-1;
        pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
        if (IS_ERR(pending->snap)) {
                ret = PTR_ERR(pending->snap);
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }

        ret = btrfs_reloc_post_snapshot(trans, pending);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }

        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }

        ret = btrfs_insert_dir_item(trans, parent_root,
                                    dentry->d_name.name, dentry->d_name.len,
                                    parent_inode, &key,
                                    BTRFS_FT_DIR, index);
        /* We already checked the name at the beginning, so a duplicate is impossible. */
        BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }

        btrfs_i_size_write(parent_inode, parent_inode->i_size +
                                         dentry->d_name.len * 2);
        parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
        ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }
        ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root, new_uuid.b,
                                  BTRFS_UUID_KEY_SUBVOL, objectid);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }
        if (!btrfs_is_empty_uuid(new_root_item->received_uuid)) {
                ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
                                          new_root_item->received_uuid,
                                          BTRFS_UUID_KEY_RECEIVED_SUBVOL,
                                          objectid);
                if (ret && ret != -EEXIST) {
                        btrfs_abort_transaction(trans, root, ret);
                        goto fail;
                }
        }
fail:
        pending->error = ret;
dir_item_existed:
        trans->block_rsv = rsv;
        trans->bytes_reserved = 0;
no_free_objectid:
        kfree(new_root_item);
root_item_alloc_fail:
        btrfs_free_path(path);
        return ret;
}

/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
                                             struct btrfs_fs_info *fs_info)
{
        struct btrfs_pending_snapshot *pending, *next;
        struct list_head *head = &trans->transaction->pending_snapshots;
        int ret = 0;

        list_for_each_entry_safe(pending, next, head, list) {
                list_del(&pending->list);
                ret = create_pending_snapshot(trans, fs_info, pending);
                if (ret)
                        break;
        }
        return ret;
}
1385
1386 static void update_super_roots(struct btrfs_root *root)
1387 {
1388         struct btrfs_root_item *root_item;
1389         struct btrfs_super_block *super;
1390
1391         super = root->fs_info->super_copy;
1392
1393         root_item = &root->fs_info->chunk_root->root_item;
1394         super->chunk_root = root_item->bytenr;
1395         super->chunk_root_generation = root_item->generation;
1396         super->chunk_root_level = root_item->level;
1397
1398         root_item = &root->fs_info->tree_root->root_item;
1399         super->root = root_item->bytenr;
1400         super->generation = root_item->generation;
1401         super->root_level = root_item->level;
1402         if (btrfs_test_opt(root, SPACE_CACHE))
1403                 super->cache_generation = root_item->generation;
1404         if (root->fs_info->update_uuid_tree_gen)
1405                 super->uuid_tree_generation = root_item->generation;
1406 }
1407
1408 int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
1409 {
1410         struct btrfs_transaction *trans;
1411         int ret = 0;
1412
1413         spin_lock(&info->trans_lock);
1414         trans = info->running_transaction;
1415         if (trans)
1416                 ret = (trans->state >= TRANS_STATE_COMMIT_START);
1417         spin_unlock(&info->trans_lock);
1418         return ret;
1419 }
1420
1421 int btrfs_transaction_blocked(struct btrfs_fs_info *info)
1422 {
1423         struct btrfs_transaction *trans;
1424         int ret = 0;
1425
1426         spin_lock(&info->trans_lock);
1427         trans = info->running_transaction;
1428         if (trans)
1429                 ret = is_transaction_blocked(trans);
1430         spin_unlock(&info->trans_lock);
1431         return ret;
1432 }
1433
1434 /*
1435  * wait for the current transaction commit to start and block subsequent
1436  * transaction joins
1437  */
1438 static void wait_current_trans_commit_start(struct btrfs_root *root,
1439                                             struct btrfs_transaction *trans)
1440 {
1441         wait_event(root->fs_info->transaction_blocked_wait,
1442                    trans->state >= TRANS_STATE_COMMIT_START ||
1443                    trans->aborted);
1444 }
1445
1446 /*
1447  * wait for the current transaction to start and then become unblocked.
1448  * caller holds ref.
1449  */
1450 static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
1451                                          struct btrfs_transaction *trans)
1452 {
1453         wait_event(root->fs_info->transaction_wait,
1454                    trans->state >= TRANS_STATE_UNBLOCKED ||
1455                    trans->aborted);
1456 }
1457
1458 /*
1459  * commit transactions asynchronously. once btrfs_commit_transaction_async
1460  * returns, any subsequent transaction will not be allowed to join.
1461  */
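/*
 * A minimal caller sketch (hypothetical, not taken from this file):
 * commit in the background and return as soon as joins are blocked:
 *
 *	trans = btrfs_join_transaction(root);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *	ret = btrfs_commit_transaction_async(trans, root, 0);
 */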
1462 struct btrfs_async_commit {
1463         struct btrfs_trans_handle *newtrans;
1464         struct btrfs_root *root;
1465         struct work_struct work;
1466 };
1467
1468 static void do_async_commit(struct work_struct *work)
1469 {
1470         struct btrfs_async_commit *ac =
1471                 container_of(work, struct btrfs_async_commit, work);
1472
1473         /*
1474          * We've got freeze protection passed with the transaction.
1475          * Tell lockdep about it.
1476          */
1477         if (ac->newtrans->type & __TRANS_FREEZABLE)
1478                 rwsem_acquire_read(
1479                      &ac->root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
1480                      0, 1, _THIS_IP_);
1481
1482         current->journal_info = ac->newtrans;
1483
1484         btrfs_commit_transaction(ac->newtrans, ac->root);
1485         kfree(ac);
1486 }
1487
1488 int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
1489                                    struct btrfs_root *root,
1490                                    int wait_for_unblock)
1491 {
1492         struct btrfs_async_commit *ac;
1493         struct btrfs_transaction *cur_trans;
1494
1495         ac = kmalloc(sizeof(*ac), GFP_NOFS);
1496         if (!ac)
1497                 return -ENOMEM;
1498
1499         INIT_WORK(&ac->work, do_async_commit);
1500         ac->root = root;
1501         ac->newtrans = btrfs_join_transaction(root);
1502         if (IS_ERR(ac->newtrans)) {
1503                 int err = PTR_ERR(ac->newtrans);
1504                 kfree(ac);
1505                 return err;
1506         }
1507
1508         /* take a transaction ref so cur_trans stays valid for the waits below */
1509         cur_trans = trans->transaction;
1510         atomic_inc(&cur_trans->use_count);
1511
1512         btrfs_end_transaction(trans, root);
1513
1514         /*
1515          * Tell lockdep we've released the freeze rwsem, since the
1516          * async commit thread will be the one to unlock it.
1517          */
1518         if (ac->newtrans->type & __TRANS_FREEZABLE)
1519                 rwsem_release(
1520                         &root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
1521                         1, _THIS_IP_);
1522
1523         schedule_work(&ac->work);
1524
1525         /* wait for transaction to start and unblock */
1526         if (wait_for_unblock)
1527                 wait_current_trans_commit_start_and_unblock(root, cur_trans);
1528         else
1529                 wait_current_trans_commit_start(root, cur_trans);
1530
1531         if (current->journal_info == trans)
1532                 current->journal_info = NULL;
1533
1534         btrfs_put_transaction(cur_trans);
1535         return 0;
1536 }
1537
1538
1539 static void cleanup_transaction(struct btrfs_trans_handle *trans,
1540                                 struct btrfs_root *root, int err)
1541 {
1542         struct btrfs_transaction *cur_trans = trans->transaction;
1543         DEFINE_WAIT(wait);
1544
1545         WARN_ON(trans->use_count > 1);
1546
1547         btrfs_abort_transaction(trans, root, err);
1548
1549         spin_lock(&root->fs_info->trans_lock);
1550
1551         /*
1552          * If the transaction has already been removed from the list, it
1553          * was committed successfully, so the cleanup function must never
1554          * be called on it.
1555          */
1556         BUG_ON(list_empty(&cur_trans->list));
1557
1558         list_del_init(&cur_trans->list);
1559         if (cur_trans == root->fs_info->running_transaction) {
1560                 cur_trans->state = TRANS_STATE_COMMIT_DOING;
1561                 spin_unlock(&root->fs_info->trans_lock);
1562                 wait_event(cur_trans->writer_wait,
1563                            atomic_read(&cur_trans->num_writers) == 1);
1564
1565                 spin_lock(&root->fs_info->trans_lock);
1566         }
1567         spin_unlock(&root->fs_info->trans_lock);
1568
1569         btrfs_cleanup_one_transaction(trans->transaction, root);
1570
1571         spin_lock(&root->fs_info->trans_lock);
1572         if (cur_trans == root->fs_info->running_transaction)
1573                 root->fs_info->running_transaction = NULL;
1574         spin_unlock(&root->fs_info->trans_lock);
1575
1576         if (trans->type & __TRANS_FREEZABLE)
1577                 sb_end_intwrite(root->fs_info->sb);
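        /*
         * Two puts: one for the reference held while the transaction sat
         * on fs_info->trans_list (it was unlinked above) and one for the
         * reference the handle took when it joined the transaction.
         */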
1578         btrfs_put_transaction(cur_trans);
1579         btrfs_put_transaction(cur_trans);
1580
1581         trace_btrfs_transaction_commit(root);
1582
1583         if (current->journal_info == trans)
1584                 current->journal_info = NULL;
1585         btrfs_scrub_cancel(root->fs_info);
1586
1587         kmem_cache_free(btrfs_trans_handle_cachep, trans);
1588 }
1589
1590 static int btrfs_flush_all_pending_stuffs(struct btrfs_trans_handle *trans,
1591                                           struct btrfs_root *root)
1592 {
1593         int ret;
1594
1595         ret = btrfs_run_delayed_items(trans, root);
1596         /*
1597          * running the delayed items may have added new refs. Account
1598          * for them now so that they hinder the processing of further
1599          * delayed refs as little as possible.
1600          */
1601         if (ret) {
1602                 btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
1603                 return ret;
1604         }
1605
1606         ret = btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
1607         if (ret)
1608                 return ret;
1609
1610         /*
1611          * rename doesn't use btrfs_join_transaction, so once we set
1612          * the transaction to blocked above, we aren't going to get
1613          * any new ordered operations.  We can safely run it here and
1614          * know for sure that nothing new will be added to the
1615          * list.
1616          */
1617         ret = btrfs_run_ordered_operations(trans, root, 1);
1618
1619         return ret;
1620 }
1621
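/*
 * With -o flushoncommit every commit must also write out all dirty data,
 * so these helpers kick off and then wait for delalloc on all roots;
 * without that mount option they are no-ops.
 */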
1622 static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
1623 {
1624         if (btrfs_test_opt(fs_info->tree_root, FLUSHONCOMMIT))
1625                 return btrfs_start_delalloc_roots(fs_info, 1, -1);
1626         return 0;
1627 }
1628
1629 static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
1630 {
1631         if (btrfs_test_opt(fs_info->tree_root, FLUSHONCOMMIT))
1632                 btrfs_wait_ordered_roots(fs_info, -1);
1633 }
1634
1635 int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1636                              struct btrfs_root *root)
1637 {
1638         struct btrfs_transaction *cur_trans = trans->transaction;
1639         struct btrfs_transaction *prev_trans = NULL;
1640         int ret;
1641
1642         ret = btrfs_run_ordered_operations(trans, root, 0);
1643         if (ret) {
1644                 btrfs_abort_transaction(trans, root, ret);
1645                 btrfs_end_transaction(trans, root);
1646                 return ret;
1647         }
1648
1649         /* Stop the commit early if ->aborted is set */
1650         if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
1651                 ret = cur_trans->aborted;
1652                 btrfs_end_transaction(trans, root);
1653                 return ret;
1654         }
1655
1656         /* make a pass through all the delayed refs we have so far;
1657          * any running procs may add more while we are here
1658          */
1659         ret = btrfs_run_delayed_refs(trans, root, 0);
1660         if (ret) {
1661                 btrfs_end_transaction(trans, root);
1662                 return ret;
1663         }
1664
1665         btrfs_trans_release_metadata(trans, root);
1666         trans->block_rsv = NULL;
1667         if (trans->qgroup_reserved) {
1668                 btrfs_qgroup_free(root, trans->qgroup_reserved);
1669                 trans->qgroup_reserved = 0;
1670         }
1671
1672         cur_trans = trans->transaction;
1673
1674         /*
1675          * set the flushing flag so procs in this transaction have to
1676          * start sending their work down.
1677          */
1678         cur_trans->delayed_refs.flushing = 1;
1679         smp_wmb();
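        /*
         * The write barrier is assumed to pair with readers of
         * ->flushing in the delayed-ref code, so they observe the flag
         * before we start the final flushing runs below.
         */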
1680
1681         if (!list_empty(&trans->new_bgs))
1682                 btrfs_create_pending_block_groups(trans, root);
1683
1684         ret = btrfs_run_delayed_refs(trans, root, 0);
1685         if (ret) {
1686                 btrfs_end_transaction(trans, root);
1687                 return ret;
1688         }
1689
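        /*
         * If someone else already started committing this transaction,
         * just wait for that commit to finish instead of duplicating it.
         */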
1690         spin_lock(&root->fs_info->trans_lock);
1691         if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
1692                 spin_unlock(&root->fs_info->trans_lock);
1693                 atomic_inc(&cur_trans->use_count);
1694                 ret = btrfs_end_transaction(trans, root);
1695
1696                 wait_for_commit(root, cur_trans);
1697
1698                 btrfs_put_transaction(cur_trans);
1699
1700                 return ret;
1701         }
1702
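        /*
         * Entering COMMIT_START blocks new transaction starts and
         * attaches; wake anyone parked in
         * wait_current_trans_commit_start().
         */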
1703         cur_trans->state = TRANS_STATE_COMMIT_START;
1704         wake_up(&root->fs_info->transaction_blocked_wait);
1705
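        /*
         * Commits must complete in order: if the previous transaction on
         * the list hasn't finished yet, wait for it before going on.
         */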
1706         if (cur_trans->list.prev != &root->fs_info->trans_list) {
1707                 prev_trans = list_entry(cur_trans->list.prev,
1708                                         struct btrfs_transaction, list);
1709                 if (prev_trans->state != TRANS_STATE_COMPLETED) {
1710                         atomic_inc(&prev_trans->use_count);
1711                         spin_unlock(&root->fs_info->trans_lock);
1712
1713                         wait_for_commit(root, prev_trans);
1714
1715                         btrfs_put_transaction(prev_trans);
1716                 } else {
1717                         spin_unlock(&root->fs_info->trans_lock);
1718                 }
1719         } else {
1720                 spin_unlock(&root->fs_info->trans_lock);
1721         }
1722
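        /*
         * Drop our external writer count; once the remaining external
         * writers drain (waited for below), no new dirty data can be
         * queued behind the flushes.
         */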
1723         extwriter_counter_dec(cur_trans, trans->type);
1724
1725         ret = btrfs_start_delalloc_flush(root->fs_info);
1726         if (ret)
1727                 goto cleanup_transaction;
1728
1729         ret = btrfs_flush_all_pending_stuffs(trans, root);
1730         if (ret)
1731                 goto cleanup_transaction;
1732
1733         wait_event(cur_trans->writer_wait,
1734                    extwriter_counter_read(cur_trans) == 0);
1735
1736         /* some pending work might be added after the previous flush. */
1737         ret = btrfs_flush_all_pending_stuffs(trans, root);
1738         if (ret)
1739                 goto cleanup_transaction;
1740
1741         btrfs_wait_delalloc_flush(root->fs_info);
1742
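        /*
         * Pause scrub for the duration of the commit; it is resumed via
         * btrfs_scrub_continue() on both the success and error paths.
         */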
1743         btrfs_scrub_pause(root);
1744         /*
1745          * Ok now we need to make sure to block out any other joins while we
1746          * commit the transaction.  We could have started a join before setting
1747          * COMMIT_DOING, so make sure to wait for num_writers to drop to 1 again.
1748          */
1749         spin_lock(&root->fs_info->trans_lock);
1750         cur_trans->state = TRANS_STATE_COMMIT_DOING;
1751         spin_unlock(&root->fs_info->trans_lock);
1752         wait_event(cur_trans->writer_wait,
1753                    atomic_read(&cur_trans->num_writers) == 1);
1754
1755         /* ->aborted might be set after the previous check, so check it */
1756         if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
1757                 ret = cur_trans->aborted;
1758                 goto scrub_continue;
1759         }
1760         /*
1761          * the reloc mutex makes sure that we stop
1762          * the balancing code from coming in and moving
1763          * extents around in the middle of the commit
1764          */
1765         mutex_lock(&root->fs_info->reloc_mutex);
1766
1767         /*
1768          * We needn't worry about the delayed items because we will
1769          * deal with them in create_pending_snapshot(), which is the
1770          * core function of the snapshot creation.
1771          */
1772         ret = create_pending_snapshots(trans, root->fs_info);
1773         if (ret) {
1774                 mutex_unlock(&root->fs_info->reloc_mutex);
1775                 goto scrub_continue;
1776         }
1777
1778         /*
1779          * We insert the dir indexes of the snapshots and update the inode
1780          * of the snapshots' parents after the snapshot creation, so there
1781          * are some delayed items which are not dealt with. Now deal with
1782          * them.
1783          *
1784          * We needn't worry that this operation will corrupt the snapshots,
1785          * because all the trees which are snapshotted will be forced to COW
1786          * the nodes and leaves.
1787          */
1788         ret = btrfs_run_delayed_items(trans, root);
1789         if (ret) {
1790                 mutex_unlock(&root->fs_info->reloc_mutex);
1791                 goto scrub_continue;
1792         }
1793
1794         ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1795         if (ret) {
1796                 mutex_unlock(&root->fs_info->reloc_mutex);
1797                 goto scrub_continue;
1798         }
1799
1800         /*
1801          * make sure none of the code above managed to slip in a
1802          * delayed item
1803          */
1804         btrfs_assert_delayed_root_empty(root);
1805
1806         WARN_ON(cur_trans != trans->transaction);
1807
1808         /* commit_fs_roots and commit_cowonly_roots below get the
1809          * various roots consistent with each other.  Every pointer
1810          * in the tree of tree roots has to point to the most up to date
1811          * root for every subvolume and other tree.  So, we have to keep
1812          * the tree logging code from jumping in and changing any
1813          * of the trees.
1814          *
1815          * At this point in the commit, there can't be any tree-log
1816          * writers, but a little lower down we drop the trans mutex
1817          * and let new people in.  By holding the tree_log_mutex
1818          * from now until after the super is written, we avoid races
1819          * with the tree-log code.
1820          */
1821         mutex_lock(&root->fs_info->tree_log_mutex);
1822
1823         ret = commit_fs_roots(trans, root);
1824         if (ret) {
1825                 mutex_unlock(&root->fs_info->tree_log_mutex);
1826                 mutex_unlock(&root->fs_info->reloc_mutex);
1827                 goto scrub_continue;
1828         }
1829
1830         /*
1831          * Since the transaction is done, we should set the inode map cache flag
1832          * before any other coming transaction.
1833          */
1834         if (btrfs_test_opt(root, CHANGE_INODE_CACHE))
1835                 btrfs_set_opt(root->fs_info->mount_opt, INODE_MAP_CACHE);
1836         else
1837                 btrfs_clear_opt(root->fs_info->mount_opt, INODE_MAP_CACHE);
1838
1839         /* commit_fs_roots gets rid of all the tree log roots, so it is now
1840          * safe to free the root of the tree of log roots
1841          */
1842         btrfs_free_log_root_tree(trans, root->fs_info);
1843
1844         ret = commit_cowonly_roots(trans, root);
1845         if (ret) {
1846                 mutex_unlock(&root->fs_info->tree_log_mutex);
1847                 mutex_unlock(&root->fs_info->reloc_mutex);
1848                 goto scrub_continue;
1849         }
1850
1851         /*
1852          * The tasks which save the space cache and inode cache may also
1853          * update ->aborted, check it.
1854          */
1855         if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
1856                 ret = cur_trans->aborted;
1857                 mutex_unlock(&root->fs_info->tree_log_mutex);
1858                 mutex_unlock(&root->fs_info->reloc_mutex);
1859                 goto scrub_continue;
1860         }
1861
1862         btrfs_prepare_extent_commit(trans, root);
1863
1864         cur_trans = root->fs_info->running_transaction;
1865
1866         btrfs_set_root_node(&root->fs_info->tree_root->root_item,
1867                             root->fs_info->tree_root->node);
1868         list_add_tail(&root->fs_info->tree_root->dirty_list,
1869                       &cur_trans->switch_commits);
1870
1871         btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
1872                             root->fs_info->chunk_root->node);
1873         list_add_tail(&root->fs_info->chunk_root->dirty_list,
1874                       &cur_trans->switch_commits);
1875
1876         switch_commit_roots(cur_trans, root->fs_info);
1877
1878         assert_qgroups_uptodate(trans);
1879         update_super_roots(root);
1880
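        /*
         * A full commit leaves nothing to replay from the tree log, so
         * clear the log root in the super.  super_for_commit is the
         * stable copy that actually gets written to disk.
         */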
1881         btrfs_set_super_log_root(root->fs_info->super_copy, 0);
1882         btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
1883         memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
1884                sizeof(*root->fs_info->super_copy));
1885
1886         spin_lock(&root->fs_info->trans_lock);
1887         cur_trans->state = TRANS_STATE_UNBLOCKED;
1888         root->fs_info->running_transaction = NULL;
1889         spin_unlock(&root->fs_info->trans_lock);
1890         mutex_unlock(&root->fs_info->reloc_mutex);
1891
1892         wake_up(&root->fs_info->transaction_wait);
1893
1894         ret = btrfs_write_and_wait_transaction(trans, root);
1895         if (ret) {
1896                 btrfs_error(root->fs_info, ret,
1897                             "Error while writing out transaction");
1898                 mutex_unlock(&root->fs_info->tree_log_mutex);
1899                 goto scrub_continue;
1900         }
1901
1902         ret = write_ctree_super(trans, root, 0);
1903         if (ret) {
1904                 mutex_unlock(&root->fs_info->tree_log_mutex);
1905                 goto scrub_continue;
1906         }
1907
1908         /*
1909          * the super is written, we can safely allow the tree-loggers
1910          * to go about their business
1911          */
1912         mutex_unlock(&root->fs_info->tree_log_mutex);
1913
1914         btrfs_finish_extent_commit(trans, root);
1915
1916         root->fs_info->last_trans_committed = cur_trans->transid;
1917         /*
1918          * We needn't acquire the lock here because there is no other task
1919          * which can change it.
1920          */
1921         cur_trans->state = TRANS_STATE_COMPLETED;
1922         wake_up(&cur_trans->commit_wait);
1923
1924         spin_lock(&root->fs_info->trans_lock);
1925         list_del_init(&cur_trans->list);
1926         spin_unlock(&root->fs_info->trans_lock);
1927
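        /*
         * As in cleanup_transaction(): drop the reference held by the
         * now-unlinked trans_list entry and the handle's own reference.
         */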
1928         btrfs_put_transaction(cur_trans);
1929         btrfs_put_transaction(cur_trans);
1930
1931         if (trans->type & __TRANS_FREEZABLE)
1932                 sb_end_intwrite(root->fs_info->sb);
1933
1934         trace_btrfs_transaction_commit(root);
1935
1936         btrfs_scrub_continue(root);
1937
1938         if (current->journal_info == trans)
1939                 current->journal_info = NULL;
1940
1941         kmem_cache_free(btrfs_trans_handle_cachep, trans);
1942
1943         if (current != root->fs_info->transaction_kthread)
1944                 btrfs_run_delayed_iputs(root);
1945
1946         return ret;
1947
1948 scrub_continue:
1949         btrfs_scrub_continue(root);
1950 cleanup_transaction:
1951         btrfs_trans_release_metadata(trans, root);
1952         trans->block_rsv = NULL;
1953         if (trans->qgroup_reserved) {
1954                 btrfs_qgroup_free(root, trans->qgroup_reserved);
1955                 trans->qgroup_reserved = 0;
1956         }
1957         btrfs_warn(root->fs_info, "Skipping commit of aborted transaction.");
1958         if (current->journal_info == trans)
1959                 current->journal_info = NULL;
1960         cleanup_transaction(trans, root, ret);
1961
1962         return ret;
1963 }
1964
1965 /*
1966  * return < 0 if error
1967  * 0 if there are no more dead_roots at the time of call
1968  * 1 if there are more to be processed; call me again
1969  *
1970  * A return value of 1 means there are certainly more snapshots to delete, but
1971  * if a new one appears during processing, we may still return 0. We don't
1972  * mind, because btrfs_commit_super will poke the cleaner thread and it will
1973  * process it a few seconds later.
1974  */
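/*
 * A minimal caller sketch (hypothetical): the cleaner thread would loop
 * on this until nothing is left to do, e.g.
 *
 *	while (btrfs_clean_one_deleted_snapshot(root) > 0)
 *		cond_resched();
 */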
1975 int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
1976 {
1977         int ret;
1978         struct btrfs_fs_info *fs_info = root->fs_info;
1979
1980         spin_lock(&fs_info->trans_lock);
1981         if (list_empty(&fs_info->dead_roots)) {
1982                 spin_unlock(&fs_info->trans_lock);
1983                 return 0;
1984         }
1985         root = list_first_entry(&fs_info->dead_roots,
1986                         struct btrfs_root, root_list);
1987         /*
1988          * Make sure this root is not involved in send;
1989          * if the first root is busy, we return
1990          * directly rather than continuing.
1991          */
1992         spin_lock(&root->root_item_lock);
1993         if (root->send_in_progress) {
1994                 spin_unlock(&fs_info->trans_lock);
1995                 spin_unlock(&root->root_item_lock);
1996                 return 0;
1997         }
1998         spin_unlock(&root->root_item_lock);
1999
2000         list_del_init(&root->root_list);
2001         spin_unlock(&fs_info->trans_lock);
2002
2003         pr_debug("BTRFS: cleaner removing %llu\n", root->objectid);
2004
2005         btrfs_kill_all_delayed_nodes(root);
2006
2007         if (btrfs_header_backref_rev(root->node) <
2008                         BTRFS_MIXED_BACKREF_REV)
2009                 ret = btrfs_drop_snapshot(root, NULL, 0, 0);
2010         else
2011                 ret = btrfs_drop_snapshot(root, NULL, 1, 0);
2012         /*
2013          * If we encounter a transaction abort during snapshot cleaning, we
2014          * don't want to crash here
2015          */
2016         return (ret < 0) ? 0 : 1;
2017 }