/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "math.h"
#include "sysfs.h"

#undef SCRAMBLE_DELAYED_REFS

/*
 * Control flags for do_chunk_alloc's force field.
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 */
enum {
        CHUNK_ALLOC_NO_FORCE = 0,
        CHUNK_ALLOC_LIMITED = 1,
        CHUNK_ALLOC_FORCE = 2,
};
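
/*
 * Illustrative sketch (not from the original file): callers pass one of
 * the flags above as do_chunk_alloc()'s force argument.  "alloc_profile"
 * is a hypothetical stand-in for the target block group flags; the block
 * is kept under #if 0 because the surrounding variables are assumed.
 */
#if 0
        /* only allocate a chunk if the allocator really needs one */
        ret = do_chunk_alloc(trans, extent_root, alloc_profile,
                             CHUNK_ALLOC_NO_FORCE);

        /* unconditionally try to allocate a chunk */
        ret = do_chunk_alloc(trans, extent_root, alloc_profile,
                             CHUNK_ALLOC_FORCE);
#endif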

/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
        RESERVE_FREE = 0,
        RESERVE_ALLOC = 1,
        RESERVE_ALLOC_NO_ACCOUNT = 2,
};
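
/*
 * Illustrative sketch (not from the original file): these values are what
 * btrfs_update_reserved_bytes(), declared below, expects as its "reserve"
 * argument.  Hypothetical pairing for a block group "cache":
 */
#if 0
        /* reserve num_bytes, updating bytes_may_use for ENOSPC accounting */
        ret = btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_ALLOC);

        /* later, drop the reservation again */
        btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_FREE);
#endif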

static int update_block_group(struct btrfs_root *root,
                              u64 bytenr, u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                u64 bytenr, u64 num_bytes, u64 parent,
                                u64 root_objectid, u64 owner_objectid,
                                u64 owner_offset, int refs_to_drop,
                                struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 parent, u64 root_objectid,
                                     u64 flags, struct btrfs_disk_key *key,
                                     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 flags,
                          int force);
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
                                       u64 num_bytes, int reserve);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
                               u64 num_bytes);
int btrfs_pin_extent(struct btrfs_root *root,
                     u64 bytenr, u64 num_bytes, int reserved);

static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
        smp_mb();
        return cache->cached == BTRFS_CACHE_FINISHED ||
                cache->cached == BTRFS_CACHE_ERROR;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}

static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
        atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count)) {
                WARN_ON(cache->pinned > 0);
                WARN_ON(cache->reserved > 0);
                kfree(cache->free_space_ctl);
                kfree(cache);
        }
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
                                struct btrfs_block_group_cache *block_group)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct btrfs_block_group_cache *cache;

        spin_lock(&info->block_group_cache_lock);
        p = &info->block_group_cache_tree.rb_node;

        while (*p) {
                parent = *p;
                cache = rb_entry(parent, struct btrfs_block_group_cache,
                                 cache_node);
                if (block_group->key.objectid < cache->key.objectid) {
                        p = &(*p)->rb_left;
                } else if (block_group->key.objectid > cache->key.objectid) {
                        p = &(*p)->rb_right;
                } else {
                        spin_unlock(&info->block_group_cache_lock);
                        return -EEXIST;
                }
        }

        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);

        if (info->first_logical_byte > block_group->key.objectid)
                info->first_logical_byte = block_group->key.objectid;

        spin_unlock(&info->block_group_cache_lock);

        return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                              int contains)
{
        struct btrfs_block_group_cache *cache, *ret = NULL;
        struct rb_node *n;
        u64 end, start;

        spin_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_node;

        while (n) {
                cache = rb_entry(n, struct btrfs_block_group_cache,
                                 cache_node);
                end = cache->key.objectid + cache->key.offset - 1;
                start = cache->key.objectid;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->key.objectid))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {
                                ret = cache;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        ret = cache;
                        break;
                }
        }
        if (ret) {
                btrfs_get_block_group(ret);
                if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
                        info->first_logical_byte = ret->key.objectid;
        }
        spin_unlock(&info->block_group_cache_lock);

        return ret;
}

static int add_excluded_extent(struct btrfs_root *root,
                               u64 start, u64 num_bytes)
{
        u64 end = start + num_bytes - 1;
        set_extent_bits(&root->fs_info->freed_extents[0],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        set_extent_bits(&root->fs_info->freed_extents[1],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
                                  struct btrfs_block_group_cache *cache)
{
        u64 start, end;

        start = cache->key.objectid;
        end = start + cache->key.offset - 1;

        clear_extent_bits(&root->fs_info->freed_extents[0],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
        clear_extent_bits(&root->fs_info->freed_extents[1],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
}

static int exclude_super_stripes(struct btrfs_root *root,
                                 struct btrfs_block_group_cache *cache)
{
        u64 bytenr;
        u64 *logical;
        int stripe_len;
        int i, nr, ret;

        if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
                stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
                cache->bytes_super += stripe_len;
                ret = add_excluded_extent(root, cache->key.objectid,
                                          stripe_len);
                if (ret)
                        return ret;
        }

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
                                       cache->key.objectid, bytenr,
                                       0, &logical, &nr, &stripe_len);
                if (ret)
                        return ret;

                while (nr--) {
                        u64 start, len;

                        if (logical[nr] > cache->key.objectid +
                            cache->key.offset)
                                continue;

                        if (logical[nr] + stripe_len <= cache->key.objectid)
                                continue;

                        start = logical[nr];
                        if (start < cache->key.objectid) {
                                start = cache->key.objectid;
                                len = (logical[nr] + stripe_len) - start;
                        } else {
                                len = min_t(u64, stripe_len,
                                            cache->key.objectid +
                                            cache->key.offset - start);
                        }

                        cache->bytes_super += len;
                        ret = add_excluded_extent(root, start, len);
                        if (ret) {
                                kfree(logical);
                                return ret;
                        }
                }

                kfree(logical);
        }
        return 0;
}
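
/*
 * Worked example (not from the original file): for a block group covering
 * [1GiB, 2GiB) and a super block mirror stripe starting at
 * logical[nr] = 1GiB + 4MiB, the stripe lies entirely inside the group,
 * so start = logical[nr] and len = stripe_len.  If the stripe instead
 * began just below 1GiB, start would be clamped up to the group's start
 * and len shortened to just the overlapping tail.
 */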

static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
        struct btrfs_caching_control *ctl;

        spin_lock(&cache->lock);
        if (cache->cached != BTRFS_CACHE_STARTED) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        /* We're loading it the fast way, so we don't have a caching_ctl. */
        if (!cache->caching_ctl) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        ctl = cache->caching_ctl;
        atomic_inc(&ctl->count);
        spin_unlock(&cache->lock);
        return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
        if (atomic_dec_and_test(&ctl->count))
                kfree(ctl);
}

/*
 * This is only called by cache_block_group.  Since we could have freed
 * extents, we need to check the pinned_extents tree for any extents that
 * can't be used yet, since their free space will be released as soon as
 * the transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_fs_info *info, u64 start, u64 end)
{
        u64 extent_start, extent_end, size, total_added = 0;
        int ret;

        while (start < end) {
                ret = find_first_extent_bit(info->pinned_extents, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY | EXTENT_UPTODATE,
                                            NULL);
                if (ret)
                        break;

                if (extent_start <= start) {
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;
                        total_added += size;
                        ret = btrfs_add_free_space(block_group, start,
                                                   size);
                        BUG_ON(ret); /* -ENOMEM or logic error */
                        start = extent_end + 1;
                } else {
                        break;
                }
        }

        if (start < end) {
                size = end - start;
                total_added += size;
                ret = btrfs_add_free_space(block_group, start, size);
                BUG_ON(ret); /* -ENOMEM or logic error */
        }

        return total_added;
}
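
/*
 * Worked example (not from the original file): caching the range [0, 100)
 * with a single pinned extent covering [40, 60] adds [0, 40) inside the
 * loop, advances start to 61, then adds [61, 100) in the tail, so only
 * the pinned bytes stay withheld until the transaction commits.
 */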

static noinline void caching_thread(struct btrfs_work *work)
{
        struct btrfs_block_group_cache *block_group;
        struct btrfs_fs_info *fs_info;
        struct btrfs_caching_control *caching_ctl;
        struct btrfs_root *extent_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u64 total_found = 0;
        u64 last = 0;
        u32 nritems;
        int ret = -ENOMEM;

        caching_ctl = container_of(work, struct btrfs_caching_control, work);
        block_group = caching_ctl->block_group;
        fs_info = block_group->fs_info;
        extent_root = fs_info->extent_root;

        path = btrfs_alloc_path();
        if (!path)
                goto out;

        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

        /*
         * We don't want to deadlock with somebody trying to allocate a new
         * extent for the extent root while also trying to search the extent
         * root to add free space.  So we skip locking and search the commit
         * root, since it's read-only.
         */
        path->skip_locking = 1;
        path->search_commit_root = 1;
        path->reada = 1;

        key.objectid = last;
        key.offset = 0;
        key.type = BTRFS_EXTENT_ITEM_KEY;
again:
        mutex_lock(&caching_ctl->mutex);
        /* need to make sure the commit_root doesn't disappear */
        down_read(&fs_info->commit_root_sem);

next:
        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto err;

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);

        while (1) {
                if (btrfs_fs_closing(fs_info) > 1) {
                        last = (u64)-1;
                        break;
                }

                if (path->slots[0] < nritems) {
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                } else {
                        ret = find_next_key(path, 0, &key);
                        if (ret)
                                break;

                        if (need_resched() ||
                            rwsem_is_contended(&fs_info->commit_root_sem)) {
                                caching_ctl->progress = last;
                                btrfs_release_path(path);
                                up_read(&fs_info->commit_root_sem);
                                mutex_unlock(&caching_ctl->mutex);
                                cond_resched();
                                goto again;
                        }

                        ret = btrfs_next_leaf(extent_root, path);
                        if (ret < 0)
                                goto err;
                        if (ret)
                                break;
                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        continue;
                }

                if (key.objectid < last) {
                        key.objectid = last;
                        key.offset = 0;
                        key.type = BTRFS_EXTENT_ITEM_KEY;

                        caching_ctl->progress = last;
                        btrfs_release_path(path);
                        goto next;
                }

                if (key.objectid < block_group->key.objectid) {
                        path->slots[0]++;
                        continue;
                }

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                if (key.type == BTRFS_EXTENT_ITEM_KEY ||
                    key.type == BTRFS_METADATA_ITEM_KEY) {
                        total_found += add_new_free_space(block_group,
                                                          fs_info, last,
                                                          key.objectid);
                        if (key.type == BTRFS_METADATA_ITEM_KEY)
                                last = key.objectid +
                                        fs_info->tree_root->leafsize;
                        else
                                last = key.objectid + key.offset;

                        if (total_found > (1024 * 1024 * 2)) {
                                total_found = 0;
                                wake_up(&caching_ctl->wait);
                        }
                }
                path->slots[0]++;
        }
        ret = 0;

        total_found += add_new_free_space(block_group, fs_info, last,
                                          block_group->key.objectid +
                                          block_group->key.offset);
        caching_ctl->progress = (u64)-1;

        spin_lock(&block_group->lock);
        block_group->caching_ctl = NULL;
        block_group->cached = BTRFS_CACHE_FINISHED;
        spin_unlock(&block_group->lock);

err:
        btrfs_free_path(path);
        up_read(&fs_info->commit_root_sem);

        free_excluded_extents(extent_root, block_group);

        mutex_unlock(&caching_ctl->mutex);
out:
        if (ret) {
                spin_lock(&block_group->lock);
                block_group->caching_ctl = NULL;
                block_group->cached = BTRFS_CACHE_ERROR;
                spin_unlock(&block_group->lock);
        }
        wake_up(&caching_ctl->wait);

        put_caching_control(caching_ctl);
        btrfs_put_block_group(block_group);
}

static int cache_block_group(struct btrfs_block_group_cache *cache,
                             int load_cache_only)
{
        DEFINE_WAIT(wait);
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl;
        int ret = 0;

        caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
        if (!caching_ctl)
                return -ENOMEM;

        INIT_LIST_HEAD(&caching_ctl->list);
        mutex_init(&caching_ctl->mutex);
        init_waitqueue_head(&caching_ctl->wait);
        caching_ctl->block_group = cache;
        caching_ctl->progress = cache->key.objectid;
        atomic_set(&caching_ctl->count, 1);
        btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL);

        spin_lock(&cache->lock);
        /*
         * This should be a rare occasion, but it can happen when one thread
         * starts to load the space cache info, and then some other thread
         * starts a transaction commit which tries to do an allocation while
         * the first thread is still loading the space cache info.  The
         * previous loop should have kept us from choosing this block group,
         * but if we've moved to the state where we will wait on caching
         * block groups we need to first check if we're doing a fast load
         * here, so we can wait for it to finish; otherwise we could end up
         * allocating from a block group whose cache gets evicted for one
         * reason or another.
         */
        while (cache->cached == BTRFS_CACHE_FAST) {
                struct btrfs_caching_control *ctl;

                ctl = cache->caching_ctl;
                atomic_inc(&ctl->count);
                prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock(&cache->lock);

                schedule();

                finish_wait(&ctl->wait, &wait);
                put_caching_control(ctl);
                spin_lock(&cache->lock);
        }

        if (cache->cached != BTRFS_CACHE_NO) {
                spin_unlock(&cache->lock);
                kfree(caching_ctl);
                return 0;
        }
        WARN_ON(cache->caching_ctl);
        cache->caching_ctl = caching_ctl;
        cache->cached = BTRFS_CACHE_FAST;
        spin_unlock(&cache->lock);

        if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
                ret = load_free_space_cache(fs_info, cache);

                spin_lock(&cache->lock);
                if (ret == 1) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_FINISHED;
                        cache->last_byte_to_unpin = (u64)-1;
                } else {
                        if (load_cache_only) {
                                cache->caching_ctl = NULL;
                                cache->cached = BTRFS_CACHE_NO;
                        } else {
                                cache->cached = BTRFS_CACHE_STARTED;
                        }
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
                if (ret == 1) {
                        put_caching_control(caching_ctl);
                        free_excluded_extents(fs_info->extent_root, cache);
                        return 0;
                }
        } else {
                /*
                 * We are not going to do the fast caching, set cached to the
                 * appropriate value and wake up any waiters.
                 */
                spin_lock(&cache->lock);
                if (load_cache_only) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_NO;
                } else {
                        cache->cached = BTRFS_CACHE_STARTED;
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
        }

        if (load_cache_only) {
                put_caching_control(caching_ctl);
                return 0;
        }

        down_write(&fs_info->commit_root_sem);
        atomic_inc(&caching_ctl->count);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        up_write(&fs_info->commit_root_sem);

        btrfs_get_block_group(cache);

        btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);

        return ret;
}
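
/*
 * Illustrative sketch (not from the original file): the load_cache_only
 * argument picks between the two strategies above.  Hypothetical usage:
 */
#if 0
        /* just attempt the fast load from the on-disk free space cache */
        ret = cache_block_group(cache, 1);

        /* or start (or finish) full caching from the extent tree */
        ret = cache_block_group(cache, 0);
#endif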

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 0);

        return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
                                                 struct btrfs_fs_info *info,
                                                 u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 1);

        return cache;
}
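
/*
 * Illustrative sketch (not from the original file): both lookup helpers
 * return the group with its reference count elevated by the tree search,
 * so every successful lookup must be paired with btrfs_put_block_group():
 */
#if 0
        cache = btrfs_lookup_block_group(fs_info, bytenr);
        if (cache) {
                /* ... use the block group ... */
                btrfs_put_block_group(cache);
        }
#endif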

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
                                                  u64 flags)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags & flags) {
                        rcu_read_unlock();
                        return found;
                }
        }
        rcu_read_unlock();
        return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list)
                found->full = 0;
        rcu_read_unlock();
}

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = start;
        key.offset = len;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
                                0, 0);
        if (ret > 0) {
                btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
                if (key.objectid == start &&
                    key.type == BTRFS_METADATA_ITEM_KEY)
                        ret = 0;
        }
        btrfs_free_path(path);
        return ret;
}
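
/*
 * Illustrative sketch (not from the original file): like
 * btrfs_search_slot(), the helper above returns 0 when a matching extent
 * item is found, a positive value when it is not, and a negative errno on
 * failure:
 */
#if 0
        ret = btrfs_lookup_extent(root, bytenr, num_bytes);
        if (ret < 0)
                goto error;             /* search failed */
        else if (ret > 0)
                ret = -ENOENT;          /* no extent item for this range */
#endif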

/*
 * helper function to look up the reference count and flags of a tree block.
 *
 * The head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree.  The head node
 * may also store the extent flags to set.  This way you can check what
 * the reference count and extent flags will be once all of the delayed
 * refs have been processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 bytenr,
                             u64 offset, int metadata, u64 *refs, u64 *flags)
{
        struct btrfs_delayed_ref_head *head;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_path *path;
        struct btrfs_extent_item *ei;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u32 item_size;
        u64 num_refs;
        u64 extent_flags;
        int ret;

        /*
         * If we don't have skinny metadata, don't bother doing anything
         * different
         */
        if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
                offset = root->leafsize;
                metadata = 0;
        }

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        if (!trans) {
                path->skip_locking = 1;
                path->search_commit_root = 1;
        }

search_again:
        key.objectid = bytenr;
        key.offset = offset;
        if (metadata)
                key.type = BTRFS_METADATA_ITEM_KEY;
        else
                key.type = BTRFS_EXTENT_ITEM_KEY;

again:
        ret = btrfs_search_slot(trans, root->fs_info->extent_root,
                                &key, path, 0, 0);
        if (ret < 0)
                goto out_free;

        if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
                if (path->slots[0]) {
                        path->slots[0]--;
                        btrfs_item_key_to_cpu(path->nodes[0], &key,
                                              path->slots[0]);
                        if (key.objectid == bytenr &&
                            key.type == BTRFS_EXTENT_ITEM_KEY &&
                            key.offset == root->leafsize)
                                ret = 0;
                }
                if (ret) {
                        key.objectid = bytenr;
                        key.type = BTRFS_EXTENT_ITEM_KEY;
                        key.offset = root->leafsize;
                        btrfs_release_path(path);
                        goto again;
                }
        }

        if (ret == 0) {
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
                if (item_size >= sizeof(*ei)) {
                        ei = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_extent_item);
                        num_refs = btrfs_extent_refs(leaf, ei);
                        extent_flags = btrfs_extent_flags(leaf, ei);
                } else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                        struct btrfs_extent_item_v0 *ei0;
                        BUG_ON(item_size != sizeof(*ei0));
                        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_item_v0);
                        num_refs = btrfs_extent_refs_v0(leaf, ei0);
                        /* FIXME: this isn't correct for data */
                        extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
                        BUG();
#endif
                }
                BUG_ON(num_refs == 0);
        } else {
                num_refs = 0;
                extent_flags = 0;
                ret = 0;
        }

        if (!trans)
                goto out;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
        head = btrfs_find_delayed_ref_head(trans, bytenr);
        if (head) {
                if (!mutex_trylock(&head->mutex)) {
                        atomic_inc(&head->node.refs);
                        spin_unlock(&delayed_refs->lock);

                        btrfs_release_path(path);

                        /*
                         * Mutex was contended, block until it's released and try
                         * again
                         */
                        mutex_lock(&head->mutex);
                        mutex_unlock(&head->mutex);
                        btrfs_put_delayed_ref(&head->node);
                        goto search_again;
                }
                spin_lock(&head->lock);
                if (head->extent_op && head->extent_op->update_flags)
                        extent_flags |= head->extent_op->flags_to_set;
                else
                        BUG_ON(num_refs == 0);

                num_refs += head->node.ref_mod;
                spin_unlock(&head->lock);
                mutex_unlock(&head->mutex);
        }
        spin_unlock(&delayed_refs->lock);
out:
        WARN_ON(num_refs == 0);
        if (refs)
                *refs = num_refs;
        if (flags)
                *flags = extent_flags;
out_free:
        btrfs_free_path(path);
        return ret;
}
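
/*
 * Illustrative sketch (not from the original file): a typical caller reads
 * the current reference count and flags of a tree block "eb", folding in
 * pending delayed ref modifications when a transaction handle is passed.
 * With skinny metadata the offset argument carries the block's level:
 */
#if 0
        u64 refs;
        u64 flags;

        ret = btrfs_lookup_extent_info(trans, root, eb->start,
                                       btrfs_header_level(eb), 1,
                                       &refs, &flags);
#endif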

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  Implicit back refs are optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key.  This information allows us to find the block by
 * b-tree searching.  Full back refs are for pointers in tree blocks not
 * referenced by their owner trees.  The location of the tree block is
 * recorded in the back refs.  Actually the full back ref is generic and
 * can be used in all cases where implicit back refs are used.  The major
 * shortcoming of full back refs is their overhead.  Every time a tree
 * block gets COWed, we have to update the back refs entry for all
 * pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it.  So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for pointers
 * in the block.  Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * pointers in the block.  Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts.  The original
 * implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back reference key composition:
 *
 * The key objectid corresponds to the first byte in the extent.
 * The key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - Objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is a hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key.  The key offset for the implicit back refs is
 * the objectid of the block's owner tree.  The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block is required.  This information is stored in
 * the tree block info structure.
 */
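
/*
 * Worked example (not from the original file): a data extent starting at
 * byte X, implicitly referenced by inode 257 at file offset 0 in the
 * subvolume with root objectid 5, is described by the extent tree key
 *
 *     (X, BTRFS_EXTENT_DATA_REF_KEY, hash_extent_data_ref(5, 257, 0))
 *
 * while the same extent referenced through a shared tree leaf at byte P
 * would use (X, BTRFS_SHARED_DATA_REF_KEY, P) instead.
 */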

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct btrfs_path *path,
                                  u64 owner, u32 extra_size)
{
        struct btrfs_extent_item *item;
        struct btrfs_extent_item_v0 *ei0;
        struct btrfs_extent_ref_v0 *ref0;
        struct btrfs_tree_block_info *bi;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        u32 new_size = sizeof(*item);
        u64 refs;
        int ret;

        leaf = path->nodes[0];
        BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                             struct btrfs_extent_item_v0);
        refs = btrfs_extent_refs_v0(leaf, ei0);

        if (owner == (u64)-1) {
                while (1) {
                        if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                                ret = btrfs_next_leaf(root, path);
                                if (ret < 0)
                                        return ret;
                                BUG_ON(ret > 0); /* Corruption */
                                leaf = path->nodes[0];
                        }
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0]);
                        BUG_ON(key.objectid != found_key.objectid);
                        if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
                                path->slots[0]++;
                                continue;
                        }
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                              struct btrfs_extent_ref_v0);
                        owner = btrfs_ref_objectid_v0(leaf, ref0);
                        break;
                }
        }
        btrfs_release_path(path);

        if (owner < BTRFS_FIRST_FREE_OBJECTID)
                new_size += sizeof(*bi);

        new_size -= sizeof(*ei0);
        ret = btrfs_search_slot(trans, root, &key, path,
                                new_size + extra_size, 1);
        if (ret < 0)
                return ret;
        BUG_ON(ret); /* Corruption */

        btrfs_extend_item(root, path, new_size);

        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        btrfs_set_extent_refs(leaf, item, refs);
        /* FIXME: get real generation */
        btrfs_set_extent_generation(leaf, item, 0);
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                btrfs_set_extent_flags(leaf, item,
                                       BTRFS_EXTENT_FLAG_TREE_BLOCK |
                                       BTRFS_BLOCK_FLAG_FULL_BACKREF);
                bi = (struct btrfs_tree_block_info *)(item + 1);
                /* FIXME: get first key of the block */
                memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
                btrfs_set_tree_block_level(leaf, bi, (int)owner);
        } else {
                btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
        }
        btrfs_mark_buffer_dirty(leaf);
        return 0;
}
#endif

static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
        u32 high_crc = ~(u32)0;
        u32 low_crc = ~(u32)0;
        __le64 lenum;

        lenum = cpu_to_le64(root_objectid);
        high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(owner);
        low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(offset);
        low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));

        return ((u64)high_crc << 31) ^ (u64)low_crc;
}
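
/*
 * Illustrative sketch (not from the original file): the hash above becomes
 * the key offset of implicit data back ref items, exactly as the lookup
 * and insert helpers below build their search keys:
 */
#if 0
        key.objectid = bytenr;
        key.type = BTRFS_EXTENT_DATA_REF_KEY;
        key.offset = hash_extent_data_ref(root_objectid, owner, offset);
#endif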

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
                                     struct btrfs_extent_data_ref *ref)
{
        return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
                                    btrfs_extent_data_ref_objectid(leaf, ref),
                                    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
                                 struct btrfs_extent_data_ref *ref,
                                 u64 root_objectid, u64 owner, u64 offset)
{
        if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
            btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
            btrfs_extent_data_ref_offset(leaf, ref) != offset)
                return 0;
        return 1;
}

static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid,
                                           u64 owner, u64 offset)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref;
        struct extent_buffer *leaf;
        u32 nritems;
        int ret;
        int recow;
        int err = -ENOENT;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
        }
again:
        recow = 0;
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0) {
                err = ret;
                goto fail;
        }

        if (parent) {
                if (!ret)
                        return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                key.type = BTRFS_EXTENT_REF_V0_KEY;
                btrfs_release_path(path);
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret < 0) {
                        err = ret;
                        goto fail;
                }
                if (!ret)
                        return 0;
#endif
                goto fail;
        }

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);
        while (1) {
                if (path->slots[0] >= nritems) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                err = ret;
                        if (ret)
                                goto fail;

                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid != bytenr ||
                    key.type != BTRFS_EXTENT_DATA_REF_KEY)
                        goto fail;

                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);

                if (match_extent_data_ref(leaf, ref, root_objectid,
                                          owner, offset)) {
                        if (recow) {
                                btrfs_release_path(path);
                                goto again;
                        }
                        err = 0;
                        break;
                }
                path->slots[0]++;
        }
fail:
        return err;
}

static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid, u64 owner,
                                           u64 offset, int refs_to_add)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        u32 size;
        u32 num_refs;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
                size = sizeof(struct btrfs_shared_data_ref);
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
                size = sizeof(struct btrfs_extent_data_ref);
        }

        ret = btrfs_insert_empty_item(trans, root, path, &key, size);
        if (ret && ret != -EEXIST)
                goto fail;

        leaf = path->nodes[0];
        if (parent) {
                struct btrfs_shared_data_ref *ref;
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_shared_data_ref);
                if (ret == 0) {
                        btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_shared_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
                }
        } else {
                struct btrfs_extent_data_ref *ref;
                while (ret == -EEXIST) {
                        ref = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_data_ref);
                        if (match_extent_data_ref(leaf, ref, root_objectid,
                                                  owner, offset))
                                break;
                        btrfs_release_path(path);
                        key.offset++;
                        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                                      size);
                        if (ret && ret != -EEXIST)
                                goto fail;

                        leaf = path->nodes[0];
                }
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);
                if (ret == 0) {
                        btrfs_set_extent_data_ref_root(leaf, ref,
                                                       root_objectid);
                        btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
                        btrfs_set_extent_data_ref_offset(leaf, ref, offset);
                        btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_extent_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
                }
        }
        btrfs_mark_buffer_dirty(leaf);
        ret = 0;
fail:
        btrfs_release_path(path);
        return ret;
}
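
/*
 * Note: because the EXTENT_DATA_REF key offset is only a hash, distinct
 * (root, owner, offset) tuples can collide.  insert_extent_data_ref()
 * above resolves collisions by bumping key.offset and re-inserting until
 * it finds either the matching ref item or a free slot, while
 * lookup_extent_data_ref() compensates by scanning forward over all items
 * with the same objectid and type.
 */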
1270
1271 static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
1272                                            struct btrfs_root *root,
1273                                            struct btrfs_path *path,
1274                                            int refs_to_drop)
1275 {
1276         struct btrfs_key key;
1277         struct btrfs_extent_data_ref *ref1 = NULL;
1278         struct btrfs_shared_data_ref *ref2 = NULL;
1279         struct extent_buffer *leaf;
1280         u32 num_refs = 0;
1281         int ret = 0;
1282
1283         leaf = path->nodes[0];
1284         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1285
1286         if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1287                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1288                                       struct btrfs_extent_data_ref);
1289                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1290         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1291                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1292                                       struct btrfs_shared_data_ref);
1293                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1294 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1295         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1296                 struct btrfs_extent_ref_v0 *ref0;
1297                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1298                                       struct btrfs_extent_ref_v0);
1299                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1300 #endif
1301         } else {
1302                 BUG();
1303         }
1304
1305         BUG_ON(num_refs < refs_to_drop);
1306         num_refs -= refs_to_drop;
1307
1308         if (num_refs == 0) {
1309                 ret = btrfs_del_item(trans, root, path);
1310         } else {
1311                 if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
1312                         btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
1313                 else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
1314                         btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
1315 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1316                 else {
1317                         struct btrfs_extent_ref_v0 *ref0;
1318                         ref0 = btrfs_item_ptr(leaf, path->slots[0],
1319                                         struct btrfs_extent_ref_v0);
1320                         btrfs_set_ref_count_v0(leaf, ref0, num_refs);
1321                 }
1322 #endif
1323                 btrfs_mark_buffer_dirty(leaf);
1324         }
1325         return ret;
1326 }
1327
1328 static noinline u32 extent_data_ref_count(struct btrfs_root *root,
1329                                           struct btrfs_path *path,
1330                                           struct btrfs_extent_inline_ref *iref)
1331 {
1332         struct btrfs_key key;
1333         struct extent_buffer *leaf;
1334         struct btrfs_extent_data_ref *ref1;
1335         struct btrfs_shared_data_ref *ref2;
1336         u32 num_refs = 0;
1337
1338         leaf = path->nodes[0];
1339         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1340         if (iref) {
1341                 if (btrfs_extent_inline_ref_type(leaf, iref) ==
1342                     BTRFS_EXTENT_DATA_REF_KEY) {
1343                         ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1344                         num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1345                 } else {
1346                         ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1347                         num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1348                 }
1349         } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1350                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1351                                       struct btrfs_extent_data_ref);
1352                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1353         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1354                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1355                                       struct btrfs_shared_data_ref);
1356                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1357 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1358         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1359                 struct btrfs_extent_ref_v0 *ref0;
1360                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1361                                       struct btrfs_extent_ref_v0);
1362                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1363 #endif
1364         } else {
1365                 WARN_ON(1);
1366         }
1367         return num_refs;
1368 }
1369
1370 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1371                                           struct btrfs_root *root,
1372                                           struct btrfs_path *path,
1373                                           u64 bytenr, u64 parent,
1374                                           u64 root_objectid)
1375 {
1376         struct btrfs_key key;
1377         int ret;
1378
1379         key.objectid = bytenr;
1380         if (parent) {
1381                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1382                 key.offset = parent;
1383         } else {
1384                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1385                 key.offset = root_objectid;
1386         }
1387
1388         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1389         if (ret > 0)
1390                 ret = -ENOENT;
1391 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1392         if (ret == -ENOENT && parent) {
1393                 btrfs_release_path(path);
1394                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1395                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1396                 if (ret > 0)
1397                         ret = -ENOENT;
1398         }
1399 #endif
1400         return ret;
1401 }
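
/*
 * Both lookup_tree_block_ref() above and insert_tree_block_ref() below
 * use the same two key shapes:
 *
 *   shared block ref: (bytenr, BTRFS_SHARED_BLOCK_REF_KEY, parent)
 *   tree block ref:   (bytenr, BTRFS_TREE_BLOCK_REF_KEY, root_objectid)
 */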
1402
1403 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1404                                           struct btrfs_root *root,
1405                                           struct btrfs_path *path,
1406                                           u64 bytenr, u64 parent,
1407                                           u64 root_objectid)
1408 {
1409         struct btrfs_key key;
1410         int ret;
1411
1412         key.objectid = bytenr;
1413         if (parent) {
1414                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1415                 key.offset = parent;
1416         } else {
1417                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1418                 key.offset = root_objectid;
1419         }
1420
1421         ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1422         btrfs_release_path(path);
1423         return ret;
1424 }
1425
1426 static inline int extent_ref_type(u64 parent, u64 owner)
1427 {
1428         int type;
1429         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1430                 if (parent > 0)
1431                         type = BTRFS_SHARED_BLOCK_REF_KEY;
1432                 else
1433                         type = BTRFS_TREE_BLOCK_REF_KEY;
1434         } else {
1435                 if (parent > 0)
1436                         type = BTRFS_SHARED_DATA_REF_KEY;
1437                 else
1438                         type = BTRFS_EXTENT_DATA_REF_KEY;
1439         }
1440         return type;
1441 }
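
/*
 * Quick reference for extent_ref_type(): owner distinguishes data from
 * metadata, parent distinguishes shared from non-shared backrefs:
 *
 *   tree block (owner < BTRFS_FIRST_FREE_OBJECTID), parent set
 *                                      -> BTRFS_SHARED_BLOCK_REF_KEY
 *   tree block, parent == 0            -> BTRFS_TREE_BLOCK_REF_KEY
 *   data extent, parent set            -> BTRFS_SHARED_DATA_REF_KEY
 *   data extent, parent == 0           -> BTRFS_EXTENT_DATA_REF_KEY
 */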
1442
1443 static int find_next_key(struct btrfs_path *path, int level,
1444                          struct btrfs_key *key)
1446 {
1447         for (; level < BTRFS_MAX_LEVEL; level++) {
1448                 if (!path->nodes[level])
1449                         break;
1450                 if (path->slots[level] + 1 >=
1451                     btrfs_header_nritems(path->nodes[level]))
1452                         continue;
1453                 if (level == 0)
1454                         btrfs_item_key_to_cpu(path->nodes[level], key,
1455                                               path->slots[level] + 1);
1456                 else
1457                         btrfs_node_key_to_cpu(path->nodes[level], key,
1458                                               path->slots[level] + 1);
1459                 return 0;
1460         }
1461         return 1;
1462 }
1463
1464 /*
1465  * Look for an inline back ref. If the back ref is found, *ref_ret is set
1466  * to the address of the inline back ref, and 0 is returned.
1467  *
1468  * If the back ref isn't found, *ref_ret is set to the address where it
1469  * should be inserted, and -ENOENT is returned.
1470  *
1471  * If insert is true and there are too many inline back refs, the path
1472  * points to the extent item, and -EAGAIN is returned.
1473  *
1474  * NOTE: inline back refs are ordered in the same way that back ref
1475  *       items in the tree are ordered.
1476  */
1477 static noinline_for_stack
1478 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1479                                  struct btrfs_root *root,
1480                                  struct btrfs_path *path,
1481                                  struct btrfs_extent_inline_ref **ref_ret,
1482                                  u64 bytenr, u64 num_bytes,
1483                                  u64 parent, u64 root_objectid,
1484                                  u64 owner, u64 offset, int insert)
1485 {
1486         struct btrfs_key key;
1487         struct extent_buffer *leaf;
1488         struct btrfs_extent_item *ei;
1489         struct btrfs_extent_inline_ref *iref;
1490         u64 flags;
1491         u64 item_size;
1492         unsigned long ptr;
1493         unsigned long end;
1494         int extra_size;
1495         int type;
1496         int want;
1497         int ret;
1498         int err = 0;
1499         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
1500                                                  SKINNY_METADATA);
1501
1502         key.objectid = bytenr;
1503         key.type = BTRFS_EXTENT_ITEM_KEY;
1504         key.offset = num_bytes;
1505
1506         want = extent_ref_type(parent, owner);
1507         if (insert) {
1508                 extra_size = btrfs_extent_inline_ref_size(want);
1509                 path->keep_locks = 1;
1510         } else
1511                 extra_size = -1;
1512
1513         /*
1514          * For metadata, owner is the level of the tree block, so it can be
1515          * used directly as the offset of the skinny metadata key.
1516          */
1517         if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
1518                 key.type = BTRFS_METADATA_ITEM_KEY;
1519                 key.offset = owner;
1520         }
1521
1522 again:
1523         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1524         if (ret < 0) {
1525                 err = ret;
1526                 goto out;
1527         }
1528
1529         /*
1530          * We may be a newly converted file system which still has the old fat
1531          * extent entries for metadata, so try and see if we have one of those.
1532          */
1533         if (ret > 0 && skinny_metadata) {
1534                 skinny_metadata = false;
1535                 if (path->slots[0]) {
1536                         path->slots[0]--;
1537                         btrfs_item_key_to_cpu(path->nodes[0], &key,
1538                                               path->slots[0]);
1539                         if (key.objectid == bytenr &&
1540                             key.type == BTRFS_EXTENT_ITEM_KEY &&
1541                             key.offset == num_bytes)
1542                                 ret = 0;
1543                 }
1544                 if (ret) {
1545                         key.type = BTRFS_EXTENT_ITEM_KEY;
1546                         key.offset = num_bytes;
1547                         btrfs_release_path(path);
1548                         goto again;
1549                 }
1550         }
1551
1552         if (ret && !insert) {
1553                 err = -ENOENT;
1554                 goto out;
1555         } else if (WARN_ON(ret)) {
1556                 err = -EIO;
1557                 goto out;
1558         }
1559
1560         leaf = path->nodes[0];
1561         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1562 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1563         if (item_size < sizeof(*ei)) {
1564                 if (!insert) {
1565                         err = -ENOENT;
1566                         goto out;
1567                 }
1568                 ret = convert_extent_item_v0(trans, root, path, owner,
1569                                              extra_size);
1570                 if (ret < 0) {
1571                         err = ret;
1572                         goto out;
1573                 }
1574                 leaf = path->nodes[0];
1575                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1576         }
1577 #endif
1578         BUG_ON(item_size < sizeof(*ei));
1579
1580         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1581         flags = btrfs_extent_flags(leaf, ei);
1582
1583         ptr = (unsigned long)(ei + 1);
1584         end = (unsigned long)ei + item_size;
1585
1586         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
1587                 ptr += sizeof(struct btrfs_tree_block_info);
1588                 BUG_ON(ptr > end);
1589         }
1590
1591         err = -ENOENT;
1592         while (1) {
1593                 if (ptr >= end) {
1594                         WARN_ON(ptr > end);
1595                         break;
1596                 }
1597                 iref = (struct btrfs_extent_inline_ref *)ptr;
1598                 type = btrfs_extent_inline_ref_type(leaf, iref);
1599                 if (want < type)
1600                         break;
1601                 if (want > type) {
1602                         ptr += btrfs_extent_inline_ref_size(type);
1603                         continue;
1604                 }
1605
1606                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1607                         struct btrfs_extent_data_ref *dref;
1608                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1609                         if (match_extent_data_ref(leaf, dref, root_objectid,
1610                                                   owner, offset)) {
1611                                 err = 0;
1612                                 break;
1613                         }
1614                         if (hash_extent_data_ref_item(leaf, dref) <
1615                             hash_extent_data_ref(root_objectid, owner, offset))
1616                                 break;
1617                 } else {
1618                         u64 ref_offset;
1619                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1620                         if (parent > 0) {
1621                                 if (parent == ref_offset) {
1622                                         err = 0;
1623                                         break;
1624                                 }
1625                                 if (ref_offset < parent)
1626                                         break;
1627                         } else {
1628                                 if (root_objectid == ref_offset) {
1629                                         err = 0;
1630                                         break;
1631                                 }
1632                                 if (ref_offset < root_objectid)
1633                                         break;
1634                         }
1635                 }
1636                 ptr += btrfs_extent_inline_ref_size(type);
1637         }
1638         if (err == -ENOENT && insert) {
1639                 if (item_size + extra_size >=
1640                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1641                         err = -EAGAIN;
1642                         goto out;
1643                 }
1644                 /*
1645                  * To add a new inline back ref, we have to make sure
1646                  * there is no corresponding back ref item.
1647                  * For simplicity, we just do not add a new inline back
1648                  * ref if there is any kind of item for this block.
1649                  */
1650                 if (find_next_key(path, 0, &key) == 0 &&
1651                     key.objectid == bytenr &&
1652                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1653                         err = -EAGAIN;
1654                         goto out;
1655                 }
1656         }
1657         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1658 out:
1659         if (insert) {
1660                 path->keep_locks = 0;
1661                 btrfs_unlock_up_safe(path, 1);
1662         }
1663         return err;
1664 }
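
/*
 * A minimal sketch of how the three return values are consumed;
 * insert_inline_extent_backref() below is the canonical caller:
 *
 *	ret = lookup_inline_extent_backref(trans, root, path, &iref, bytenr,
 *					   num_bytes, parent, root_objectid,
 *					   owner, offset, 1);
 *	if (ret == 0)			(found: bump the inline ref in place)
 *		update_inline_extent_backref(root, path, iref, refs_to_add,
 *					     extent_op);
 *	else if (ret == -ENOENT)	(*iref points at the insertion spot)
 *		setup_inline_extent_backref(root, path, iref, parent,
 *					    root_objectid, owner, offset,
 *					    refs_to_add, extent_op);
 *
 * -EAGAIN means the extent item has no room for another inline ref; the
 * caller falls back to a separate keyed backref item instead.
 */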
1665
1666 /*
1667  * helper to add new inline back ref
1668  */
1669 static noinline_for_stack
1670 void setup_inline_extent_backref(struct btrfs_root *root,
1671                                  struct btrfs_path *path,
1672                                  struct btrfs_extent_inline_ref *iref,
1673                                  u64 parent, u64 root_objectid,
1674                                  u64 owner, u64 offset, int refs_to_add,
1675                                  struct btrfs_delayed_extent_op *extent_op)
1676 {
1677         struct extent_buffer *leaf;
1678         struct btrfs_extent_item *ei;
1679         unsigned long ptr;
1680         unsigned long end;
1681         unsigned long item_offset;
1682         u64 refs;
1683         int size;
1684         int type;
1685
1686         leaf = path->nodes[0];
1687         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1688         item_offset = (unsigned long)iref - (unsigned long)ei;
1689
1690         type = extent_ref_type(parent, owner);
1691         size = btrfs_extent_inline_ref_size(type);
1692
1693         btrfs_extend_item(root, path, size);
1694
1695         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1696         refs = btrfs_extent_refs(leaf, ei);
1697         refs += refs_to_add;
1698         btrfs_set_extent_refs(leaf, ei, refs);
1699         if (extent_op)
1700                 __run_delayed_extent_op(extent_op, leaf, ei);
1701
1702         ptr = (unsigned long)ei + item_offset;
1703         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1704         if (ptr < end - size)
1705                 memmove_extent_buffer(leaf, ptr + size, ptr,
1706                                       end - size - ptr);
1707
1708         iref = (struct btrfs_extent_inline_ref *)ptr;
1709         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1710         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1711                 struct btrfs_extent_data_ref *dref;
1712                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1713                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1714                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1715                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1716                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1717         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1718                 struct btrfs_shared_data_ref *sref;
1719                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1720                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1721                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1722         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1723                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1724         } else {
1725                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1726         }
1727         btrfs_mark_buffer_dirty(leaf);
1728 }
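
/*
 * What the memmove in setup_inline_extent_backref() does, for an item
 * whose existing inline refs sort as A < new < B (inline refs keep the
 * same order as their keyed counterparts):
 *
 *	before btrfs_extend_item():	[extent item][A][B]
 *	after the shift and setup:	[extent item][A][new][B]
 */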
1729
1730 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1731                                  struct btrfs_root *root,
1732                                  struct btrfs_path *path,
1733                                  struct btrfs_extent_inline_ref **ref_ret,
1734                                  u64 bytenr, u64 num_bytes, u64 parent,
1735                                  u64 root_objectid, u64 owner, u64 offset)
1736 {
1737         int ret;
1738
1739         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1740                                            bytenr, num_bytes, parent,
1741                                            root_objectid, owner, offset, 0);
1742         if (ret != -ENOENT)
1743                 return ret;
1744
1745         btrfs_release_path(path);
1746         *ref_ret = NULL;
1747
1748         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1749                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1750                                             root_objectid);
1751         } else {
1752                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1753                                              root_objectid, owner, offset);
1754         }
1755         return ret;
1756 }
1757
1758 /*
1759  * helper to update/remove inline back ref
1760  */
1761 static noinline_for_stack
1762 void update_inline_extent_backref(struct btrfs_root *root,
1763                                   struct btrfs_path *path,
1764                                   struct btrfs_extent_inline_ref *iref,
1765                                   int refs_to_mod,
1766                                   struct btrfs_delayed_extent_op *extent_op)
1767 {
1768         struct extent_buffer *leaf;
1769         struct btrfs_extent_item *ei;
1770         struct btrfs_extent_data_ref *dref = NULL;
1771         struct btrfs_shared_data_ref *sref = NULL;
1772         unsigned long ptr;
1773         unsigned long end;
1774         u32 item_size;
1775         int size;
1776         int type;
1777         u64 refs;
1778
1779         leaf = path->nodes[0];
1780         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1781         refs = btrfs_extent_refs(leaf, ei);
1782         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1783         refs += refs_to_mod;
1784         btrfs_set_extent_refs(leaf, ei, refs);
1785         if (extent_op)
1786                 __run_delayed_extent_op(extent_op, leaf, ei);
1787
1788         type = btrfs_extent_inline_ref_type(leaf, iref);
1789
1790         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1791                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1792                 refs = btrfs_extent_data_ref_count(leaf, dref);
1793         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1794                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1795                 refs = btrfs_shared_data_ref_count(leaf, sref);
1796         } else {
1797                 refs = 1;
1798                 BUG_ON(refs_to_mod != -1);
1799         }
1800
1801         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1802         refs += refs_to_mod;
1803
1804         if (refs > 0) {
1805                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1806                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1807                 else
1808                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1809         } else {
1810                 size = btrfs_extent_inline_ref_size(type);
1811                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1812                 ptr = (unsigned long)iref;
1813                 end = (unsigned long)ei + item_size;
1814                 if (ptr + size < end)
1815                         memmove_extent_buffer(leaf, ptr, ptr + size,
1816                                               end - ptr - size);
1817                 item_size -= size;
1818                 btrfs_truncate_item(root, path, item_size, 1);
1819         }
1820         btrfs_mark_buffer_dirty(leaf);
1821 }
1822
1823 static noinline_for_stack
1824 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1825                                  struct btrfs_root *root,
1826                                  struct btrfs_path *path,
1827                                  u64 bytenr, u64 num_bytes, u64 parent,
1828                                  u64 root_objectid, u64 owner,
1829                                  u64 offset, int refs_to_add,
1830                                  struct btrfs_delayed_extent_op *extent_op)
1831 {
1832         struct btrfs_extent_inline_ref *iref;
1833         int ret;
1834
1835         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1836                                            bytenr, num_bytes, parent,
1837                                            root_objectid, owner, offset, 1);
1838         if (ret == 0) {
1839                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1840                 update_inline_extent_backref(root, path, iref,
1841                                              refs_to_add, extent_op);
1842         } else if (ret == -ENOENT) {
1843                 setup_inline_extent_backref(root, path, iref, parent,
1844                                             root_objectid, owner, offset,
1845                                             refs_to_add, extent_op);
1846                 ret = 0;
1847         }
1848         return ret;
1849 }
1850
1851 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1852                                  struct btrfs_root *root,
1853                                  struct btrfs_path *path,
1854                                  u64 bytenr, u64 parent, u64 root_objectid,
1855                                  u64 owner, u64 offset, int refs_to_add)
1856 {
1857         int ret;
1858         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1859                 BUG_ON(refs_to_add != 1);
1860                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1861                                             parent, root_objectid);
1862         } else {
1863                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1864                                              parent, root_objectid,
1865                                              owner, offset, refs_to_add);
1866         }
1867         return ret;
1868 }
1869
1870 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1871                                  struct btrfs_root *root,
1872                                  struct btrfs_path *path,
1873                                  struct btrfs_extent_inline_ref *iref,
1874                                  int refs_to_drop, int is_data)
1875 {
1876         int ret = 0;
1877
1878         BUG_ON(!is_data && refs_to_drop != 1);
1879         if (iref) {
1880                 update_inline_extent_backref(root, path, iref,
1881                                              -refs_to_drop, NULL);
1882         } else if (is_data) {
1883                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
1884         } else {
1885                 ret = btrfs_del_item(trans, root, path);
1886         }
1887         return ret;
1888 }
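
/*
 * remove_extent_backref() takes one of three paths: an inline ref is
 * shrunk in place via update_inline_extent_backref() with a negative
 * modifier, a keyed data ref goes through remove_extent_data_ref(), and
 * a keyed tree block ref, which always carries a single ref, is simply
 * deleted.
 */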
1889
1890 static int btrfs_issue_discard(struct block_device *bdev,
1891                                 u64 start, u64 len)
1892 {
1893         return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
1894 }
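
/*
 * The ">> 9" above converts byte offsets and lengths into the 512-byte
 * sectors blkdev_issue_discard() expects, e.g. start = 1MiB (0x100000)
 * becomes sector 2048.
 */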
1895
1896 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1897                                 u64 num_bytes, u64 *actual_bytes)
1898 {
1899         int ret;
1900         u64 discarded_bytes = 0;
1901         struct btrfs_bio *bbio = NULL;
1902
1904         /* Tell the block device(s) that the sectors can be discarded */
1905         ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
1906                               bytenr, &num_bytes, &bbio, 0);
1907         /* Error condition is -ENOMEM */
1908         if (!ret) {
1909                 struct btrfs_bio_stripe *stripe = bbio->stripes;
1910                 int i;
1911
1913                 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
1914                         if (!stripe->dev->can_discard)
1915                                 continue;
1916
1917                         ret = btrfs_issue_discard(stripe->dev->bdev,
1918                                                   stripe->physical,
1919                                                   stripe->length);
1920                         if (!ret)
1921                                 discarded_bytes += stripe->length;
1922                         else if (ret != -EOPNOTSUPP)
1923                                 break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */
1924
1925                         /*
1926                          * In case we get back EOPNOTSUPP for some reason,
1927                          * just ignore the return value so we don't screw up
1928                          * people calling discard_extent.
1929                          */
1930                         ret = 0;
1931                 }
1932                 kfree(bbio);
1933         }
1934
1935         if (actual_bytes)
1936                 *actual_bytes = discarded_bytes;
1937
1939         if (ret == -EOPNOTSUPP)
1940                 ret = 0;
1941         return ret;
1942 }
1943
1944 /* Can return -ENOMEM */
1945 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1946                          struct btrfs_root *root,
1947                          u64 bytenr, u64 num_bytes, u64 parent,
1948                          u64 root_objectid, u64 owner, u64 offset, int for_cow)
1949 {
1950         int ret;
1951         struct btrfs_fs_info *fs_info = root->fs_info;
1952
1953         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1954                root_objectid == BTRFS_TREE_LOG_OBJECTID);
1955
1956         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1957                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
1958                                         num_bytes,
1959                                         parent, root_objectid, (int)owner,
1960                                         BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1961         } else {
1962                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
1963                                         num_bytes,
1964                                         parent, root_objectid, owner, offset,
1965                                         BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1966         }
1967         return ret;
1968 }
1969
1970 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1971                                   struct btrfs_root *root,
1972                                   u64 bytenr, u64 num_bytes,
1973                                   u64 parent, u64 root_objectid,
1974                                   u64 owner, u64 offset, int refs_to_add,
1975                                   struct btrfs_delayed_extent_op *extent_op)
1976 {
1977         struct btrfs_path *path;
1978         struct extent_buffer *leaf;
1979         struct btrfs_extent_item *item;
1980         u64 refs;
1981         int ret;
1982
1983         path = btrfs_alloc_path();
1984         if (!path)
1985                 return -ENOMEM;
1986
1987         path->reada = 1;
1988         path->leave_spinning = 1;
1989         /* this will setup the path even if it fails to insert the back ref */
1990         ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
1991                                            path, bytenr, num_bytes, parent,
1992                                            root_objectid, owner, offset,
1993                                            refs_to_add, extent_op);
1994         if (ret != -EAGAIN)
1995                 goto out;
1996
1997         leaf = path->nodes[0];
1998         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1999         refs = btrfs_extent_refs(leaf, item);
2000         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
2001         if (extent_op)
2002                 __run_delayed_extent_op(extent_op, leaf, item);
2003
2004         btrfs_mark_buffer_dirty(leaf);
2005         btrfs_release_path(path);
2006
2007         path->reada = 1;
2008         path->leave_spinning = 1;
2009
2010         /* now insert the actual backref */
2011         ret = insert_extent_backref(trans, root->fs_info->extent_root,
2012                                     path, bytenr, parent, root_objectid,
2013                                     owner, offset, refs_to_add);
2014         if (ret)
2015                 btrfs_abort_transaction(trans, root, ret);
2016 out:
2017         btrfs_free_path(path);
2018         return ret;
2019 }
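
/*
 * Note the flow above: the inline insert either finishes (success or a
 * hard error) and we jump to out, or it returns -EAGAIN with the path
 * positioned at the extent item; in that case we bump the ref count on
 * the item itself and then add the backref as a separate keyed item.
 */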
2020
2021 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
2022                                 struct btrfs_root *root,
2023                                 struct btrfs_delayed_ref_node *node,
2024                                 struct btrfs_delayed_extent_op *extent_op,
2025                                 int insert_reserved)
2026 {
2027         int ret = 0;
2028         struct btrfs_delayed_data_ref *ref;
2029         struct btrfs_key ins;
2030         u64 parent = 0;
2031         u64 ref_root = 0;
2032         u64 flags = 0;
2033
2034         ins.objectid = node->bytenr;
2035         ins.offset = node->num_bytes;
2036         ins.type = BTRFS_EXTENT_ITEM_KEY;
2037
2038         ref = btrfs_delayed_node_to_data_ref(node);
2039         trace_run_delayed_data_ref(node, ref, node->action);
2040
2041         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
2042                 parent = ref->parent;
2043         else
2044                 ref_root = ref->root;
2045
2046         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2047                 if (extent_op)
2048                         flags |= extent_op->flags_to_set;
2049                 ret = alloc_reserved_file_extent(trans, root,
2050                                                  parent, ref_root, flags,
2051                                                  ref->objectid, ref->offset,
2052                                                  &ins, node->ref_mod);
2053         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2054                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2055                                              node->num_bytes, parent,
2056                                              ref_root, ref->objectid,
2057                                              ref->offset, node->ref_mod,
2058                                              extent_op);
2059         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2060                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2061                                           node->num_bytes, parent,
2062                                           ref_root, ref->objectid,
2063                                           ref->offset, node->ref_mod,
2064                                           extent_op);
2065         } else {
2066                 BUG();
2067         }
2068         return ret;
2069 }
2070
2071 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2072                                     struct extent_buffer *leaf,
2073                                     struct btrfs_extent_item *ei)
2074 {
2075         u64 flags = btrfs_extent_flags(leaf, ei);
2076         if (extent_op->update_flags) {
2077                 flags |= extent_op->flags_to_set;
2078                 btrfs_set_extent_flags(leaf, ei, flags);
2079         }
2080
2081         if (extent_op->update_key) {
2082                 struct btrfs_tree_block_info *bi;
2083                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2084                 bi = (struct btrfs_tree_block_info *)(ei + 1);
2085                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2086         }
2087 }
2088
2089 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2090                                  struct btrfs_root *root,
2091                                  struct btrfs_delayed_ref_node *node,
2092                                  struct btrfs_delayed_extent_op *extent_op)
2093 {
2094         struct btrfs_key key;
2095         struct btrfs_path *path;
2096         struct btrfs_extent_item *ei;
2097         struct extent_buffer *leaf;
2098         u32 item_size;
2099         int ret;
2100         int err = 0;
2101         int metadata = !extent_op->is_data;
2102
2103         if (trans->aborted)
2104                 return 0;
2105
2106         if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2107                 metadata = 0;
2108
2109         path = btrfs_alloc_path();
2110         if (!path)
2111                 return -ENOMEM;
2112
2113         key.objectid = node->bytenr;
2114
2115         if (metadata) {
2116                 key.type = BTRFS_METADATA_ITEM_KEY;
2117                 key.offset = extent_op->level;
2118         } else {
2119                 key.type = BTRFS_EXTENT_ITEM_KEY;
2120                 key.offset = node->num_bytes;
2121         }
2122
2123 again:
2124         path->reada = 1;
2125         path->leave_spinning = 1;
2126         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2127                                 path, 0, 1);
2128         if (ret < 0) {
2129                 err = ret;
2130                 goto out;
2131         }
2132         if (ret > 0) {
2133                 if (metadata) {
2134                         if (path->slots[0] > 0) {
2135                                 path->slots[0]--;
2136                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
2137                                                       path->slots[0]);
2138                                 if (key.objectid == node->bytenr &&
2139                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
2140                                     key.offset == node->num_bytes)
2141                                         ret = 0;
2142                         }
2143                         if (ret > 0) {
2144                                 btrfs_release_path(path);
2145                                 metadata = 0;
2146
2147                                 key.objectid = node->bytenr;
2148                                 key.offset = node->num_bytes;
2149                                 key.type = BTRFS_EXTENT_ITEM_KEY;
2150                                 goto again;
2151                         }
2152                 } else {
2153                         err = -EIO;
2154                         goto out;
2155                 }
2156         }
2157
2158         leaf = path->nodes[0];
2159         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2160 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2161         if (item_size < sizeof(*ei)) {
2162                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2163                                              path, (u64)-1, 0);
2164                 if (ret < 0) {
2165                         err = ret;
2166                         goto out;
2167                 }
2168                 leaf = path->nodes[0];
2169                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2170         }
2171 #endif
2172         BUG_ON(item_size < sizeof(*ei));
2173         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2174         __run_delayed_extent_op(extent_op, leaf, ei);
2175
2176         btrfs_mark_buffer_dirty(leaf);
2177 out:
2178         btrfs_free_path(path);
2179         return err;
2180 }
2181
2182 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2183                                 struct btrfs_root *root,
2184                                 struct btrfs_delayed_ref_node *node,
2185                                 struct btrfs_delayed_extent_op *extent_op,
2186                                 int insert_reserved)
2187 {
2188         int ret = 0;
2189         struct btrfs_delayed_tree_ref *ref;
2190         struct btrfs_key ins;
2191         u64 parent = 0;
2192         u64 ref_root = 0;
2193         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
2194                                                  SKINNY_METADATA);
2195
2196         ref = btrfs_delayed_node_to_tree_ref(node);
2197         trace_run_delayed_tree_ref(node, ref, node->action);
2198
2199         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2200                 parent = ref->parent;
2201         else
2202                 ref_root = ref->root;
2203
2204         ins.objectid = node->bytenr;
2205         if (skinny_metadata) {
2206                 ins.offset = ref->level;
2207                 ins.type = BTRFS_METADATA_ITEM_KEY;
2208         } else {
2209                 ins.offset = node->num_bytes;
2210                 ins.type = BTRFS_EXTENT_ITEM_KEY;
2211         }
2212
2213         BUG_ON(node->ref_mod != 1);
2214         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2215                 BUG_ON(!extent_op || !extent_op->update_flags);
2216                 ret = alloc_reserved_tree_block(trans, root,
2217                                                 parent, ref_root,
2218                                                 extent_op->flags_to_set,
2219                                                 &extent_op->key,
2220                                                 ref->level, &ins);
2221         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2222                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2223                                              node->num_bytes, parent, ref_root,
2224                                              ref->level, 0, 1, extent_op);
2225         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2226                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2227                                           node->num_bytes, parent, ref_root,
2228                                           ref->level, 0, 1, extent_op);
2229         } else {
2230                 BUG();
2231         }
2232         return ret;
2233 }
2234
2235 /* helper function to actually process a single delayed ref entry */
2236 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2237                                struct btrfs_root *root,
2238                                struct btrfs_delayed_ref_node *node,
2239                                struct btrfs_delayed_extent_op *extent_op,
2240                                int insert_reserved)
2241 {
2242         int ret = 0;
2243
2244         if (trans->aborted) {
2245                 if (insert_reserved)
2246                         btrfs_pin_extent(root, node->bytenr,
2247                                          node->num_bytes, 1);
2248                 return 0;
2249         }
2250
2251         if (btrfs_delayed_ref_is_head(node)) {
2252                 struct btrfs_delayed_ref_head *head;
2253                 /*
2254                  * we've hit the end of the chain and we were supposed
2255                  * to insert this extent into the tree.  But, it got
2256                  * deleted before we ever needed to insert it, so all
2257                  * we have to do is clean up the accounting
2258                  */
2259                 BUG_ON(extent_op);
2260                 head = btrfs_delayed_node_to_head(node);
2261                 trace_run_delayed_ref_head(node, head, node->action);
2262
2263                 if (insert_reserved) {
2264                         btrfs_pin_extent(root, node->bytenr,
2265                                          node->num_bytes, 1);
2266                         if (head->is_data) {
2267                                 ret = btrfs_del_csums(trans, root,
2268                                                       node->bytenr,
2269                                                       node->num_bytes);
2270                         }
2271                 }
2272                 return ret;
2273         }
2274
2275         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2276             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2277                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2278                                            insert_reserved);
2279         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2280                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2281                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2282                                            insert_reserved);
2283         else
2284                 BUG();
2285         return ret;
2286 }
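
/*
 * Dispatch summary for run_one_delayed_ref(): a head node only settles
 * accounting (pinning the extent, and deleting csums for data, when a
 * reserved insertion never happened); tree block refs go through
 * run_delayed_tree_ref() and data refs through run_delayed_data_ref().
 */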
2287
2288 static noinline struct btrfs_delayed_ref_node *
2289 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2290 {
2291         struct rb_node *node;
2292         struct btrfs_delayed_ref_node *ref, *last = NULL;
2293
2294         /*
2295          * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
2296          * This prevents the ref count from going down to zero while
2297          * there are still pending delayed refs.
2298          */
2299         node = rb_first(&head->ref_root);
2300         while (node) {
2301                 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2302                                 rb_node);
2303                 if (ref->action == BTRFS_ADD_DELAYED_REF)
2304                         return ref;
2305                 else if (last == NULL)
2306                         last = ref;
2307                 node = rb_next(node);
2308         }
2309         return last;
2310 }
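
/*
 * Example of why ADDs are picked first: with a DROP and an ADD pending
 * on an extent whose ref count is 1, running the ADD first gives
 * 1 -> 2 -> 1, while running the DROP first would give 1 -> 0 -> 1 and
 * transiently free the extent.
 */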
2311
2312 /*
2313  * Returns 0 on success or if called with an already aborted transaction.
2314  * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2315  */
2316 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2317                                              struct btrfs_root *root,
2318                                              unsigned long nr)
2319 {
2320         struct btrfs_delayed_ref_root *delayed_refs;
2321         struct btrfs_delayed_ref_node *ref;
2322         struct btrfs_delayed_ref_head *locked_ref = NULL;
2323         struct btrfs_delayed_extent_op *extent_op;
2324         struct btrfs_fs_info *fs_info = root->fs_info;
2325         ktime_t start = ktime_get();
2326         int ret;
2327         unsigned long count = 0;
2328         unsigned long actual_count = 0;
2329         int must_insert_reserved = 0;
2330
2331         delayed_refs = &trans->transaction->delayed_refs;
2332         while (1) {
2333                 if (!locked_ref) {
2334                         if (count >= nr)
2335                                 break;
2336
2337                         spin_lock(&delayed_refs->lock);
2338                         locked_ref = btrfs_select_ref_head(trans);
2339                         if (!locked_ref) {
2340                                 spin_unlock(&delayed_refs->lock);
2341                                 break;
2342                         }
2343
2344                         /* grab the lock that says we are going to process
2345                          * all the refs for this head */
2346                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2347                         spin_unlock(&delayed_refs->lock);
2348                         /*
2349                          * we may have dropped the spin lock to get the head
2350                          * mutex lock, and that might have given someone else
2351                          * time to free the head.  If that's true, it has been
2352                          * removed from our list and we can move on.
2353                          */
2354                         if (ret == -EAGAIN) {
2355                                 locked_ref = NULL;
2356                                 count++;
2357                                 continue;
2358                         }
2359                 }
2360
2361                 /*
2362                  * We need to try and merge add/drops of the same ref since we
2363                  * can run into issues with relocate dropping the implicit ref
2364                  * and then it being added back again before the drop can
2365                  * finish.  If we merged anything we need to re-loop so we can
2366                  * get a good ref.
2367                  */
2368                 spin_lock(&locked_ref->lock);
2369                 btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
2370                                          locked_ref);
2371
2372                 /*
2373                  * locked_ref is the head node, so we have to go one
2374                  * node back for any delayed ref updates
2375                  */
2376                 ref = select_delayed_ref(locked_ref);
2377
2378                 if (ref && ref->seq &&
2379                     btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2380                         spin_unlock(&locked_ref->lock);
2381                         btrfs_delayed_ref_unlock(locked_ref);
2382                         spin_lock(&delayed_refs->lock);
2383                         locked_ref->processing = 0;
2384                         delayed_refs->num_heads_ready++;
2385                         spin_unlock(&delayed_refs->lock);
2386                         locked_ref = NULL;
2387                         cond_resched();
2388                         count++;
2389                         continue;
2390                 }
2391
2392                 /*
2393                  * record the must insert reserved flag before we
2394                  * drop the spin lock.
2395                  */
2396                 must_insert_reserved = locked_ref->must_insert_reserved;
2397                 locked_ref->must_insert_reserved = 0;
2398
2399                 extent_op = locked_ref->extent_op;
2400                 locked_ref->extent_op = NULL;
2401
2402                 if (!ref) {
2403
2405                         /* All delayed refs have been processed, go ahead
2406                          * and send the head node to run_one_delayed_ref,
2407                          * so that any accounting fixes can happen
2408                          */
2409                         ref = &locked_ref->node;
2410
2411                         if (extent_op && must_insert_reserved) {
2412                                 btrfs_free_delayed_extent_op(extent_op);
2413                                 extent_op = NULL;
2414                         }
2415
2416                         if (extent_op) {
2417                                 spin_unlock(&locked_ref->lock);
2418                                 ret = run_delayed_extent_op(trans, root,
2419                                                             ref, extent_op);
2420                                 btrfs_free_delayed_extent_op(extent_op);
2421
2422                                 if (ret) {
2423                                         /*
2424                                          * Need to reset must_insert_reserved if
2425                                          * there was an error so the abort stuff
2426                                          * can cleanup the reserved space
2427                                          * properly.
2428                                          */
2429                                         if (must_insert_reserved)
2430                                                 locked_ref->must_insert_reserved = 1;
2431                                         locked_ref->processing = 0;
2432                                         btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
2433                                         btrfs_delayed_ref_unlock(locked_ref);
2434                                         return ret;
2435                                 }
2436                                 continue;
2437                         }
2438
2439                         /*
2440                  * Need to drop our head ref lock and re-acquire the
2441                          * delayed ref lock and then re-check to make sure
2442                          * nobody got added.
2443                          */
2444                         spin_unlock(&locked_ref->lock);
2445                         spin_lock(&delayed_refs->lock);
2446                         spin_lock(&locked_ref->lock);
2447                         if (rb_first(&locked_ref->ref_root) ||
2448                             locked_ref->extent_op) {
2449                                 spin_unlock(&locked_ref->lock);
2450                                 spin_unlock(&delayed_refs->lock);
2451                                 continue;
2452                         }
2453                         ref->in_tree = 0;
2454                         delayed_refs->num_heads--;
2455                         rb_erase(&locked_ref->href_node,
2456                                  &delayed_refs->href_root);
2457                         spin_unlock(&delayed_refs->lock);
2458                 } else {
2459                         actual_count++;
2460                         ref->in_tree = 0;
2461                         rb_erase(&ref->rb_node, &locked_ref->ref_root);
2462                 }
2463                 atomic_dec(&delayed_refs->num_entries);
2464
2465                 if (!btrfs_delayed_ref_is_head(ref)) {
2466                         /*
2467                          * when we play the delayed ref, also correct the
2468                          * ref_mod on head
2469                          */
2470                         switch (ref->action) {
2471                         case BTRFS_ADD_DELAYED_REF:
2472                         case BTRFS_ADD_DELAYED_EXTENT:
2473                                 locked_ref->node.ref_mod -= ref->ref_mod;
2474                                 break;
2475                         case BTRFS_DROP_DELAYED_REF:
2476                                 locked_ref->node.ref_mod += ref->ref_mod;
2477                                 break;
2478                         default:
2479                                 WARN_ON(1);
2480                         }
2481                 }
2482                 spin_unlock(&locked_ref->lock);
2483
2484                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2485                                           must_insert_reserved);
2486
2487                 btrfs_free_delayed_extent_op(extent_op);
2488                 if (ret) {
2489                         locked_ref->processing = 0;
2490                         btrfs_delayed_ref_unlock(locked_ref);
2491                         btrfs_put_delayed_ref(ref);
2492                         btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
2493                         return ret;
2494                 }
2495
2496                 /*
2497                  * If this node is a head, that means all the refs in this head
2498                  * have been dealt with, and we will pick the next head to deal
2499                  * with, so we must unlock the head and drop it from the cluster
2500                  * list before we release it.
2501                  */
2502                 if (btrfs_delayed_ref_is_head(ref)) {
2503                         btrfs_delayed_ref_unlock(locked_ref);
2504                         locked_ref = NULL;
2505                 }
2506                 btrfs_put_delayed_ref(ref);
2507                 count++;
2508                 cond_resched();
2509         }
2510
2511         /*
2512          * We don't want to include ref heads since we can have empty ref heads,
2513          * and those would drastically skew our runtime down because for them we
2514          * just do accounting, no actual extent tree updates.
2515          */
2516         if (actual_count > 0) {
2517                 u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
2518                 u64 avg;
2519
2520                 /*
2521                  * We weigh the current average higher than our current runtime
2522                  * to avoid large swings in the average.
2523                  */
2524                 spin_lock(&delayed_refs->lock);
2525                 avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
2526                 avg = div64_u64(avg, 4);
2527                 fs_info->avg_delayed_ref_runtime = avg;
2528                 spin_unlock(&delayed_refs->lock);
2529         }
2530         return 0;
2531 }
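
/*
 * The update at the end of __btrfs_run_delayed_refs() is an exponential
 * moving average, avg = (3 * old_avg + runtime) / 4, so one batch only
 * moves the estimate a quarter of the way; e.g. an old average of 40us
 * and an 80us run yield a new average of 50us.
 */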
2532
2533 #ifdef SCRAMBLE_DELAYED_REFS
2534 /*
2535  * Normally delayed refs get processed in ascending bytenr order. This
2536  * correlates in most cases to the order added. To expose dependencies on this
2537  * order, we start to process the tree in the middle instead of the beginning.
2538  */
2539 static u64 find_middle(struct rb_root *root)
2540 {
2541         struct rb_node *n = root->rb_node;
2542         struct btrfs_delayed_ref_node *entry;
2543         int alt = 1;
2544         u64 middle;
2545         u64 first = 0, last = 0;
2546
2547         n = rb_first(root);
2548         if (n) {
2549                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2550                 first = entry->bytenr;
2551         }
2552         n = rb_last(root);
2553         if (n) {
2554                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2555                 last = entry->bytenr;
2556         }
2557         n = root->rb_node;
2558
2559         while (n) {
2560                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2561                 WARN_ON(!entry->in_tree);
2562
2563                 middle = entry->bytenr;
2564
2565                 if (alt)
2566                         n = n->rb_left;
2567                 else
2568                         n = n->rb_right;
2569
2570                 alt = 1 - alt;
2571         }
2572         return middle;
2573 }
2574 #endif
2575
2576 int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
2577                                          struct btrfs_fs_info *fs_info)
2578 {
2579         struct qgroup_update *qgroup_update;
2580         int ret = 0;
2581
2582         if (list_empty(&trans->qgroup_ref_list) !=
2583             !trans->delayed_ref_elem.seq) {
2584                 /* list without seq or seq without list */
2585                 btrfs_err(fs_info,
2586                         "qgroup accounting update error, list is%s empty, seq is %#x.%x",
2587                         list_empty(&trans->qgroup_ref_list) ? "" : " not",
2588                         (u32)(trans->delayed_ref_elem.seq >> 32),
2589                         (u32)trans->delayed_ref_elem.seq);
2590                 BUG();
2591         }
2592
2593         if (!trans->delayed_ref_elem.seq)
2594                 return 0;
2595
2596         while (!list_empty(&trans->qgroup_ref_list)) {
2597                 qgroup_update = list_first_entry(&trans->qgroup_ref_list,
2598                                                  struct qgroup_update, list);
2599                 list_del(&qgroup_update->list);
2600                 if (!ret)
2601                         ret = btrfs_qgroup_account_ref(
2602                                         trans, fs_info, qgroup_update->node,
2603                                         qgroup_update->extent_op);
2604                 kfree(qgroup_update);
2605         }
2606
2607         btrfs_put_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
2608
2609         return ret;
2610 }
2611
2612 static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
2613 {
2614         u64 num_bytes;
2615
2616         num_bytes = heads * (sizeof(struct btrfs_extent_item) +
2617                              sizeof(struct btrfs_extent_inline_ref));
2618         if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2619                 num_bytes += heads * sizeof(struct btrfs_tree_block_info);
2620
2621         /*
2622          * We don't ever fill up leaves all the way so multiply by 2 just to be
2623          * closer to what we're really going to want to use.
2624          */
2625         return div64_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
2626 }
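
/*
 * A rough worked example for heads_to_leaves(), assuming a
 * skinny-metadata fs where each head costs 33 bytes (a 24-byte extent
 * item plus a 9-byte inline ref) and a 16KiB leaf data area: 1000 heads
 * come to 33000 bytes, i.e. about 2 leaves.
 */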
2627
2628 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
2629                                        struct btrfs_root *root)
2630 {
2631         struct btrfs_block_rsv *global_rsv;
2632         u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
2633         u64 num_bytes;
2634         int ret = 0;
2635
2636         num_bytes = btrfs_calc_trans_metadata_size(root, 1);
2637         num_heads = heads_to_leaves(root, num_heads);
2638         if (num_heads > 1)
2639                 num_bytes += (num_heads - 1) * root->leafsize;
2640         num_bytes <<= 1;
2641         global_rsv = &root->fs_info->global_block_rsv;
2642
2643         /*
2644          * If we can't allocate any more chunks lets make sure we have _lots_ of
2645          * wiggle room since running delayed refs can create more delayed refs.
2646          */
2647         if (global_rsv->space_info->full)
2648                 num_bytes <<= 1;
2649
2650         spin_lock(&global_rsv->lock);
2651         if (global_rsv->reserved <= num_bytes)
2652                 ret = 1;
2653         spin_unlock(&global_rsv->lock);
2654         return ret;
2655 }
2656
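/*
 * Returns 1 when the caller should throttle and help process delayed
 * refs: either the queued entries would take more than a second to run
 * at the average ref runtime, or reserve space is getting tight.
 */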
2657 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
2658                                        struct btrfs_root *root)
2659 {
2660         struct btrfs_fs_info *fs_info = root->fs_info;
2661         u64 num_entries =
2662                 atomic_read(&trans->transaction->delayed_refs.num_entries);
2663         u64 avg_runtime;
2664
2665         smp_mb();
2666         avg_runtime = fs_info->avg_delayed_ref_runtime;
2667         if (num_entries * avg_runtime >= NSEC_PER_SEC)
2668                 return 1;
2669
2670         return btrfs_check_space_for_delayed_refs(trans, root);
2671 }
2672
2673 /*
2674  * this starts processing the delayed reference count updates and
2675  * extent insertions we have queued up so far.  count can be
2676  * 0, which means to process everything in the tree at the start
2677  * of the run (but not newly added entries), or it can be some target
2678  * number you'd like to process.
2679  *
2680  * Returns 0 on success or if called with an aborted transaction
2681  * Returns <0 on error and aborts the transaction
2682  */
2683 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2684                            struct btrfs_root *root, unsigned long count)
2685 {
2686         struct rb_node *node;
2687         struct btrfs_delayed_ref_root *delayed_refs;
2688         struct btrfs_delayed_ref_head *head;
2689         int ret;
2690         int run_all = count == (unsigned long)-1;
2691         int run_most = 0;
2692
2693         /* We'll clean this up in btrfs_cleanup_transaction */
2694         if (trans->aborted)
2695                 return 0;
2696
2697         if (root == root->fs_info->extent_root)
2698                 root = root->fs_info->tree_root;
2699
2700         btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
2701
2702         delayed_refs = &trans->transaction->delayed_refs;
2703         if (count == 0) {
2704                 count = atomic_read(&delayed_refs->num_entries) * 2;
2705                 run_most = 1;
2706         }
2707
2708 again:
2709 #ifdef SCRAMBLE_DELAYED_REFS
2710         delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2711 #endif
2712         ret = __btrfs_run_delayed_refs(trans, root, count);
2713         if (ret < 0) {
2714                 btrfs_abort_transaction(trans, root, ret);
2715                 return ret;
2716         }
2717
2718         if (run_all) {
2719                 if (!list_empty(&trans->new_bgs))
2720                         btrfs_create_pending_block_groups(trans, root);
2721
2722                 spin_lock(&delayed_refs->lock);
2723                 node = rb_first(&delayed_refs->href_root);
2724                 if (!node) {
2725                         spin_unlock(&delayed_refs->lock);
2726                         goto out;
2727                 }
2728                 count = (unsigned long)-1;
2729
2730                 while (node) {
2731                         head = rb_entry(node, struct btrfs_delayed_ref_head,
2732                                         href_node);
2733                         if (btrfs_delayed_ref_is_head(&head->node)) {
2734                                 struct btrfs_delayed_ref_node *ref;
2735
2736                                 ref = &head->node;
2737                                 atomic_inc(&ref->refs);
2738
2739                                 spin_unlock(&delayed_refs->lock);
2740                                 /*
2741                                  * Mutex was contended, block until it's
2742                                  * released and try again
2743                                  */
2744                                 mutex_lock(&head->mutex);
2745                                 mutex_unlock(&head->mutex);
2746
2747                                 btrfs_put_delayed_ref(ref);
2748                                 cond_resched();
2749                                 goto again;
2750                         } else {
2751                                 WARN_ON(1);
2752                         }
2753                         node = rb_next(node);
2754                 }
2755                 spin_unlock(&delayed_refs->lock);
2756                 cond_resched();
2757                 goto again;
2758         }
2759 out:
2760         assert_qgroups_uptodate(trans);
2761         return 0;
2762 }
2763
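/*
 * Queue a delayed extent op that just sets @flags on the extent item
 * for this range; the on-disk update happens when delayed refs are run.
 */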
2764 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2765                                 struct btrfs_root *root,
2766                                 u64 bytenr, u64 num_bytes, u64 flags,
2767                                 int level, int is_data)
2768 {
2769         struct btrfs_delayed_extent_op *extent_op;
2770         int ret;
2771
2772         extent_op = btrfs_alloc_delayed_extent_op();
2773         if (!extent_op)
2774                 return -ENOMEM;
2775
2776         extent_op->flags_to_set = flags;
2777         extent_op->update_flags = 1;
2778         extent_op->update_key = 0;
2779         extent_op->is_data = is_data ? 1 : 0;
2780         extent_op->level = level;
2781
2782         ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2783                                           num_bytes, extent_op);
2784         if (ret)
2785                 btrfs_free_delayed_extent_op(extent_op);
2786         return ret;
2787 }
2788
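/*
 * Scan the delayed refs queued against @bytenr.  Returns 1 if any ref
 * besides the given (root, objectid, offset) data ref exists (a cross
 * reference), 0 if not, and -EAGAIN when the head mutex was contended
 * and the caller should retry.
 */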
2789 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2790                                       struct btrfs_root *root,
2791                                       struct btrfs_path *path,
2792                                       u64 objectid, u64 offset, u64 bytenr)
2793 {
2794         struct btrfs_delayed_ref_head *head;
2795         struct btrfs_delayed_ref_node *ref;
2796         struct btrfs_delayed_data_ref *data_ref;
2797         struct btrfs_delayed_ref_root *delayed_refs;
2798         struct rb_node *node;
2799         int ret = 0;
2800
2801         delayed_refs = &trans->transaction->delayed_refs;
2802         spin_lock(&delayed_refs->lock);
2803         head = btrfs_find_delayed_ref_head(trans, bytenr);
2804         if (!head) {
2805                 spin_unlock(&delayed_refs->lock);
2806                 return 0;
2807         }
2808
2809         if (!mutex_trylock(&head->mutex)) {
2810                 atomic_inc(&head->node.refs);
2811                 spin_unlock(&delayed_refs->lock);
2812
2813                 btrfs_release_path(path);
2814
2815                 /*
2816                  * Mutex was contended, block until it's released and let
2817                  * caller try again
2818                  */
2819                 mutex_lock(&head->mutex);
2820                 mutex_unlock(&head->mutex);
2821                 btrfs_put_delayed_ref(&head->node);
2822                 return -EAGAIN;
2823         }
2824         spin_unlock(&delayed_refs->lock);
2825
2826         spin_lock(&head->lock);
2827         node = rb_first(&head->ref_root);
2828         while (node) {
2829                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2830                 node = rb_next(node);
2831
2832                 /* If it's a shared ref we know a cross reference exists */
2833                 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
2834                         ret = 1;
2835                         break;
2836                 }
2837
2838                 data_ref = btrfs_delayed_node_to_data_ref(ref);
2839
2840                 /*
2841                  * If our ref doesn't match the one we're currently looking at
2842                  * then we have a cross reference.
2843                  */
2844                 if (data_ref->root != root->root_key.objectid ||
2845                     data_ref->objectid != objectid ||
2846                     data_ref->offset != offset) {
2847                         ret = 1;
2848                         break;
2849                 }
2850         }
2851         spin_unlock(&head->lock);
2852         mutex_unlock(&head->mutex);
2853         return ret;
2854 }
2855
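/*
 * Check the committed extent tree.  Returns 0 only when the extent at
 * @bytenr carries exactly one inline data ref matching (root, objectid,
 * offset) and was created after the last snapshot of @root, 1 when
 * other references may exist, and -ENOENT when no matching extent item
 * is found.
 */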
2856 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2857                                         struct btrfs_root *root,
2858                                         struct btrfs_path *path,
2859                                         u64 objectid, u64 offset, u64 bytenr)
2860 {
2861         struct btrfs_root *extent_root = root->fs_info->extent_root;
2862         struct extent_buffer *leaf;
2863         struct btrfs_extent_data_ref *ref;
2864         struct btrfs_extent_inline_ref *iref;
2865         struct btrfs_extent_item *ei;
2866         struct btrfs_key key;
2867         u32 item_size;
2868         int ret;
2869
2870         key.objectid = bytenr;
2871         key.offset = (u64)-1;
2872         key.type = BTRFS_EXTENT_ITEM_KEY;
2873
2874         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2875         if (ret < 0)
2876                 goto out;
2877         BUG_ON(ret == 0); /* Corruption */
2878
2879         ret = -ENOENT;
2880         if (path->slots[0] == 0)
2881                 goto out;
2882
2883         path->slots[0]--;
2884         leaf = path->nodes[0];
2885         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2886
2887         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2888                 goto out;
2889
2890         ret = 1;
2891         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2892 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2893         if (item_size < sizeof(*ei)) {
2894                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2895                 goto out;
2896         }
2897 #endif
2898         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2899
2900         if (item_size != sizeof(*ei) +
2901             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2902                 goto out;
2903
2904         if (btrfs_extent_generation(leaf, ei) <=
2905             btrfs_root_last_snapshot(&root->root_item))
2906                 goto out;
2907
2908         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2909         if (btrfs_extent_inline_ref_type(leaf, iref) !=
2910             BTRFS_EXTENT_DATA_REF_KEY)
2911                 goto out;
2912
2913         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2914         if (btrfs_extent_refs(leaf, ei) !=
2915             btrfs_extent_data_ref_count(leaf, ref) ||
2916             btrfs_extent_data_ref_root(leaf, ref) !=
2917             root->root_key.objectid ||
2918             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2919             btrfs_extent_data_ref_offset(leaf, ref) != offset)
2920                 goto out;
2921
2922         ret = 0;
2923 out:
2924         return ret;
2925 }
2926
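/*
 * Returns a positive value when someone other than (root, objectid,
 * offset) may hold a reference on the extent at @bytenr, checking both
 * the committed extent tree and the pending delayed refs; 0 when the
 * ref is ours alone, negative errno on failure.
 */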
2927 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2928                           struct btrfs_root *root,
2929                           u64 objectid, u64 offset, u64 bytenr)
2930 {
2931         struct btrfs_path *path;
2932         int ret;
2933         int ret2;
2934
2935         path = btrfs_alloc_path();
2936         if (!path)
2937                 return -ENOMEM;
2938
2939         do {
2940                 ret = check_committed_ref(trans, root, path, objectid,
2941                                           offset, bytenr);
2942                 if (ret && ret != -ENOENT)
2943                         goto out;
2944
2945                 ret2 = check_delayed_ref(trans, root, path, objectid,
2946                                          offset, bytenr);
2947         } while (ret2 == -EAGAIN);
2948
2949         if (ret2 && ret2 != -ENOENT) {
2950                 ret = ret2;
2951                 goto out;
2952         }
2953
2954         if (ret != -ENOENT || ret2 != -ENOENT)
2955                 ret = 0;
2956 out:
2957         btrfs_free_path(path);
2958         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
2959                 WARN_ON(ret > 0);
2960         return ret;
2961 }
2962
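/*
 * Walk all the pointers in @buf and take or drop one reference on each
 * extent they refer to: file extent disk ranges for leaves, child
 * blocks for interior nodes.  @inc picks btrfs_inc_extent_ref vs
 * btrfs_free_extent.
 */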
2963 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2964                            struct btrfs_root *root,
2965                            struct extent_buffer *buf,
2966                            int full_backref, int inc, int for_cow)
2967 {
2968         u64 bytenr;
2969         u64 num_bytes;
2970         u64 parent;
2971         u64 ref_root;
2972         u32 nritems;
2973         struct btrfs_key key;
2974         struct btrfs_file_extent_item *fi;
2975         int i;
2976         int level;
2977         int ret = 0;
2978         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
2979                             u64, u64, u64, u64, u64, u64, int);
2980
2981         ref_root = btrfs_header_owner(buf);
2982         nritems = btrfs_header_nritems(buf);
2983         level = btrfs_header_level(buf);
2984
2985         if (!root->ref_cows && level == 0)
2986                 return 0;
2987
2988         if (inc)
2989                 process_func = btrfs_inc_extent_ref;
2990         else
2991                 process_func = btrfs_free_extent;
2992
2993         if (full_backref)
2994                 parent = buf->start;
2995         else
2996                 parent = 0;
2997
2998         for (i = 0; i < nritems; i++) {
2999                 if (level == 0) {
3000                         btrfs_item_key_to_cpu(buf, &key, i);
3001                         if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
3002                                 continue;
3003                         fi = btrfs_item_ptr(buf, i,
3004                                             struct btrfs_file_extent_item);
3005                         if (btrfs_file_extent_type(buf, fi) ==
3006                             BTRFS_FILE_EXTENT_INLINE)
3007                                 continue;
3008                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
3009                         if (bytenr == 0)
3010                                 continue;
3011
3012                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
3013                         key.offset -= btrfs_file_extent_offset(buf, fi);
3014                         ret = process_func(trans, root, bytenr, num_bytes,
3015                                            parent, ref_root, key.objectid,
3016                                            key.offset, for_cow);
3017                         if (ret)
3018                                 goto fail;
3019                 } else {
3020                         bytenr = btrfs_node_blockptr(buf, i);
3021                         num_bytes = btrfs_level_size(root, level - 1);
3022                         ret = process_func(trans, root, bytenr, num_bytes,
3023                                            parent, ref_root, level - 1, 0,
3024                                            for_cow);
3025                         if (ret)
3026                                 goto fail;
3027                 }
3028         }
3029         return 0;
3030 fail:
3031         return ret;
3032 }
3033
3034 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3035                   struct extent_buffer *buf, int full_backref, int for_cow)
3036 {
3037         return __btrfs_mod_ref(trans, root, buf, full_backref, 1, for_cow);
3038 }
3039
3040 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3041                   struct extent_buffer *buf, int full_backref, int for_cow)
3042 {
3043         return __btrfs_mod_ref(trans, root, buf, full_backref, 0, for_cow);
3044 }
3045
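/*
 * Copy the in-memory block group item of @cache over its item in the
 * extent tree and mark the leaf dirty.
 */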
3046 static int write_one_cache_group(struct btrfs_trans_handle *trans,
3047                                  struct btrfs_root *root,
3048                                  struct btrfs_path *path,
3049                                  struct btrfs_block_group_cache *cache)
3050 {
3051         int ret;
3052         struct btrfs_root *extent_root = root->fs_info->extent_root;
3053         unsigned long bi;
3054         struct extent_buffer *leaf;
3055
3056         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3057         if (ret < 0)
3058                 goto fail;
3059         BUG_ON(ret); /* Corruption */
3060
3061         leaf = path->nodes[0];
3062         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3063         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3064         btrfs_mark_buffer_dirty(leaf);
3065         btrfs_release_path(path);
3066 fail:
3067         if (ret) {
3068                 btrfs_abort_transaction(trans, root, ret);
3069                 return ret;
3070         }
3071         return 0;
3072 }
3074
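/*
 * Return the block group that follows @cache in the block group cache
 * rbtree, dropping the reference on @cache and taking one on the
 * result (NULL at the end of the tree).
 */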
3075 static struct btrfs_block_group_cache *
3076 next_block_group(struct btrfs_root *root,
3077                  struct btrfs_block_group_cache *cache)
3078 {
3079         struct rb_node *node;
3080         spin_lock(&root->fs_info->block_group_cache_lock);
3081         node = rb_next(&cache->cache_node);
3082         btrfs_put_block_group(cache);
3083         if (node) {
3084                 cache = rb_entry(node, struct btrfs_block_group_cache,
3085                                  cache_node);
3086                 btrfs_get_block_group(cache);
3087         } else
3088                 cache = NULL;
3089         spin_unlock(&root->fs_info->block_group_cache_lock);
3090         return cache;
3091 }
3092
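/*
 * Get the free space cache inode for @block_group ready for this
 * transaction: create or truncate it as needed and preallocate room so
 * the cache can be written out at commit time.
 */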
3093 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3094                             struct btrfs_trans_handle *trans,
3095                             struct btrfs_path *path)
3096 {
3097         struct btrfs_root *root = block_group->fs_info->tree_root;
3098         struct inode *inode = NULL;
3099         u64 alloc_hint = 0;
3100         int dcs = BTRFS_DC_ERROR;
3101         int num_pages = 0;
3102         int retries = 0;
3103         int ret = 0;
3104
3105         /*
3106          * If this block group is smaller than 100 megs, don't bother caching
3107          * the block group.
3108          */
3109         if (block_group->key.offset < (100 * 1024 * 1024)) {
3110                 spin_lock(&block_group->lock);
3111                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3112                 spin_unlock(&block_group->lock);
3113                 return 0;
3114         }
3115
3116 again:
3117         inode = lookup_free_space_inode(root, block_group, path);
3118         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3119                 ret = PTR_ERR(inode);
3120                 btrfs_release_path(path);
3121                 goto out;
3122         }
3123
3124         if (IS_ERR(inode)) {
3125                 BUG_ON(retries);
3126                 retries++;
3127
3128                 if (block_group->ro)
3129                         goto out_free;
3130
3131                 ret = create_free_space_inode(root, trans, block_group, path);
3132                 if (ret)
3133                         goto out_free;
3134                 goto again;
3135         }
3136
3137         /* We've already set up this transaction, go ahead and exit */
3138         if (block_group->cache_generation == trans->transid &&
3139             i_size_read(inode)) {
3140                 dcs = BTRFS_DC_SETUP;
3141                 goto out_put;
3142         }
3143
3144         /*
3145          * We want to set the generation to 0 so that if anything goes wrong
3146          * from here on out we know not to trust this cache the next time we
3147          * load it.
3148          */
3149         BTRFS_I(inode)->generation = 0;
3150         ret = btrfs_update_inode(trans, root, inode);
3151         WARN_ON(ret);
3152
3153         if (i_size_read(inode) > 0) {
3154                 ret = btrfs_check_trunc_cache_free_space(root,
3155                                         &root->fs_info->global_block_rsv);
3156                 if (ret)
3157                         goto out_put;
3158
3159                 ret = btrfs_truncate_free_space_cache(root, trans, inode);
3160                 if (ret)
3161                         goto out_put;
3162         }
3163
3164         spin_lock(&block_group->lock);
3165         if (block_group->cached != BTRFS_CACHE_FINISHED ||
3166             !btrfs_test_opt(root, SPACE_CACHE)) {
3167                 /*
3168                  * don't bother trying to write stuff out _if_
3169                  * a) we're not cached, or
3170                  * b) we're mounted with the nospace_cache option.
3171                  */
3172                 dcs = BTRFS_DC_WRITTEN;
3173                 spin_unlock(&block_group->lock);
3174                 goto out_put;
3175         }
3176         spin_unlock(&block_group->lock);
3177
3178         /*
3179          * Try to preallocate enough space based on how big the block group is.
3180          * Keep in mind this has to include any pinned space which could end up
3181          * taking up quite a bit since it's not folded into the other space
3182          * cache.
3183          */
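        /*
         * e.g. assuming 4K pages: a 1GiB block group yields
         * 4 * 16 * 4096 bytes = 256KiB of preallocated cache space.
         */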
3184         num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024);
3185         if (!num_pages)
3186                 num_pages = 1;
3187
3188         num_pages *= 16;
3189         num_pages *= PAGE_CACHE_SIZE;
3190
3191         ret = btrfs_check_data_free_space(inode, num_pages);
3192         if (ret)
3193                 goto out_put;
3194
3195         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3196                                               num_pages, num_pages,
3197                                               &alloc_hint);
3198         if (!ret)
3199                 dcs = BTRFS_DC_SETUP;
3200         btrfs_free_reserved_data_space(inode, num_pages);
3201
3202 out_put:
3203         iput(inode);
3204 out_free:
3205         btrfs_release_path(path);
3206 out:
3207         spin_lock(&block_group->lock);
3208         if (!ret && dcs == BTRFS_DC_SETUP)
3209                 block_group->cache_generation = trans->transid;
3210         block_group->disk_cache_state = dcs;
3211         spin_unlock(&block_group->lock);
3212
3213         return ret;
3214 }
3215
3216 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3217                                    struct btrfs_root *root)
3218 {
3219         struct btrfs_block_group_cache *cache;
3220         int err = 0;
3221         struct btrfs_path *path;
3222         u64 last = 0;
3223
3224         path = btrfs_alloc_path();
3225         if (!path)
3226                 return -ENOMEM;
3227
3228 again:
3229         while (1) {
3230                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3231                 while (cache) {
3232                         if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3233                                 break;
3234                         cache = next_block_group(root, cache);
3235                 }
3236                 if (!cache) {
3237                         if (last == 0)
3238                                 break;
3239                         last = 0;
3240                         continue;
3241                 }
3242                 err = cache_save_setup(cache, trans, path);
3243                 last = cache->key.objectid + cache->key.offset;
3244                 btrfs_put_block_group(cache);
3245         }
3246
3247         while (1) {
3248                 if (last == 0) {
3249                         err = btrfs_run_delayed_refs(trans, root,
3250                                                      (unsigned long)-1);
3251                         if (err) /* File system offline */
3252                                 goto out;
3253                 }
3254
3255                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3256                 while (cache) {
3257                         if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
3258                                 btrfs_put_block_group(cache);
3259                                 goto again;
3260                         }
3261
3262                         if (cache->dirty)
3263                                 break;
3264                         cache = next_block_group(root, cache);
3265                 }
3266                 if (!cache) {
3267                         if (last == 0)
3268                                 break;
3269                         last = 0;
3270                         continue;
3271                 }
3272
3273                 if (cache->disk_cache_state == BTRFS_DC_SETUP)
3274                         cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
3275                 cache->dirty = 0;
3276                 last = cache->key.objectid + cache->key.offset;
3277
3278                 err = write_one_cache_group(trans, root, path, cache);
3279                 btrfs_put_block_group(cache);
3280                 if (err) /* File system offline */
3281                         goto out;
3282         }
3283
3284         while (1) {
3285                 /*
3286                  * I don't think this is needed since we're just marking our
3287                  * preallocated extent as written, but just in case, it can't
3288                  * hurt.
3289                  */
3290                 if (last == 0) {
3291                         err = btrfs_run_delayed_refs(trans, root,
3292                                                      (unsigned long)-1);
3293                         if (err) /* File system offline */
3294                                 goto out;
3295                 }
3296
3297                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3298                 while (cache) {
3299                         /*
3300                          * Really this shouldn't happen, but it could if we
3301                          * couldn't write the entire preallocated extent and
3302                          * splitting the extent resulted in a new block.
3303                          */
3304                         if (cache->dirty) {
3305                                 btrfs_put_block_group(cache);
3306                                 goto again;
3307                         }
3308                         if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3309                                 break;
3310                         cache = next_block_group(root, cache);
3311                 }
3312                 if (!cache) {
3313                         if (last == 0)
3314                                 break;
3315                         last = 0;
3316                         continue;
3317                 }
3318
3319                 err = btrfs_write_out_cache(root, trans, cache, path);
3320
3321                 /*
3322                  * If we didn't have an error then the cache state is still
3323                  * NEED_WRITE, so we can set it to WRITTEN.
3324                  */
3325                 if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3326                         cache->disk_cache_state = BTRFS_DC_WRITTEN;
3327                 last = cache->key.objectid + cache->key.offset;
3328                 btrfs_put_block_group(cache);
3329         }
3330 out:
3331
3332         btrfs_free_path(path);
3333         return err;
3334 }
3335
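/*
 * Returns 1 when new allocations at @bytenr are off limits, i.e. the
 * containing block group is read-only or cannot be found.
 */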
3336 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3337 {
3338         struct btrfs_block_group_cache *block_group;
3339         int readonly = 0;
3340
3341         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3342         if (!block_group || block_group->ro)
3343                 readonly = 1;
3344         if (block_group)
3345                 btrfs_put_block_group(block_group);
3346         return readonly;
3347 }
3348
3349 static const char *alloc_name(u64 flags)
3350 {
3351         switch (flags) {
3352         case BTRFS_BLOCK_GROUP_METADATA|BTRFS_BLOCK_GROUP_DATA:
3353                 return "mixed";
3354         case BTRFS_BLOCK_GROUP_METADATA:
3355                 return "metadata";
3356         case BTRFS_BLOCK_GROUP_DATA:
3357                 return "data";
3358         case BTRFS_BLOCK_GROUP_SYSTEM:
3359                 return "system";
3360         default:
3361                 WARN_ON(1);
3362                 return "invalid-combination";
3363         }
3364 }
3365
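/*
 * Fold @total_bytes/@bytes_used into the space info matching @flags,
 * allocating and registering a new btrfs_space_info (with its sysfs
 * kobject) when none exists yet.
 */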
3366 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3367                              u64 total_bytes, u64 bytes_used,
3368                              struct btrfs_space_info **space_info)
3369 {
3370         struct btrfs_space_info *found;
3371         int i;
3372         int factor;
3373         int ret;
3374
3375         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3376                      BTRFS_BLOCK_GROUP_RAID10))
3377                 factor = 2;
3378         else
3379                 factor = 1;
3380
3381         found = __find_space_info(info, flags);
3382         if (found) {
3383                 spin_lock(&found->lock);
3384                 found->total_bytes += total_bytes;
3385                 found->disk_total += total_bytes * factor;
3386                 found->bytes_used += bytes_used;
3387                 found->disk_used += bytes_used * factor;
3388                 found->full = 0;
3389                 spin_unlock(&found->lock);
3390                 *space_info = found;
3391                 return 0;
3392         }
3393         found = kzalloc(sizeof(*found), GFP_NOFS);
3394         if (!found)
3395                 return -ENOMEM;
3396
3397         ret = percpu_counter_init(&found->total_bytes_pinned, 0);
3398         if (ret) {
3399                 kfree(found);
3400                 return ret;
3401         }
3402
3403         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
3404                 INIT_LIST_HEAD(&found->block_groups[i]);
3405                 kobject_init(&found->block_group_kobjs[i], &btrfs_raid_ktype);
3406         }
3407         init_rwsem(&found->groups_sem);
3408         spin_lock_init(&found->lock);
3409         found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3410         found->total_bytes = total_bytes;
3411         found->disk_total = total_bytes * factor;
3412         found->bytes_used = bytes_used;
3413         found->disk_used = bytes_used * factor;
3414         found->bytes_pinned = 0;
3415         found->bytes_reserved = 0;
3416         found->bytes_readonly = 0;
3417         found->bytes_may_use = 0;
3418         found->full = 0;
3419         found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3420         found->chunk_alloc = 0;
3421         found->flush = 0;
3422         init_waitqueue_head(&found->wait);
3423
3424         ret = kobject_init_and_add(&found->kobj, &space_info_ktype,
3425                                     info->space_info_kobj, "%s",
3426                                     alloc_name(found->flags));
3427         if (ret) {
                percpu_counter_destroy(&found->total_bytes_pinned);
3428                 kfree(found);
3429                 return ret;
3430         }
3431
3432         *space_info = found;
3433         list_add_rcu(&found->list, &info->space_info);
3434         if (flags & BTRFS_BLOCK_GROUP_DATA)
3435                 info->data_sinfo = found;
3436
3437         return ret;
3438 }
3439
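/* Record that chunks with this profile now exist, per block group type. */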
3440 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3441 {
3442         u64 extra_flags = chunk_to_extended(flags) &
3443                                 BTRFS_EXTENDED_PROFILE_MASK;
3444
3445         write_seqlock(&fs_info->profiles_lock);
3446         if (flags & BTRFS_BLOCK_GROUP_DATA)
3447                 fs_info->avail_data_alloc_bits |= extra_flags;
3448         if (flags & BTRFS_BLOCK_GROUP_METADATA)
3449                 fs_info->avail_metadata_alloc_bits |= extra_flags;
3450         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3451                 fs_info->avail_system_alloc_bits |= extra_flags;
3452         write_sequnlock(&fs_info->profiles_lock);
3453 }
3454
3455 /*
3456  * returns target flags in extended format or 0 if restripe for this
3457  * chunk_type is not in progress
3458  *
3459  * should be called with either volume_mutex or balance_lock held
3460  */
3461 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3462 {
3463         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3464         u64 target = 0;
3465
3466         if (!bctl)
3467                 return 0;
3468
3469         if (flags & BTRFS_BLOCK_GROUP_DATA &&
3470             bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3471                 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3472         } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3473                    bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3474                 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3475         } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3476                    bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3477                 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3478         }
3479
3480         return target;
3481 }
3482
3483 /*
3484  * @flags: available profiles in extended format (see ctree.h)
3485  *
3486  * Returns reduced profile in chunk format.  If profile changing is in
3487  * progress (either running or paused) picks the target profile (if it's
3488  * already available), otherwise falls back to plain reducing.
3489  */
3490 static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3491 {
3492         /*
3493          * we add in the count of missing devices because we want
3494          * to make sure that any RAID levels on a degraded FS
3495          * continue to be honored.
3496          */
3497         u64 num_devices = root->fs_info->fs_devices->rw_devices +
3498                 root->fs_info->fs_devices->missing_devices;
3499         u64 target;
3500         u64 tmp;
3501
3502         /*
3503          * see if restripe for this chunk_type is in progress, if so
3504          * try to reduce to the target profile
3505          */
3506         spin_lock(&root->fs_info->balance_lock);
3507         target = get_restripe_target(root->fs_info, flags);
3508         if (target) {
3509                 /* pick target profile only if it's already available */
3510                 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3511                         spin_unlock(&root->fs_info->balance_lock);
3512                         return extended_to_chunk(target);
3513                 }
3514         }
3515         spin_unlock(&root->fs_info->balance_lock);
3516
3517         /* First, mask out the RAID levels which aren't possible */
3518         if (num_devices == 1)
3519                 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0 |
3520                            BTRFS_BLOCK_GROUP_RAID5);
3521         if (num_devices < 3)
3522                 flags &= ~BTRFS_BLOCK_GROUP_RAID6;
3523         if (num_devices < 4)
3524                 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3525
3526         tmp = flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3527                        BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID5 |
3528                        BTRFS_BLOCK_GROUP_RAID6 | BTRFS_BLOCK_GROUP_RAID10);
3529         flags &= ~tmp;
3530
3531         if (tmp & BTRFS_BLOCK_GROUP_RAID6)
3532                 tmp = BTRFS_BLOCK_GROUP_RAID6;
3533         else if (tmp & BTRFS_BLOCK_GROUP_RAID5)
3534                 tmp = BTRFS_BLOCK_GROUP_RAID5;
3535         else if (tmp & BTRFS_BLOCK_GROUP_RAID10)
3536                 tmp = BTRFS_BLOCK_GROUP_RAID10;
3537         else if (tmp & BTRFS_BLOCK_GROUP_RAID1)
3538                 tmp = BTRFS_BLOCK_GROUP_RAID1;
3539         else if (tmp & BTRFS_BLOCK_GROUP_RAID0)
3540                 tmp = BTRFS_BLOCK_GROUP_RAID0;
3541
3542         return extended_to_chunk(flags | tmp);
3543 }
3544
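/*
 * Fold the currently available allocation bits for this group type into
 * @flags (stable against concurrent updates via the profiles seqlock)
 * and reduce the result to a single usable chunk profile.
 */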
3545 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
3546 {
3547         unsigned seq;
3548
3549         do {
3550                 seq = read_seqbegin(&root->fs_info->profiles_lock);
3551
3552                 if (flags & BTRFS_BLOCK_GROUP_DATA)
3553                         flags |= root->fs_info->avail_data_alloc_bits;
3554                 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3555                         flags |= root->fs_info->avail_system_alloc_bits;
3556                 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3557                         flags |= root->fs_info->avail_metadata_alloc_bits;
3558         } while (read_seqretry(&root->fs_info->profiles_lock, seq));
3559
3560         return btrfs_reduce_alloc_profile(root, flags);
3561 }
3562
3563 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3564 {
3565         u64 flags;
3566         u64 ret;
3567
3568         if (data)
3569                 flags = BTRFS_BLOCK_GROUP_DATA;
3570         else if (root == root->fs_info->chunk_root)
3571                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3572         else
3573                 flags = BTRFS_BLOCK_GROUP_METADATA;
3574
3575         ret = get_alloc_profile(root, flags);
3576         return ret;
3577 }
3578
3579 /*
3580  * This will check the space that the inode allocates from to make sure we have
3581  * enough space for bytes.
3582  */
3583 int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
3584 {
3585         struct btrfs_space_info *data_sinfo;
3586         struct btrfs_root *root = BTRFS_I(inode)->root;
3587         struct btrfs_fs_info *fs_info = root->fs_info;
3588         u64 used;
3589         int ret = 0, committed = 0, alloc_chunk = 1;
3590
3591         /* make sure bytes are sectorsize aligned */
3592         bytes = ALIGN(bytes, root->sectorsize);
3593
3594         if (btrfs_is_free_space_inode(inode)) {
3595                 committed = 1;
3596                 ASSERT(current->journal_info);
3597         }
3598
3599         data_sinfo = fs_info->data_sinfo;
3600         if (!data_sinfo)
3601                 goto alloc;
3602
3603 again:
3604         /* make sure we have enough space to handle the data first */
3605         spin_lock(&data_sinfo->lock);
3606         used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3607                 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3608                 data_sinfo->bytes_may_use;
3609
3610         if (used + bytes > data_sinfo->total_bytes) {
3611                 struct btrfs_trans_handle *trans;
3612
3613                 /*
3614                  * if we don't have enough free bytes in this space then we need
3615                  * to alloc a new chunk.
3616                  */
3617                 if (!data_sinfo->full && alloc_chunk) {
3618                         u64 alloc_target;
3619
3620                         data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3621                         spin_unlock(&data_sinfo->lock);
3622 alloc:
3623                         alloc_target = btrfs_get_alloc_profile(root, 1);
3624                         /*
3625                          * It is ugly that we don't call a nolock join
3626                          * transaction for the free space inode case here.
3627                          * But it is safe because we only do the data space
3628                          * reservation for the free space cache while inside a
3629                          * transaction context; the common join transaction
3630                          * just increases the use count of the current
3631                          * transaction handle and doesn't try to acquire the
3632                          * fs trans_lock.
3633                          */
3634                         trans = btrfs_join_transaction(root);
3635                         if (IS_ERR(trans))
3636                                 return PTR_ERR(trans);
3637
3638                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3639                                              alloc_target,
3640                                              CHUNK_ALLOC_NO_FORCE);
3641                         btrfs_end_transaction(trans, root);
3642                         if (ret < 0) {
3643                                 if (ret != -ENOSPC)
3644                                         return ret;
3645                                 else
3646                                         goto commit_trans;
3647                         }
3648
3649                         if (!data_sinfo)
3650                                 data_sinfo = fs_info->data_sinfo;
3651
3652                         goto again;
3653                 }
3654
3655                 /*
3656                  * If we don't have enough pinned space to deal with this
3657                  * allocation don't bother committing the transaction.
3658                  */
3659                 if (percpu_counter_compare(&data_sinfo->total_bytes_pinned,
3660                                            bytes) < 0)
3661                         committed = 1;
3662                 spin_unlock(&data_sinfo->lock);
3663
3664                 /* commit the current transaction and try again */
3665 commit_trans:
3666                 if (!committed &&
3667                     !atomic_read(&root->fs_info->open_ioctl_trans)) {
3668                         committed = 1;
3669
3670                         trans = btrfs_join_transaction(root);
3671                         if (IS_ERR(trans))
3672                                 return PTR_ERR(trans);
3673                         ret = btrfs_commit_transaction(trans, root);
3674                         if (ret)
3675                                 return ret;
3676                         goto again;
3677                 }
3678
3679                 trace_btrfs_space_reservation(root->fs_info,
3680                                               "space_info:enospc",
3681                                               data_sinfo->flags, bytes, 1);
3682                 return -ENOSPC;
3683         }
3684         data_sinfo->bytes_may_use += bytes;
3685         trace_btrfs_space_reservation(root->fs_info, "space_info",
3686                                       data_sinfo->flags, bytes, 1);
3687         spin_unlock(&data_sinfo->lock);
3688
3689         return 0;
3690 }
3691
3692 /*
3693  * Called if we need to clear a data reservation for this inode.
3694  */
3695 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3696 {
3697         struct btrfs_root *root = BTRFS_I(inode)->root;
3698         struct btrfs_space_info *data_sinfo;
3699
3700         /* make sure bytes are sectorsize aligned */
3701         bytes = ALIGN(bytes, root->sectorsize);
3702
3703         data_sinfo = root->fs_info->data_sinfo;
3704         spin_lock(&data_sinfo->lock);
3705         WARN_ON(data_sinfo->bytes_may_use < bytes);
3706         data_sinfo->bytes_may_use -= bytes;
3707         trace_btrfs_space_reservation(root->fs_info, "space_info",
3708                                       data_sinfo->flags, bytes, 0);
3709         spin_unlock(&data_sinfo->lock);
3710 }
3711
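/* Flag every metadata space_info so its next chunk allocation is forced. */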
3712 static void force_metadata_allocation(struct btrfs_fs_info *info)
3713 {
3714         struct list_head *head = &info->space_info;
3715         struct btrfs_space_info *found;
3716
3717         rcu_read_lock();
3718         list_for_each_entry_rcu(found, head, list) {
3719                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3720                         found->force_alloc = CHUNK_ALLOC_FORCE;
3721         }
3722         rcu_read_unlock();
3723 }
3724
3725 static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
3726 {
3727         return (global->size << 1);
3728 }
3729
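/*
 * Decide whether to allocate a new chunk for @sinfo: always for
 * CHUNK_ALLOC_FORCE, when free space falls under roughly 1% of the fs
 * for CHUNK_ALLOC_LIMITED, and otherwise only once about 80% of the
 * existing space (counting the global reserve for metadata) is used.
 */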
3730 static int should_alloc_chunk(struct btrfs_root *root,
3731                               struct btrfs_space_info *sinfo, int force)
3732 {
3733         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3734         u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3735         u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
3736         u64 thresh;
3737
3738         if (force == CHUNK_ALLOC_FORCE)
3739                 return 1;
3740
3741         /*
3742          * We need to take into account the global rsv because for all intents
3743          * and purposes it's used space.  Don't worry about locking the
3744          * global_rsv, it doesn't change except when the transaction commits.
3745          */
3746         if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
3747                 num_allocated += calc_global_rsv_need_space(global_rsv);
3748
3749         /*
3750          * in limited mode, we want to have some free space up to
3751          * about 1% of the FS size.
3752          */
3753         if (force == CHUNK_ALLOC_LIMITED) {
3754                 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
3755                 thresh = max_t(u64, 64 * 1024 * 1024,
3756                                div_factor_fine(thresh, 1));
3757
3758                 if (num_bytes - num_allocated < thresh)
3759                         return 1;
3760         }
3761
3762         if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
3763                 return 0;
3764         return 1;
3765 }
3766
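/*
 * Worst-case SYSTEM space needed to allocate a chunk of @type: one
 * update per device item the new chunk touches, plus the chunk tree
 * update itself.
 */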
3767 static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
3768 {
3769         u64 num_dev;
3770
3771         if (type & (BTRFS_BLOCK_GROUP_RAID10 |
3772                     BTRFS_BLOCK_GROUP_RAID0 |
3773                     BTRFS_BLOCK_GROUP_RAID5 |
3774                     BTRFS_BLOCK_GROUP_RAID6))
3775                 num_dev = root->fs_info->fs_devices->rw_devices;
3776         else if (type & BTRFS_BLOCK_GROUP_RAID1)
3777                 num_dev = 2;
3778         else
3779                 num_dev = 1;    /* DUP or single */
3780
3781         /* metadata for updating devices and the chunk tree */
3782         return btrfs_calc_trans_metadata_size(root, num_dev + 1);
3783 }
3784
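/*
 * Make sure the SYSTEM space can absorb the metadata updates that
 * allocating a chunk of @type will generate; allocate a new SYSTEM
 * chunk first when it is running low.
 */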
3785 static void check_system_chunk(struct btrfs_trans_handle *trans,
3786                                struct btrfs_root *root, u64 type)
3787 {
3788         struct btrfs_space_info *info;
3789         u64 left;
3790         u64 thresh;
3791
3792         info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3793         spin_lock(&info->lock);
3794         left = info->total_bytes - info->bytes_used - info->bytes_pinned -
3795                 info->bytes_reserved - info->bytes_readonly;
3796         spin_unlock(&info->lock);
3797
3798         thresh = get_system_chunk_thresh(root, type);
3799         if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
3800                 btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
3801                         left, thresh, type);
3802                 dump_space_info(info, 0, 0);
3803         }
3804
3805         if (left < thresh) {
3806                 u64 flags;
3807
3808                 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
3809                 btrfs_alloc_chunk(trans, root, flags);
3810         }
3811 }
3812
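/*
 * Allocate a new chunk of @flags if should_alloc_chunk() agrees.  Only
 * one allocation per space_info runs at a time; racing callers block on
 * chunk_mutex and re-evaluate.  Returns 1 when a chunk was allocated,
 * 0 when none was needed, negative errno on failure (-ENOSPC also
 * marks the space_info full).
 */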
3813 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3814                           struct btrfs_root *extent_root, u64 flags, int force)
3815 {
3816         struct btrfs_space_info *space_info;
3817         struct btrfs_fs_info *fs_info = extent_root->fs_info;
3818         int wait_for_alloc = 0;
3819         int ret = 0;
3820
3821         /* Don't re-enter if we're already allocating a chunk */
3822         if (trans->allocating_chunk)
3823                 return -ENOSPC;
3824
3825         space_info = __find_space_info(extent_root->fs_info, flags);
3826         if (!space_info) {
3827                 ret = update_space_info(extent_root->fs_info, flags,
3828                                         0, 0, &space_info);
3829                 BUG_ON(ret); /* -ENOMEM */
3830         }
3831         BUG_ON(!space_info); /* Logic error */
3832
3833 again:
3834         spin_lock(&space_info->lock);
3835         if (force < space_info->force_alloc)
3836                 force = space_info->force_alloc;
3837         if (space_info->full) {
3838                 if (should_alloc_chunk(extent_root, space_info, force))
3839                         ret = -ENOSPC;
3840                 else
3841                         ret = 0;
3842                 spin_unlock(&space_info->lock);
3843                 return ret;
3844         }
3845
3846         if (!should_alloc_chunk(extent_root, space_info, force)) {
3847                 spin_unlock(&space_info->lock);
3848                 return 0;
3849         } else if (space_info->chunk_alloc) {
3850                 wait_for_alloc = 1;
3851         } else {
3852                 space_info->chunk_alloc = 1;
3853         }
3854
3855         spin_unlock(&space_info->lock);
3856
3857         mutex_lock(&fs_info->chunk_mutex);
3858
3859         /*
3860          * The chunk_mutex is held throughout the entirety of a chunk
3861          * allocation, so once we've acquired the chunk_mutex we know that the
3862          * other guy is done and we need to recheck and see if we should
3863          * allocate.
3864          */
3865         if (wait_for_alloc) {
3866                 mutex_unlock(&fs_info->chunk_mutex);
3867                 wait_for_alloc = 0;
3868                 goto again;
3869         }
3870
3871         trans->allocating_chunk = true;
3872
3873         /*
3874          * If we have mixed data/metadata chunks we want to make sure we keep
3875          * allocating mixed chunks instead of individual chunks.
3876          */
3877         if (btrfs_mixed_space_info(space_info))
3878                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3879
3880         /*
3881          * if we're doing a data chunk, go ahead and make sure that
3882          * we keep a reasonable number of metadata chunks allocated in the
3883          * FS as well.
3884          */
3885         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3886                 fs_info->data_chunk_allocations++;
3887                 if (!(fs_info->data_chunk_allocations %
3888                       fs_info->metadata_ratio))
3889                         force_metadata_allocation(fs_info);
3890         }
3891
3892         /*
3893          * Check if we have enough space in SYSTEM chunk because we may need
3894          * to update devices.
3895          */
3896         check_system_chunk(trans, extent_root, flags);
3897
3898         ret = btrfs_alloc_chunk(trans, extent_root, flags);
3899         trans->allocating_chunk = false;
3900
3901         spin_lock(&space_info->lock);
3902         if (ret < 0 && ret != -ENOSPC)
3903                 goto out;
3904         if (ret)
3905                 space_info->full = 1;
3906         else
3907                 ret = 1;
3908
3909         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
3910 out:
3911         space_info->chunk_alloc = 0;
3912         spin_unlock(&space_info->lock);
3913         mutex_unlock(&fs_info->chunk_mutex);
3914         return ret;
3915 }
3916
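/*
 * Decide whether reserving @bytes may overcommit @space_info: allow it
 * while enough unallocated device space remains, halving the estimate
 * for mirrored profiles and capping it at 1/2 (limited flushing) or
 * 1/8 (full flushing) of what is free.
 */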
3917 static int can_overcommit(struct btrfs_root *root,
3918                           struct btrfs_space_info *space_info, u64 bytes,
3919                           enum btrfs_reserve_flush_enum flush)
3920 {
3921         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3922         u64 profile = btrfs_get_alloc_profile(root, 0);
3923         u64 space_size;
3924         u64 avail;
3925         u64 used;
3926
3927         used = space_info->bytes_used + space_info->bytes_reserved +
3928                 space_info->bytes_pinned + space_info->bytes_readonly;
3929
3930         /*
3931          * We only want to allow over committing if we have lots of actual space
3932          * free, but if we don't have enough space to handle the global reserve
3933          * space then we could end up having a real enospc problem when trying
3934          * to allocate a chunk or some other such important allocation.
3935          */
3936         spin_lock(&global_rsv->lock);
3937         space_size = calc_global_rsv_need_space(global_rsv);
3938         spin_unlock(&global_rsv->lock);
3939         if (used + space_size >= space_info->total_bytes)
3940                 return 0;
3941
3942         used += space_info->bytes_may_use;
3943
3944         spin_lock(&root->fs_info->free_chunk_lock);
3945         avail = root->fs_info->free_chunk_space;
3946         spin_unlock(&root->fs_info->free_chunk_lock);
3947
3948         /*
3949          * If we have dup, raid1 or raid10 then only half of the free
3950          * space is actually usable.  For raid56, the space info used
3951          * doesn't include the parity drive, so we don't have to
3952          * change the math
3953          */
3954         if (profile & (BTRFS_BLOCK_GROUP_DUP |
3955                        BTRFS_BLOCK_GROUP_RAID1 |
3956                        BTRFS_BLOCK_GROUP_RAID10))
3957                 avail >>= 1;
3958
3959         /*
3960          * If we aren't flushing all things, let us overcommit up to
3961          * half of the space. If we can flush everything, don't let us
3962          * overcommit too much, only up to 1/8 of the space.
3963          */
3964         if (flush == BTRFS_RESERVE_FLUSH_ALL)
3965                 avail >>= 3;
3966         else
3967                 avail >>= 1;
3968
3969         if (used + bytes < space_info->total_bytes + avail)
3970                 return 1;
3971         return 0;
3972 }
3973
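/*
 * Kick writeback of delalloc pages, using the generic
 * writeback_inodes_sb_nr() when s_umount can be taken and btrfs' own
 * delalloc flushing otherwise.
 */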
3974 static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
3975                                          unsigned long nr_pages, int nr_items)
3976 {
3977         struct super_block *sb = root->fs_info->sb;
3978
3979         if (down_read_trylock(&sb->s_umount)) {
3980                 writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
3981                 up_read(&sb->s_umount);
3982         } else {
3983                 /*
3984                  * We needn't worry about the filesystem going from r/w to r/o
3985                  * even though we don't acquire the ->s_umount mutex, because
3986                  * the filesystem should guarantee that the delalloc inode list
3987                  * is empty once the filesystem is read-only (all dirty pages
3988                  * have been written to disk).
3989                  */
3990                 btrfs_start_delalloc_roots(root->fs_info, 0, nr_items);
3991                 if (!current->journal_info)
3992                         btrfs_wait_ordered_roots(root->fs_info, nr_items);
3993         }
3994 }
3995
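/*
 * Convert @to_reclaim bytes into a number of metadata items to flush,
 * with the worst-case metadata size of one item as the unit.
 */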
3996 static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim)
3997 {
3998         u64 bytes;
3999         int nr;
4000
4001         bytes = btrfs_calc_trans_metadata_size(root, 1);
4002         nr = (int)div64_u64(to_reclaim, bytes);
4003         if (!nr)
4004                 nr = 1;
4005         return nr;
4006 }
4007
4008 #define EXTENT_SIZE_PER_ITEM    (256 * 1024)
4009
4010 /*
4011  * shrink metadata reservation for delalloc
4012  */
4013 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
4014                             bool wait_ordered)
4015 {
4016         struct btrfs_block_rsv *block_rsv;
4017         struct btrfs_space_info *space_info;
4018         struct btrfs_trans_handle *trans;
4019         u64 delalloc_bytes;
4020         u64 max_reclaim;
4021         long time_left;
4022         unsigned long nr_pages;
4023         int loops;
4024         int items;
4025         enum btrfs_reserve_flush_enum flush;
4026
4027         /* Calc the number of items we need to flush for this space reservation */
4028         items = calc_reclaim_items_nr(root, to_reclaim);
4029         to_reclaim = items * EXTENT_SIZE_PER_ITEM;
4030
4031         trans = (struct btrfs_trans_handle *)current->journal_info;
4032         block_rsv = &root->fs_info->delalloc_block_rsv;
4033         space_info = block_rsv->space_info;
4034
4035         delalloc_bytes = percpu_counter_sum_positive(
4036                                                 &root->fs_info->delalloc_bytes);
4037         if (delalloc_bytes == 0) {
4038                 if (trans)
4039                         return;
4040                 if (wait_ordered)
4041                         btrfs_wait_ordered_roots(root->fs_info, items);
4042                 return;
4043         }
4044
4045         loops = 0;
4046         while (delalloc_bytes && loops < 3) {
4047                 max_reclaim = min(delalloc_bytes, to_reclaim);
4048                 nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
4049                 btrfs_writeback_inodes_sb_nr(root, nr_pages, items);
4050                 /*
4051                  * We need to wait for the async pages to actually start before
4052                  * we do anything.
4053                  */
4054                 max_reclaim = atomic_read(&root->fs_info->async_delalloc_pages);
4055                 if (!max_reclaim)
4056                         goto skip_async;
4057
4058                 if (max_reclaim <= nr_pages)
4059                         max_reclaim = 0;
4060                 else
4061                         max_reclaim -= nr_pages;
4062
4063                 wait_event(root->fs_info->async_submit_wait,
4064                            atomic_read(&root->fs_info->async_delalloc_pages) <=
4065                            (int)max_reclaim);
4066 skip_async:
4067                 if (!trans)
4068                         flush = BTRFS_RESERVE_FLUSH_ALL;
4069                 else
4070                         flush = BTRFS_RESERVE_NO_FLUSH;
4071                 spin_lock(&space_info->lock);
4072                 if (can_overcommit(root, space_info, orig, flush)) {
4073                         spin_unlock(&space_info->lock);
4074                         break;
4075                 }
4076                 spin_unlock(&space_info->lock);
4077
4078                 loops++;
4079                 if (wait_ordered && !trans) {
4080                         btrfs_wait_ordered_roots(root->fs_info, items);
4081                 } else {
4082                         time_left = schedule_timeout_killable(1);
4083                         if (time_left)
4084                                 break;
4085                 }
4086                 delalloc_bytes = percpu_counter_sum_positive(
4087                                                 &root->fs_info->delalloc_bytes);
4088         }
4089 }
4090
4091 /**
4092  * may_commit_transaction - possibly commit the transaction if it's ok to
4093  * @root: the root we're allocating for
4094  * @space_info: the space_info we're allocating from
4095  * @bytes: the number of bytes we want to reserve
4096  * @force: force the commit
4097  *
4098  * This will check whether committing the transaction will actually get us
4099  * somewhere and commit it if so; otherwise it will return -ENOSPC.
4100  */
4101 static int may_commit_transaction(struct btrfs_root *root,
4102                                   struct btrfs_space_info *space_info,
4103                                   u64 bytes, int force)
4104 {
4105         struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
4106         struct btrfs_trans_handle *trans;
4107
4108         trans = (struct btrfs_trans_handle *)current->journal_info;
4109         if (trans)
4110                 return -EAGAIN;
4111
4112         if (force)
4113                 goto commit;
4114
4115         /* See if there is enough pinned space to make this reservation */
4116         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4117                                    bytes) >= 0)
4118                 goto commit;
4119
4120         /*
4121          * See if there is some space in the delayed insertion reservation
4122          * for this reservation; a commit also flushes the delayed items.
4123          */
4124         if (space_info != delayed_rsv->space_info)
4125                 return -ENOSPC;
4126
4127         spin_lock(&delayed_rsv->lock);
4128         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4129                                    bytes - min(bytes, delayed_rsv->size)) < 0) {
4130                 spin_unlock(&delayed_rsv->lock);
4131                 return -ENOSPC;
4132         }
4133         spin_unlock(&delayed_rsv->lock);
4134
4135 commit:
4136         trans = btrfs_join_transaction(root);
4137         if (IS_ERR(trans))
4138                 return -ENOSPC;
4139
4140         return btrfs_commit_transaction(trans, root);
4141 }
4142
4143 enum flush_state {
4144         FLUSH_DELAYED_ITEMS_NR  =       1,
4145         FLUSH_DELAYED_ITEMS     =       2,
4146         FLUSH_DELALLOC          =       3,
4147         FLUSH_DELALLOC_WAIT     =       4,
4148         ALLOC_CHUNK             =       5,
4149         COMMIT_TRANS            =       6,
4150 };
4151
4152 static int flush_space(struct btrfs_root *root,
4153                        struct btrfs_space_info *space_info, u64 num_bytes,
4154                        u64 orig_bytes, int state)
4155 {
4156         struct btrfs_trans_handle *trans;
4157         int nr;
4158         int ret = 0;
4159
4160         switch (state) {
4161         case FLUSH_DELAYED_ITEMS_NR:
4162         case FLUSH_DELAYED_ITEMS:
4163                 if (state == FLUSH_DELAYED_ITEMS_NR)
4164                         nr = calc_reclaim_items_nr(root, num_bytes) * 2;
4165                 else
4166                         nr = -1;
4167
4168                 trans = btrfs_join_transaction(root);
4169                 if (IS_ERR(trans)) {
4170                         ret = PTR_ERR(trans);
4171                         break;
4172                 }
4173                 ret = btrfs_run_delayed_items_nr(trans, root, nr);
4174                 btrfs_end_transaction(trans, root);
4175                 break;
4176         case FLUSH_DELALLOC:
4177         case FLUSH_DELALLOC_WAIT:
4178                 shrink_delalloc(root, num_bytes * 2, orig_bytes,
4179                                 state == FLUSH_DELALLOC_WAIT);
4180                 break;
4181         case ALLOC_CHUNK:
4182                 trans = btrfs_join_transaction(root);
4183                 if (IS_ERR(trans)) {
4184                         ret = PTR_ERR(trans);
4185                         break;
4186                 }
4187                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4188                                      btrfs_get_alloc_profile(root, 0),
4189                                      CHUNK_ALLOC_NO_FORCE);
4190                 btrfs_end_transaction(trans, root);
4191                 if (ret == -ENOSPC)
4192                         ret = 0;
4193                 break;
4194         case COMMIT_TRANS:
4195                 ret = may_commit_transaction(root, space_info, orig_bytes, 0);
4196                 break;
4197         default:
4198                 ret = -ENOSPC;
4199                 break;
4200         }
4201
4202         return ret;
4203 }
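
/*
 * Illustrative escalation sketch (a simplified caller shape; see
 * reserve_metadata_bytes() below for the real loop with locking and
 * the BTRFS_RESERVE_FLUSH_LIMIT shortcuts).  The states are tried in
 * enum order, retrying the reservation after each step:
 *
 *	int state = FLUSH_DELAYED_ITEMS_NR;
 *
 *	while (need_more_space && state <= COMMIT_TRANS)
 *		flush_space(root, space_info, num_bytes, orig_bytes,
 *			    state++);
 *
 * where need_more_space stands in for re-checking the space_info
 * counters and is not a real variable in this file.
 */
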
4204 /**
4205  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
4206  * @root: the root we're allocating for
4207  * @block_rsv: the block_rsv we're allocating for
4208  * @orig_bytes: the number of bytes we want
4209  * @flush: whether or not we can flush to make our reservation
4210  *
4211  * This will reserve orig_bytes number of bytes from the space info associated
4212  * with the block_rsv.  If there is not enough space it will make an attempt to
4213  * flush out space to make room.  It will do this by flushing delalloc if
4214  * possible or committing the transaction.  If flush is BTRFS_RESERVE_NO_FLUSH
4215  * then no attempt to regain reservations will be made and this will fail if
4216  * there is not enough space already.
4217  */
4218 static int reserve_metadata_bytes(struct btrfs_root *root,
4219                                   struct btrfs_block_rsv *block_rsv,
4220                                   u64 orig_bytes,
4221                                   enum btrfs_reserve_flush_enum flush)
4222 {
4223         struct btrfs_space_info *space_info = block_rsv->space_info;
4224         u64 used;
4225         u64 num_bytes = orig_bytes;
4226         int flush_state = FLUSH_DELAYED_ITEMS_NR;
4227         int ret = 0;
4228         bool flushing = false;
4229
4230 again:
4231         ret = 0;
4232         spin_lock(&space_info->lock);
4233         /*
4234          * We only want to wait if somebody other than us is flushing and we
4235          * are actually allowed to flush all things.
4236          */
4237         while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
4238                space_info->flush) {
4239                 spin_unlock(&space_info->lock);
4240                 /*
4241                  * If we have a trans handle we can't wait because the flusher
4242                  * may have to commit the transaction, which would mean we would
4243                  * deadlock since we are waiting for the flusher to finish, but
4244                  * hold the current transaction open.
4245                  */
4246                 if (current->journal_info)
4247                         return -EAGAIN;
4248                 ret = wait_event_killable(space_info->wait, !space_info->flush);
4249                 /* Must have been killed, return */
4250                 if (ret)
4251                         return -EINTR;
4252
4253                 spin_lock(&space_info->lock);
4254         }
4255
4256         ret = -ENOSPC;
4257         used = space_info->bytes_used + space_info->bytes_reserved +
4258                 space_info->bytes_pinned + space_info->bytes_readonly +
4259                 space_info->bytes_may_use;
4260
4261         /*
4262          * The idea here is that if we've not already over-reserved the space
4263          * then we can go ahead and save our reservation first and then start
4264          * flushing if we need to.  Otherwise, if we've already overcommitted,
4265          * let's start flushing stuff first and then come back and try to make
4266          * our reservation.
4267          */
4268         if (used <= space_info->total_bytes) {
4269                 if (used + orig_bytes <= space_info->total_bytes) {
4270                         space_info->bytes_may_use += orig_bytes;
4271                         trace_btrfs_space_reservation(root->fs_info,
4272                                 "space_info", space_info->flags, orig_bytes, 1);
4273                         ret = 0;
4274                 } else {
4275                         /*
4276                          * Ok, set num_bytes to orig_bytes since we aren't
4277                          * overcommitted; this way we only try to reclaim what
4278                          * we need.
4279                          */
4280                         num_bytes = orig_bytes;
4281                 }
4282         } else {
4283                 /*
4284                  * Ok, we're overcommitted: set num_bytes to the overcommitted
4285                  * amount plus the number of bytes that we need for this
4286                  * reservation.
4287                  */
4288                 num_bytes = used - space_info->total_bytes +
4289                         (orig_bytes * 2);
4290         }
4291
4292         if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
4293                 space_info->bytes_may_use += orig_bytes;
4294                 trace_btrfs_space_reservation(root->fs_info, "space_info",
4295                                               space_info->flags, orig_bytes,
4296                                               1);
4297                 ret = 0;
4298         }
4299
4300         /*
4301          * Couldn't make our reservation; save our place so that while we're trying
4302          * to reclaim space we can actually use it instead of somebody else
4303          * stealing it from us.
4304          *
4305          * We make the other tasks wait for the flush only when we can flush
4306          * all things.
4307          */
4308         if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
4309                 flushing = true;
4310                 space_info->flush = 1;
4311         }
4312
4313         spin_unlock(&space_info->lock);
4314
4315         if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
4316                 goto out;
4317
4318         ret = flush_space(root, space_info, num_bytes, orig_bytes,
4319                           flush_state);
4320         flush_state++;
4321
4322         /*
4323          * If we are BTRFS_RESERVE_FLUSH_LIMIT, we cannot flush delalloc or a
4324          * deadlock would happen, so skip the delalloc flush states.
4325          */
4326         if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4327             (flush_state == FLUSH_DELALLOC ||
4328              flush_state == FLUSH_DELALLOC_WAIT))
4329                 flush_state = ALLOC_CHUNK;
4330
4331         if (!ret)
4332                 goto again;
4333         else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4334                  flush_state < COMMIT_TRANS)
4335                 goto again;
4336         else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
4337                  flush_state <= COMMIT_TRANS)
4338                 goto again;
4339
4340 out:
4341         if (ret == -ENOSPC &&
4342             unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
4343                 struct btrfs_block_rsv *global_rsv =
4344                         &root->fs_info->global_block_rsv;
4345
4346                 if (block_rsv != global_rsv &&
4347                     !block_rsv_use_bytes(global_rsv, orig_bytes))
4348                         ret = 0;
4349         }
4350         if (ret == -ENOSPC)
4351                 trace_btrfs_space_reservation(root->fs_info,
4352                                               "space_info:enospc",
4353                                               space_info->flags, orig_bytes, 1);
4354         if (flushing) {
4355                 spin_lock(&space_info->lock);
4356                 space_info->flush = 0;
4357                 wake_up_all(&space_info->wait);
4358                 spin_unlock(&space_info->lock);
4359         }
4360         return ret;
4361 }
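
/*
 * Illustrative usage (this is what btrfs_block_rsv_add() below does):
 *
 *	ret = reserve_metadata_bytes(root, block_rsv, num_bytes,
 *				     BTRFS_RESERVE_FLUSH_ALL);
 *	if (!ret)
 *		block_rsv_add_bytes(block_rsv, num_bytes, 1);
 *
 * i.e. a successful reservation must still be added to the rsv's own
 * counters before the space can be consumed from it.
 */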
4362
4363 static struct btrfs_block_rsv *get_block_rsv(
4364                                         const struct btrfs_trans_handle *trans,
4365                                         const struct btrfs_root *root)
4366 {
4367         struct btrfs_block_rsv *block_rsv = NULL;
4368
4369         if (root->ref_cows)
4370                 block_rsv = trans->block_rsv;
4371
4372         if (root == root->fs_info->csum_root && trans->adding_csums)
4373                 block_rsv = trans->block_rsv;
4374
4375         if (root == root->fs_info->uuid_root)
4376                 block_rsv = trans->block_rsv;
4377
4378         if (!block_rsv)
4379                 block_rsv = root->block_rsv;
4380
4381         if (!block_rsv)
4382                 block_rsv = &root->fs_info->empty_block_rsv;
4383
4384         return block_rsv;
4385 }
4386
4387 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
4388                                u64 num_bytes)
4389 {
4390         int ret = -ENOSPC;
4391         spin_lock(&block_rsv->lock);
4392         if (block_rsv->reserved >= num_bytes) {
4393                 block_rsv->reserved -= num_bytes;
4394                 if (block_rsv->reserved < block_rsv->size)
4395                         block_rsv->full = 0;
4396                 ret = 0;
4397         }
4398         spin_unlock(&block_rsv->lock);
4399         return ret;
4400 }
4401
4402 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
4403                                 u64 num_bytes, int update_size)
4404 {
4405         spin_lock(&block_rsv->lock);
4406         block_rsv->reserved += num_bytes;
4407         if (update_size)
4408                 block_rsv->size += num_bytes;
4409         else if (block_rsv->reserved >= block_rsv->size)
4410                 block_rsv->full = 1;
4411         spin_unlock(&block_rsv->lock);
4412 }
4413
4414 int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
4415                              struct btrfs_block_rsv *dest, u64 num_bytes,
4416                              int min_factor)
4417 {
4418         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
4419         u64 min_bytes;
4420
4421         if (global_rsv->space_info != dest->space_info)
4422                 return -ENOSPC;
4423
4424         spin_lock(&global_rsv->lock);
4425         min_bytes = div_factor(global_rsv->size, min_factor);
4426         if (global_rsv->reserved < min_bytes + num_bytes) {
4427                 spin_unlock(&global_rsv->lock);
4428                 return -ENOSPC;
4429         }
4430         global_rsv->reserved -= num_bytes;
4431         if (global_rsv->reserved < global_rsv->size)
4432                 global_rsv->full = 0;
4433         spin_unlock(&global_rsv->lock);
4434
4435         block_rsv_add_bytes(dest, num_bytes, 1);
4436         return 0;
4437 }
4438
4439 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
4440                                     struct btrfs_block_rsv *block_rsv,
4441                                     struct btrfs_block_rsv *dest, u64 num_bytes)
4442 {
4443         struct btrfs_space_info *space_info = block_rsv->space_info;
4444
4445         spin_lock(&block_rsv->lock);
4446         if (num_bytes == (u64)-1)
4447                 num_bytes = block_rsv->size;
4448         block_rsv->size -= num_bytes;
4449         if (block_rsv->reserved >= block_rsv->size) {
4450                 num_bytes = block_rsv->reserved - block_rsv->size;
4451                 block_rsv->reserved = block_rsv->size;
4452                 block_rsv->full = 1;
4453         } else {
4454                 num_bytes = 0;
4455         }
4456         spin_unlock(&block_rsv->lock);
4457
4458         if (num_bytes > 0) {
4459                 if (dest) {
4460                         spin_lock(&dest->lock);
4461                         if (!dest->full) {
4462                                 u64 bytes_to_add;
4463
4464                                 bytes_to_add = dest->size - dest->reserved;
4465                                 bytes_to_add = min(num_bytes, bytes_to_add);
4466                                 dest->reserved += bytes_to_add;
4467                                 if (dest->reserved >= dest->size)
4468                                         dest->full = 1;
4469                                 num_bytes -= bytes_to_add;
4470                         }
4471                         spin_unlock(&dest->lock);
4472                 }
4473                 if (num_bytes) {
4474                         spin_lock(&space_info->lock);
4475                         space_info->bytes_may_use -= num_bytes;
4476                         trace_btrfs_space_reservation(fs_info, "space_info",
4477                                         space_info->flags, num_bytes, 0);
4478                         spin_unlock(&space_info->lock);
4479                 }
4480         }
4481 }
4482
4483 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
4484                                    struct btrfs_block_rsv *dst, u64 num_bytes)
4485 {
4486         int ret;
4487
4488         ret = block_rsv_use_bytes(src, num_bytes);
4489         if (ret)
4490                 return ret;
4491
4492         block_rsv_add_bytes(dst, num_bytes, 1);
4493         return 0;
4494 }
4495
4496 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
4497 {
4498         memset(rsv, 0, sizeof(*rsv));
4499         spin_lock_init(&rsv->lock);
4500         rsv->type = type;
4501 }
4502
4503 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
4504                                               unsigned short type)
4505 {
4506         struct btrfs_block_rsv *block_rsv;
4507         struct btrfs_fs_info *fs_info = root->fs_info;
4508
4509         block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
4510         if (!block_rsv)
4511                 return NULL;
4512
4513         btrfs_init_block_rsv(block_rsv, type);
4514         block_rsv->space_info = __find_space_info(fs_info,
4515                                                   BTRFS_BLOCK_GROUP_METADATA);
4516         return block_rsv;
4517 }
4518
4519 void btrfs_free_block_rsv(struct btrfs_root *root,
4520                           struct btrfs_block_rsv *rsv)
4521 {
4522         if (!rsv)
4523                 return;
4524         btrfs_block_rsv_release(root, rsv, (u64)-1);
4525         kfree(rsv);
4526 }
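
/*
 * Illustrative lifecycle of a short-lived rsv, assuming the
 * BTRFS_BLOCK_RSV_TEMP type used elsewhere in the tree for temporary
 * reservations:
 *
 *	rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
 *	if (!rsv)
 *		return -ENOMEM;
 *	ret = btrfs_block_rsv_add(root, rsv, num_bytes,
 *				  BTRFS_RESERVE_FLUSH_ALL);
 *	...
 *	btrfs_free_block_rsv(root, rsv);
 *
 * btrfs_free_block_rsv() releases whatever is still reserved before
 * freeing the rsv itself.
 */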
4527
4528 int btrfs_block_rsv_add(struct btrfs_root *root,
4529                         struct btrfs_block_rsv *block_rsv, u64 num_bytes,
4530                         enum btrfs_reserve_flush_enum flush)
4531 {
4532         int ret;
4533
4534         if (num_bytes == 0)
4535                 return 0;
4536
4537         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4538         if (!ret) {
4539                 block_rsv_add_bytes(block_rsv, num_bytes, 1);
4540                 return 0;
4541         }
4542
4543         return ret;
4544 }
4545
4546 int btrfs_block_rsv_check(struct btrfs_root *root,
4547                           struct btrfs_block_rsv *block_rsv, int min_factor)
4548 {
4549         u64 num_bytes = 0;
4550         int ret = -ENOSPC;
4551
4552         if (!block_rsv)
4553                 return 0;
4554
4555         spin_lock(&block_rsv->lock);
4556         num_bytes = div_factor(block_rsv->size, min_factor);
4557         if (block_rsv->reserved >= num_bytes)
4558                 ret = 0;
4559         spin_unlock(&block_rsv->lock);
4560
4561         return ret;
4562 }
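
/*
 * Worked example (illustrative): div_factor() scales by min_factor/10,
 * so with min_factor == 5 a 100MiB rsv passes this check only while at
 * least 50MiB of it is still reserved.
 */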
4563
4564 int btrfs_block_rsv_refill(struct btrfs_root *root,
4565                            struct btrfs_block_rsv *block_rsv, u64 min_reserved,
4566                            enum btrfs_reserve_flush_enum flush)
4567 {
4568         u64 num_bytes = 0;
4569         int ret = -ENOSPC;
4570
4571         if (!block_rsv)
4572                 return 0;
4573
4574         spin_lock(&block_rsv->lock);
4575         num_bytes = min_reserved;
4576         if (block_rsv->reserved >= num_bytes)
4577                 ret = 0;
4578         else
4579                 num_bytes -= block_rsv->reserved;
4580         spin_unlock(&block_rsv->lock);
4581
4582         if (!ret)
4583                 return 0;
4584
4585         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4586         if (!ret) {
4587                 block_rsv_add_bytes(block_rsv, num_bytes, 0);
4588                 return 0;
4589         }
4590
4591         return ret;
4592 }
4593
4594 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
4595                             struct btrfs_block_rsv *dst_rsv,
4596                             u64 num_bytes)
4597 {
4598         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4599 }
4600
4601 void btrfs_block_rsv_release(struct btrfs_root *root,
4602                              struct btrfs_block_rsv *block_rsv,
4603                              u64 num_bytes)
4604 {
4605         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4606         if (global_rsv == block_rsv ||
4607             block_rsv->space_info != global_rsv->space_info)
4608                 global_rsv = NULL;
4609         block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
4610                                 num_bytes);
4611 }
4612
4613 /*
4614  * Helper to calculate the size of the global block reservation.
4615  * The desired value is the sum of the space used by the extent tree,
4616  * the checksum tree and the root tree.
4617  */
4618 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
4619 {
4620         struct btrfs_space_info *sinfo;
4621         u64 num_bytes;
4622         u64 meta_used;
4623         u64 data_used;
4624         int csum_size = btrfs_super_csum_size(fs_info->super_copy);
4625
4626         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
4627         spin_lock(&sinfo->lock);
4628         data_used = sinfo->bytes_used;
4629         spin_unlock(&sinfo->lock);
4630
4631         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4632         spin_lock(&sinfo->lock);
4633         if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
4634                 data_used = 0;
4635         meta_used = sinfo->bytes_used;
4636         spin_unlock(&sinfo->lock);
4637
4638         num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
4639                     csum_size * 2;
4640         num_bytes += div64_u64(data_used + meta_used, 50);
4641
4642         if (num_bytes * 3 > meta_used)
4643                 num_bytes = div64_u64(meta_used, 3);
4644
4645         return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
4646 }
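
/*
 * Worked example (illustrative, assuming 4KiB blocks and a 4-byte
 * crc32c csum): with data_used == 100GiB and meta_used == 4GiB the
 * csum term is (100GiB / 4KiB) * 4 * 2 == 200MiB and the 2% term adds
 * (104GiB / 50) ~= 2.1GiB; since 3 * ~2.3GiB exceeds meta_used, the
 * result is clamped to meta_used / 3 ~= 1.33GiB before being aligned.
 */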
4647
4648 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
4649 {
4650         struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
4651         struct btrfs_space_info *sinfo = block_rsv->space_info;
4652         u64 num_bytes;
4653
4654         num_bytes = calc_global_metadata_size(fs_info);
4655
4656         spin_lock(&sinfo->lock);
4657         spin_lock(&block_rsv->lock);
4658
4659         block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);
4660
4661         num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
4662                     sinfo->bytes_reserved + sinfo->bytes_readonly +
4663                     sinfo->bytes_may_use;
4664
4665         if (sinfo->total_bytes > num_bytes) {
4666                 num_bytes = sinfo->total_bytes - num_bytes;
4667                 block_rsv->reserved += num_bytes;
4668                 sinfo->bytes_may_use += num_bytes;
4669                 trace_btrfs_space_reservation(fs_info, "space_info",
4670                                       sinfo->flags, num_bytes, 1);
4671         }
4672
4673         if (block_rsv->reserved >= block_rsv->size) {
4674                 num_bytes = block_rsv->reserved - block_rsv->size;
4675                 sinfo->bytes_may_use -= num_bytes;
4676                 trace_btrfs_space_reservation(fs_info, "space_info",
4677                                       sinfo->flags, num_bytes, 0);
4678                 block_rsv->reserved = block_rsv->size;
4679                 block_rsv->full = 1;
4680         }
4681
4682         spin_unlock(&block_rsv->lock);
4683         spin_unlock(&sinfo->lock);
4684 }
4685
4686 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
4687 {
4688         struct btrfs_space_info *space_info;
4689
4690         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4691         fs_info->chunk_block_rsv.space_info = space_info;
4692
4693         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4694         fs_info->global_block_rsv.space_info = space_info;
4695         fs_info->delalloc_block_rsv.space_info = space_info;
4696         fs_info->trans_block_rsv.space_info = space_info;
4697         fs_info->empty_block_rsv.space_info = space_info;
4698         fs_info->delayed_block_rsv.space_info = space_info;
4699
4700         fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
4701         fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
4702         fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
4703         fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
4704         if (fs_info->quota_root)
4705                 fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
4706         fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
4707
4708         update_global_block_rsv(fs_info);
4709 }
4710
4711 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
4712 {
4713         block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
4714                                 (u64)-1);
4715         WARN_ON(fs_info->delalloc_block_rsv.size > 0);
4716         WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
4717         WARN_ON(fs_info->trans_block_rsv.size > 0);
4718         WARN_ON(fs_info->trans_block_rsv.reserved > 0);
4719         WARN_ON(fs_info->chunk_block_rsv.size > 0);
4720         WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
4721         WARN_ON(fs_info->delayed_block_rsv.size > 0);
4722         WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
4723 }
4724
4725 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
4726                                   struct btrfs_root *root)
4727 {
4728         if (!trans->block_rsv)
4729                 return;
4730
4731         if (!trans->bytes_reserved)
4732                 return;
4733
4734         trace_btrfs_space_reservation(root->fs_info, "transaction",
4735                                       trans->transid, trans->bytes_reserved, 0);
4736         btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
4737         trans->bytes_reserved = 0;
4738 }
4739
4740 /* Can only return 0 or -ENOSPC */
4741 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
4742                                   struct inode *inode)
4743 {
4744         struct btrfs_root *root = BTRFS_I(inode)->root;
4745         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4746         struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
4747
4748         /*
4749          * We need to hold space in order to delete our orphan item once we've
4750          * added it, so this takes the reservation up front so that we can
4751          * release it later, when we are truly done with the orphan item.
4752          */
4753         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4754         trace_btrfs_space_reservation(root->fs_info, "orphan",
4755                                       btrfs_ino(inode), num_bytes, 1);
4756         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4757 }
4758
4759 void btrfs_orphan_release_metadata(struct inode *inode)
4760 {
4761         struct btrfs_root *root = BTRFS_I(inode)->root;
4762         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4763         trace_btrfs_space_reservation(root->fs_info, "orphan",
4764                                       btrfs_ino(inode), num_bytes, 0);
4765         btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
4766 }
4767
4768 /**
4769  * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
4770  * @root: the root of the parent directory
4771  * @rsv: the block reservation to fill
4772  * @items: the number of items that we need to reserve space for
4773  * @qgroup_reserved: used to return the size reserved in the qgroup
4774  *
4775  * This function is used to reserve the space for snapshot/subvolume
4776  * creation and deletion. Those operations differ from the common
4777  * file/directory operations: they change two fs/file trees and the
4778  * root tree, and the number of items that the qgroup reserves differs
4779  * from the free space reservation. So we can not use the space
4780  * reservation mechanism in start_transaction().
4781  */
4782 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
4783                                      struct btrfs_block_rsv *rsv,
4784                                      int items,
4785                                      u64 *qgroup_reserved,
4786                                      bool use_global_rsv)
4787 {
4788         u64 num_bytes;
4789         int ret;
4790         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4791
4792         if (root->fs_info->quota_enabled) {
4793                 /* One for parent inode, two for dir entries */
4794                 num_bytes = 3 * root->leafsize;
4795                 ret = btrfs_qgroup_reserve(root, num_bytes);
4796                 if (ret)
4797                         return ret;
4798         } else {
4799                 num_bytes = 0;
4800         }
4801
4802         *qgroup_reserved = num_bytes;
4803
4804         num_bytes = btrfs_calc_trans_metadata_size(root, items);
4805         rsv->space_info = __find_space_info(root->fs_info,
4806                                             BTRFS_BLOCK_GROUP_METADATA);
4807         ret = btrfs_block_rsv_add(root, rsv, num_bytes,
4808                                   BTRFS_RESERVE_FLUSH_ALL);
4809
4810         if (ret == -ENOSPC && use_global_rsv)
4811                 ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes);
4812
4813         if (ret) {
4814                 if (*qgroup_reserved)
4815                         btrfs_qgroup_free(root, *qgroup_reserved);
4816         }
4817
4818         return ret;
4819 }
4820
4821 void btrfs_subvolume_release_metadata(struct btrfs_root *root,
4822                                       struct btrfs_block_rsv *rsv,
4823                                       u64 qgroup_reserved)
4824 {
4825         btrfs_block_rsv_release(root, rsv, (u64)-1);
4826         if (qgroup_reserved)
4827                 btrfs_qgroup_free(root, qgroup_reserved);
4828 }
4829
4830 /**
4831  * drop_outstanding_extent - drop an outstanding extent
4832  * @inode: the inode we're dropping the extent for
4833  *
4834  * This is called when we are freeing up an outstanding extent, either after
4835  * an error or after an extent is written.  This will return the number of
4836  * reserved extents that need to be freed.  This must be called with
4837  * BTRFS_I(inode)->lock held.
4838  */
4839 static unsigned drop_outstanding_extent(struct inode *inode)
4840 {
4841         unsigned drop_inode_space = 0;
4842         unsigned dropped_extents = 0;
4843
4844         BUG_ON(!BTRFS_I(inode)->outstanding_extents);
4845         BTRFS_I(inode)->outstanding_extents--;
4846
4847         if (BTRFS_I(inode)->outstanding_extents == 0 &&
4848             test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4849                                &BTRFS_I(inode)->runtime_flags))
4850                 drop_inode_space = 1;
4851
4852         /*
4853          * If we have at least as many outstanding extents as we have
4854          * reserved then we need to leave the reserved extents count alone.
4855          */
4856         if (BTRFS_I(inode)->outstanding_extents >=
4857             BTRFS_I(inode)->reserved_extents)
4858                 return drop_inode_space;
4859
4860         dropped_extents = BTRFS_I(inode)->reserved_extents -
4861                 BTRFS_I(inode)->outstanding_extents;
4862         BTRFS_I(inode)->reserved_extents -= dropped_extents;
4863         return dropped_extents + drop_inode_space;
4864 }
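
/*
 * Illustrative caller pattern (this is what
 * btrfs_delalloc_release_metadata() below does): the inode's spinlock
 * must be held across the call, and the returned count is converted
 * back into bytes afterwards:
 *
 *	spin_lock(&BTRFS_I(inode)->lock);
 *	dropped = drop_outstanding_extent(inode);
 *	spin_unlock(&BTRFS_I(inode)->lock);
 *	if (dropped)
 *		to_free += btrfs_calc_trans_metadata_size(root, dropped);
 */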
4865
4866 /**
4867  * calc_csum_metadata_size - return the amount of metadata space that must be
4868  *      reserved/freed for the given bytes.
4869  * @inode: the inode we're manipulating
4870  * @num_bytes: the number of bytes in question
4871  * @reserve: 1 if we are reserving space, 0 if we are freeing space
4872  *
4873  * This adjusts the number of csum_bytes in the inode and then returns the
4874  * correct amount of metadata that must either be reserved or freed.  We
4875  * calculate how many checksums we can fit into one leaf and then divide the
4876  * number of bytes that will need to be checksumed by this value to figure out
4877  * number of bytes that will need to be checksummed by this value to figure out
4878  * may go up and we will return the number of additional bytes that must be
4879  * reserved.  If it is going down we will return the number of bytes that must
4880  * be freed.
4881  *
4882  * This must be called with BTRFS_I(inode)->lock held.
4883  */
4884 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
4885                                    int reserve)
4886 {
4887         struct btrfs_root *root = BTRFS_I(inode)->root;
4888         u64 csum_size;
4889         int num_csums_per_leaf;
4890         int num_csums;
4891         int old_csums;
4892
4893         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
4894             BTRFS_I(inode)->csum_bytes == 0)
4895                 return 0;
4896
4897         old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4898         if (reserve)
4899                 BTRFS_I(inode)->csum_bytes += num_bytes;
4900         else
4901                 BTRFS_I(inode)->csum_bytes -= num_bytes;
4902         csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
4903         num_csums_per_leaf = (int)div64_u64(csum_size,
4904                                             sizeof(struct btrfs_csum_item) +
4905                                             sizeof(struct btrfs_disk_key));
4906         num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4907         num_csums = num_csums + num_csums_per_leaf - 1;
4908         num_csums = num_csums / num_csums_per_leaf;
4909
4910         old_csums = old_csums + num_csums_per_leaf - 1;
4911         old_csums = old_csums / num_csums_per_leaf;
4912
4913         /* No change, no need to reserve more */
4914         if (old_csums == num_csums)
4915                 return 0;
4916
4917         if (reserve)
4918                 return btrfs_calc_trans_metadata_size(root,
4919                                                       num_csums - old_csums);
4920
4921         return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
4922 }
4923
4924 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
4925 {
4926         struct btrfs_root *root = BTRFS_I(inode)->root;
4927         struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
4928         u64 to_reserve = 0;
4929         u64 csum_bytes;
4930         unsigned nr_extents = 0;
4931         int extra_reserve = 0;
4932         enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
4933         int ret = 0;
4934         bool delalloc_lock = true;
4935         u64 to_free = 0;
4936         unsigned dropped;
4937
4938         /* If we are a free space inode we need to not flush since we will be in
4939          * the middle of a transaction commit.  We also don't need the delalloc
4940          * mutex since we won't race with anybody.  We need this mostly to make
4941          * lockdep shut its filthy mouth.
4942          */
4943         if (btrfs_is_free_space_inode(inode)) {
4944                 flush = BTRFS_RESERVE_NO_FLUSH;
4945                 delalloc_lock = false;
4946         }
4947
4948         if (flush != BTRFS_RESERVE_NO_FLUSH &&
4949             btrfs_transaction_in_commit(root->fs_info))
4950                 schedule_timeout(1);
4951
4952         if (delalloc_lock)
4953                 mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
4954
4955         num_bytes = ALIGN(num_bytes, root->sectorsize);
4956
4957         spin_lock(&BTRFS_I(inode)->lock);
4958         BTRFS_I(inode)->outstanding_extents++;
4959
4960         if (BTRFS_I(inode)->outstanding_extents >
4961             BTRFS_I(inode)->reserved_extents)
4962                 nr_extents = BTRFS_I(inode)->outstanding_extents -
4963                         BTRFS_I(inode)->reserved_extents;
4964
4965         /*
4966          * Add an item to reserve for updating the inode when we complete the
4967          * delalloc io.
4968          */
4969         if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4970                       &BTRFS_I(inode)->runtime_flags)) {
4971                 nr_extents++;
4972                 extra_reserve = 1;
4973         }
4974
4975         to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
4976         to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
4977         csum_bytes = BTRFS_I(inode)->csum_bytes;
4978         spin_unlock(&BTRFS_I(inode)->lock);
4979
4980         if (root->fs_info->quota_enabled) {
4981                 ret = btrfs_qgroup_reserve(root, num_bytes +
4982                                            nr_extents * root->leafsize);
4983                 if (ret)
4984                         goto out_fail;
4985         }
4986
4987         ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
4988         if (unlikely(ret)) {
4989                 if (root->fs_info->quota_enabled)
4990                         btrfs_qgroup_free(root, num_bytes +
4991                                                 nr_extents * root->leafsize);
4992                 goto out_fail;
4993         }
4994
4995         spin_lock(&BTRFS_I(inode)->lock);
4996         if (extra_reserve) {
4997                 set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4998                         &BTRFS_I(inode)->runtime_flags);
4999                 nr_extents--;
5000         }
5001         BTRFS_I(inode)->reserved_extents += nr_extents;
5002         spin_unlock(&BTRFS_I(inode)->lock);
5003
5004         if (delalloc_lock)
5005                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5006
5007         if (to_reserve)
5008                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5009                                               btrfs_ino(inode), to_reserve, 1);
5010         block_rsv_add_bytes(block_rsv, to_reserve, 1);
5011
5012         return 0;
5013
5014 out_fail:
5015         spin_lock(&BTRFS_I(inode)->lock);
5016         dropped = drop_outstanding_extent(inode);
5017         /*
5018          * If the inode's csum_bytes is the same as the original
5019          * csum_bytes then we know we haven't raced with any free()ers
5020          * so we can just reduce our inode's csum bytes and carry on.
5021          */
5022         if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
5023                 calc_csum_metadata_size(inode, num_bytes, 0);
5024         } else {
5025                 u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
5026                 u64 bytes;
5027
5028                 /*
5029                  * This is tricky, but first we need to figure out how much was
5030                  * freed by any free-ers that occurred during this
5031                  * reservation, so we reset ->csum_bytes to the csum_bytes
5032                  * before we dropped our lock, and then call the free for the
5033                  * number of bytes that were freed while we were trying our
5034                  * reservation.
5035                  */
5036                 bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
5037                 BTRFS_I(inode)->csum_bytes = csum_bytes;
5038                 to_free = calc_csum_metadata_size(inode, bytes, 0);
5039
5040
5041                 /*
5042                  * Now we need to see how much we would have freed had we not
5043                  * been making this reservation and our ->csum_bytes were not
5044                  * artificially inflated.
5045                  */
5046                 BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
5047                 bytes = csum_bytes - orig_csum_bytes;
5048                 bytes = calc_csum_metadata_size(inode, bytes, 0);
5049
5050                 /*
5051                  * Now reset ->csum_bytes to what it should be.  If bytes is
5052                  * more than to_free then we would have freed more space had we
5053                  * not had an artificially high ->csum_bytes, so we need to free
5054                  * the remainder.  If bytes is the same or less then we don't
5055                  * need to do anything; the other free-ers did the correct
5056                  * thing.
5057                  */
5058                 BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
5059                 if (bytes > to_free)
5060                         to_free = bytes - to_free;
5061                 else
5062                         to_free = 0;
5063         }
5064         spin_unlock(&BTRFS_I(inode)->lock);
5065         if (dropped)
5066                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5067
5068         if (to_free) {
5069                 btrfs_block_rsv_release(root, block_rsv, to_free);
5070                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5071                                               btrfs_ino(inode), to_free, 0);
5072         }
5073         if (delalloc_lock)
5074                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5075         return ret;
5076 }
5077
5078 /**
5079  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
5080  * @inode: the inode to release the reservation for
5081  * @num_bytes: the number of bytes we're releasing
5082  *
5083  * This will release the metadata reservation for an inode.  This can be called
5084  * once we complete IO for a given set of bytes to release their metadata
5085  * reservations.
5086  */
5087 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
5088 {
5089         struct btrfs_root *root = BTRFS_I(inode)->root;
5090         u64 to_free = 0;
5091         unsigned dropped;
5092
5093         num_bytes = ALIGN(num_bytes, root->sectorsize);
5094         spin_lock(&BTRFS_I(inode)->lock);
5095         dropped = drop_outstanding_extent(inode);
5096
5097         if (num_bytes)
5098                 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
5099         spin_unlock(&BTRFS_I(inode)->lock);
5100         if (dropped > 0)
5101                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5102
5103         trace_btrfs_space_reservation(root->fs_info, "delalloc",
5104                                       btrfs_ino(inode), to_free, 0);
5105         if (root->fs_info->quota_enabled) {
5106                 btrfs_qgroup_free(root, num_bytes +
5107                                         dropped * root->leafsize);
5108         }
5109
5110         btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
5111                                 to_free);
5112 }
5113
5114 /**
5115  * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
5116  * @inode: inode we're writing to
5117  * @num_bytes: the number of bytes we want to allocate
5118  *
5119  * This will do the following things
5120  *
5121  * o reserve space in the data space info for num_bytes
5122  * o reserve space in the metadata space info based on number of outstanding
5123  *   extents and how much csums will be needed
5124  * o add to the inodes ->delalloc_bytes
5125  * o add it to the fs_info's delalloc inodes list.
5126  *
5127  * This will return 0 for success and -ENOSPC if there is no space left.
5128  */
5129 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
5130 {
5131         int ret;
5132
5133         ret = btrfs_check_data_free_space(inode, num_bytes);
5134         if (ret)
5135                 return ret;
5136
5137         ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
5138         if (ret) {
5139                 btrfs_free_reserved_data_space(inode, num_bytes);
5140                 return ret;
5141         }
5142
5143         return 0;
5144 }
5145
5146 /**
5147  * btrfs_delalloc_release_space - release data and metadata space for delalloc
5148  * @inode: inode we're releasing space for
5149  * @num_bytes: the number of bytes we want to free up
5150  *
5151  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
5152  * called in the case that we don't need the metadata AND data reservations
5153  * anymore, e.g. when there is an error or we insert an inline extent.
5154  *
5155  * This function will release the metadata space that was not used and will
5156  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
5157  * list if there are no delalloc bytes left.
5158  */
5159 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
5160 {
5161         btrfs_delalloc_release_metadata(inode, num_bytes);
5162         btrfs_free_reserved_data_space(inode, num_bytes);
5163 }
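
/*
 * Illustrative buffered-write pattern built on the two helpers above,
 * where prepare_one_page() is a hypothetical stand-in for the real
 * page dirtying work:
 *
 *	ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
 *	if (ret)
 *		return ret;
 *	ret = prepare_one_page(inode, pos);
 *	if (ret)
 *		btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
 *
 * On success the reservation is consumed when the delalloc range is
 * written back; only the failure path releases it explicitly.
 */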
5164
5165 static int update_block_group(struct btrfs_root *root,
5166                               u64 bytenr, u64 num_bytes, int alloc)
5167 {
5168         struct btrfs_block_group_cache *cache = NULL;
5169         struct btrfs_fs_info *info = root->fs_info;
5170         u64 total = num_bytes;
5171         u64 old_val;
5172         u64 byte_in_group;
5173         int factor;
5174
5175         /* block accounting for super block */
5176         spin_lock(&info->delalloc_root_lock);
5177         old_val = btrfs_super_bytes_used(info->super_copy);
5178         if (alloc)
5179                 old_val += num_bytes;
5180         else
5181                 old_val -= num_bytes;
5182         btrfs_set_super_bytes_used(info->super_copy, old_val);
5183         spin_unlock(&info->delalloc_root_lock);
5184
5185         while (total) {
5186                 cache = btrfs_lookup_block_group(info, bytenr);
5187                 if (!cache)
5188                         return -ENOENT;
5189                 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
5190                                     BTRFS_BLOCK_GROUP_RAID1 |
5191                                     BTRFS_BLOCK_GROUP_RAID10))
5192                         factor = 2;
5193                 else
5194                         factor = 1;
5195                 /*
5196                  * If this block group has free space cache written out, we
5197                  * need to make sure to load it if we are removing space.  This
5198                  * is because we need the unpinning stage to actually add the
5199                  * space back to the block group, otherwise we will leak space.
5200                  */
5201                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
5202                         cache_block_group(cache, 1);
5203
5204                 byte_in_group = bytenr - cache->key.objectid;
5205                 WARN_ON(byte_in_group > cache->key.offset);
5206
5207                 spin_lock(&cache->space_info->lock);
5208                 spin_lock(&cache->lock);
5209
5210                 if (btrfs_test_opt(root, SPACE_CACHE) &&
5211                     cache->disk_cache_state < BTRFS_DC_CLEAR)
5212                         cache->disk_cache_state = BTRFS_DC_CLEAR;
5213
5214                 cache->dirty = 1;
5215                 old_val = btrfs_block_group_used(&cache->item);
5216                 num_bytes = min(total, cache->key.offset - byte_in_group);
5217                 if (alloc) {
5218                         old_val += num_bytes;
5219                         btrfs_set_block_group_used(&cache->item, old_val);
5220                         cache->reserved -= num_bytes;
5221                         cache->space_info->bytes_reserved -= num_bytes;
5222                         cache->space_info->bytes_used += num_bytes;
5223                         cache->space_info->disk_used += num_bytes * factor;
5224                         spin_unlock(&cache->lock);
5225                         spin_unlock(&cache->space_info->lock);
5226                 } else {
5227                         old_val -= num_bytes;
5228                         btrfs_set_block_group_used(&cache->item, old_val);
5229                         cache->pinned += num_bytes;
5230                         cache->space_info->bytes_pinned += num_bytes;
5231                         cache->space_info->bytes_used -= num_bytes;
5232                         cache->space_info->disk_used -= num_bytes * factor;
5233                         spin_unlock(&cache->lock);
5234                         spin_unlock(&cache->space_info->lock);
5235
5236                         set_extent_dirty(info->pinned_extents,
5237                                          bytenr, bytenr + num_bytes - 1,
5238                                          GFP_NOFS | __GFP_NOFAIL);
5239                 }
5240                 btrfs_put_block_group(cache);
5241                 total -= num_bytes;
5242                 bytenr += num_bytes;
5243         }
5244         return 0;
5245 }
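
/*
 * Worked example (illustrative): freeing a 1MiB extent from a RAID1
 * block group (factor == 2) moves 1MiB from bytes_used to bytes_pinned
 * but drops disk_used by 2MiB, because both mirror copies come back.
 */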
5246
5247 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
5248 {
5249         struct btrfs_block_group_cache *cache;
5250         u64 bytenr;
5251
5252         spin_lock(&root->fs_info->block_group_cache_lock);
5253         bytenr = root->fs_info->first_logical_byte;
5254         spin_unlock(&root->fs_info->block_group_cache_lock);
5255
5256         if (bytenr < (u64)-1)
5257                 return bytenr;
5258
5259         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
5260         if (!cache)
5261                 return 0;
5262
5263         bytenr = cache->key.objectid;
5264         btrfs_put_block_group(cache);
5265
5266         return bytenr;
5267 }
5268
5269 static int pin_down_extent(struct btrfs_root *root,
5270                            struct btrfs_block_group_cache *cache,
5271                            u64 bytenr, u64 num_bytes, int reserved)
5272 {
5273         spin_lock(&cache->space_info->lock);
5274         spin_lock(&cache->lock);
5275         cache->pinned += num_bytes;
5276         cache->space_info->bytes_pinned += num_bytes;
5277         if (reserved) {
5278                 cache->reserved -= num_bytes;
5279                 cache->space_info->bytes_reserved -= num_bytes;
5280         }
5281         spin_unlock(&cache->lock);
5282         spin_unlock(&cache->space_info->lock);
5283
5284         set_extent_dirty(root->fs_info->pinned_extents, bytenr,
5285                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
5286         if (reserved)
5287                 trace_btrfs_reserved_extent_free(root, bytenr, num_bytes);
5288         return 0;
5289 }
5290
5291 /*
5292  * this function must be called within transaction
5293  */
5294 int btrfs_pin_extent(struct btrfs_root *root,
5295                      u64 bytenr, u64 num_bytes, int reserved)
5296 {
5297         struct btrfs_block_group_cache *cache;
5298
5299         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5300         BUG_ON(!cache); /* Logic error */
5301
5302         pin_down_extent(root, cache, bytenr, num_bytes, reserved);
5303
5304         btrfs_put_block_group(cache);
5305         return 0;
5306 }
5307
5308 /*
5309  * this function must be called within transaction
5310  */
5311 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
5312                                     u64 bytenr, u64 num_bytes)
5313 {
5314         struct btrfs_block_group_cache *cache;
5315         int ret;
5316
5317         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5318         if (!cache)
5319                 return -EINVAL;
5320
5321         /*
5322          * Pull in the free space cache (if any) so that our pin
5323          * removes the free space from the cache.  We have load_only set
5324          * to one because the slow code that reads in the free extents
5325          * does check the pinned extents.
5326          */
5327         cache_block_group(cache, 1);
5328
5329         pin_down_extent(root, cache, bytenr, num_bytes, 0);
5330
5331         /* remove us from the free space cache (if we're there at all) */
5332         ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
5333         btrfs_put_block_group(cache);
5334         return ret;
5335 }
5336
5337 static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes)
5338 {
5339         int ret;
5340         struct btrfs_block_group_cache *block_group;
5341         struct btrfs_caching_control *caching_ctl;
5342
5343         block_group = btrfs_lookup_block_group(root->fs_info, start);
5344         if (!block_group)
5345                 return -EINVAL;
5346
5347         cache_block_group(block_group, 0);
5348         caching_ctl = get_caching_control(block_group);
5349
5350         if (!caching_ctl) {
5351                 /* Logic error */
5352                 BUG_ON(!block_group_cache_done(block_group));
5353                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
5354         } else {
5355                 mutex_lock(&caching_ctl->mutex);
5356
5357                 if (start >= caching_ctl->progress) {
5358                         ret = add_excluded_extent(root, start, num_bytes);
5359                 } else if (start + num_bytes <= caching_ctl->progress) {
5360                         ret = btrfs_remove_free_space(block_group,
5361                                                       start, num_bytes);
5362                 } else {
5363                         num_bytes = caching_ctl->progress - start;
5364                         ret = btrfs_remove_free_space(block_group,
5365                                                       start, num_bytes);
5366                         if (ret)
5367                                 goto out_lock;
5368
5369                         num_bytes = (start + num_bytes) -
5370                                 caching_ctl->progress;
5371                         start = caching_ctl->progress;
5372                         ret = add_excluded_extent(root, start, num_bytes);
5373                 }
5374 out_lock:
5375                 mutex_unlock(&caching_ctl->mutex);
5376                 put_caching_control(caching_ctl);
5377         }
5378         btrfs_put_block_group(block_group);
5379         return ret;
5380 }
5381
5382 int btrfs_exclude_logged_extents(struct btrfs_root *log,
5383                                  struct extent_buffer *eb)
5384 {
5385         struct btrfs_file_extent_item *item;
5386         struct btrfs_key key;
5387         int found_type;
5388         int i;
5389
5390         if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS))
5391                 return 0;
5392
5393         for (i = 0; i < btrfs_header_nritems(eb); i++) {
5394                 btrfs_item_key_to_cpu(eb, &key, i);
5395                 if (key.type != BTRFS_EXTENT_DATA_KEY)
5396                         continue;
5397                 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
5398                 found_type = btrfs_file_extent_type(eb, item);
5399                 if (found_type == BTRFS_FILE_EXTENT_INLINE)
5400                         continue;
5401                 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
5402                         continue;
5403                 key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
5404                 key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
5405                 __exclude_logged_extent(log, key.objectid, key.offset);
5406         }
5407
5408         return 0;
5409 }
5410
5411 /**
5412  * btrfs_update_reserved_bytes - update the block_group and space info counters
5413  * @cache:      The cache we are manipulating
5414  * @num_bytes:  The number of bytes in question
5415  * @reserve:    One of the reservation enums
5416  *
5417  * This is called by the allocator when it reserves space, or by somebody who is
5418  * freeing space that was never actually used on disk.  For example, if you
5419  * reserve space for a new leaf in transaction A and free that leaf before
5420  * transaction A commits, you call this with @reserve set to RESERVE_FREE in
5421  * order to clear the reservation.
5422  *
5423  * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
5424  * ENOSPC accounting.  For data we handle the reservation through clearing the
5425  * delalloc bits in the io_tree.  We have to do this since we could end up
5426  * allocating less disk space for the amount of data we have reserved in the
5427  * case of compression.
5428  *
5429  * If this is a reservation and the block group has become read only, we
5430  * cannot make the reservation and return -EAGAIN; otherwise this function
5431  * always succeeds.
5432  */
5433 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
5434                                        u64 num_bytes, int reserve)
5435 {
5436         struct btrfs_space_info *space_info = cache->space_info;
5437         int ret = 0;
5438
5439         spin_lock(&space_info->lock);
5440         spin_lock(&cache->lock);
5441         if (reserve != RESERVE_FREE) {
5442                 if (cache->ro) {
5443                         ret = -EAGAIN;
5444                 } else {
5445                         cache->reserved += num_bytes;
5446                         space_info->bytes_reserved += num_bytes;
5447                         if (reserve == RESERVE_ALLOC) {
5448                                 trace_btrfs_space_reservation(cache->fs_info,
5449                                                 "space_info", space_info->flags,
5450                                                 num_bytes, 0);
5451                                 space_info->bytes_may_use -= num_bytes;
5452                         }
5453                 }
5454         } else {
5455                 if (cache->ro)
5456                         space_info->bytes_readonly += num_bytes;
5457                 cache->reserved -= num_bytes;
5458                 space_info->bytes_reserved -= num_bytes;
5459         }
5460         spin_unlock(&cache->lock);
5461         spin_unlock(&space_info->lock);
5462         return ret;
5463 }
5464
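/*
 * Called at transaction commit time: remember how far each in-flight
 * caching thread has progressed (everything below that point is safe to
 * return to the free space cache when we unpin), swap which freed_extents
 * tree collects newly pinned extents, and reset the per-space_info
 * total_bytes_pinned counters.
 */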
5465 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
5466                                 struct btrfs_root *root)
5467 {
5468         struct btrfs_fs_info *fs_info = root->fs_info;
5469         struct btrfs_caching_control *next;
5470         struct btrfs_caching_control *caching_ctl;
5471         struct btrfs_block_group_cache *cache;
5472         struct btrfs_space_info *space_info;
5473
5474         down_write(&fs_info->commit_root_sem);
5475
5476         list_for_each_entry_safe(caching_ctl, next,
5477                                  &fs_info->caching_block_groups, list) {
5478                 cache = caching_ctl->block_group;
5479                 if (block_group_cache_done(cache)) {
5480                         cache->last_byte_to_unpin = (u64)-1;
5481                         list_del_init(&caching_ctl->list);
5482                         put_caching_control(caching_ctl);
5483                 } else {
5484                         cache->last_byte_to_unpin = caching_ctl->progress;
5485                 }
5486         }
5487
5488         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5489                 fs_info->pinned_extents = &fs_info->freed_extents[1];
5490         else
5491                 fs_info->pinned_extents = &fs_info->freed_extents[0];
5492
5493         up_write(&fs_info->commit_root_sem);
5494
5495         list_for_each_entry_rcu(space_info, &fs_info->space_info, list)
5496                 percpu_counter_set(&space_info->total_bytes_pinned, 0);
5497
5498         update_global_block_rsv(fs_info);
5499 }
5500
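/*
 * Return a previously pinned range to the allocator.  Anything below a
 * block group's last_byte_to_unpin goes back into the free space cache;
 * the pinned counters are dropped either way, and freed space is used to
 * top up the global block reserve when it is not already full.
 */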
5501 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
5502 {
5503         struct btrfs_fs_info *fs_info = root->fs_info;
5504         struct btrfs_block_group_cache *cache = NULL;
5505         struct btrfs_space_info *space_info;
5506         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5507         u64 len;
5508         bool readonly;
5509
5510         while (start <= end) {
5511                 readonly = false;
5512                 if (!cache ||
5513                     start >= cache->key.objectid + cache->key.offset) {
5514                         if (cache)
5515                                 btrfs_put_block_group(cache);
5516                         cache = btrfs_lookup_block_group(fs_info, start);
5517                         BUG_ON(!cache); /* Logic error */
5518                 }
5519
5520                 len = cache->key.objectid + cache->key.offset - start;
5521                 len = min(len, end + 1 - start);
5522
5523                 if (start < cache->last_byte_to_unpin) {
5524                         len = min(len, cache->last_byte_to_unpin - start);
5525                         btrfs_add_free_space(cache, start, len);
5526                 }
5527
5528                 start += len;
5529                 space_info = cache->space_info;
5530
5531                 spin_lock(&space_info->lock);
5532                 spin_lock(&cache->lock);
5533                 cache->pinned -= len;
5534                 space_info->bytes_pinned -= len;
5535                 if (cache->ro) {
5536                         space_info->bytes_readonly += len;
5537                         readonly = true;
5538                 }
5539                 spin_unlock(&cache->lock);
5540                 if (!readonly && global_rsv->space_info == space_info) {
5541                         spin_lock(&global_rsv->lock);
5542                         if (!global_rsv->full) {
5543                                 len = min(len, global_rsv->size -
5544                                           global_rsv->reserved);
5545                                 global_rsv->reserved += len;
5546                                 space_info->bytes_may_use += len;
5547                                 if (global_rsv->reserved >= global_rsv->size)
5548                                         global_rsv->full = 1;
5549                         }
5550                         spin_unlock(&global_rsv->lock);
5551                 }
5552                 spin_unlock(&space_info->lock);
5553         }
5554
5555         if (cache)
5556                 btrfs_put_block_group(cache);
5557         return 0;
5558 }
5559
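/*
 * Late in the commit, after btrfs_prepare_extent_commit() has switched
 * fs_info->pinned_extents, empty the freed_extents tree that is no longer
 * receiving new pins: discard each range if -o discard is enabled, then
 * unpin it.
 */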
5560 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
5561                                struct btrfs_root *root)
5562 {
5563         struct btrfs_fs_info *fs_info = root->fs_info;
5564         struct extent_io_tree *unpin;
5565         u64 start;
5566         u64 end;
5567         int ret;
5568
5569         if (trans->aborted)
5570                 return 0;
5571
5572         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5573                 unpin = &fs_info->freed_extents[1];
5574         else
5575                 unpin = &fs_info->freed_extents[0];
5576
5577         while (1) {
5578                 ret = find_first_extent_bit(unpin, 0, &start, &end,
5579                                             EXTENT_DIRTY, NULL);
5580                 if (ret)
5581                         break;
5582
5583                 if (btrfs_test_opt(root, DISCARD))
5584                         ret = btrfs_discard_extent(root, start,
5585                                                    end + 1 - start, NULL);
5586
5587                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
5588                 unpin_extent_range(root, start, end);
5589                 cond_resched();
5590         }
5591
5592         return 0;
5593 }
5594
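/*
 * Adjust the total_bytes_pinned counter of the matching space_info.
 * Owners below BTRFS_FIRST_FREE_OBJECTID are tree blocks (system for the
 * chunk tree, metadata otherwise); everything else counts as data.
 */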
5595 static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
5596                              u64 owner, u64 root_objectid)
5597 {
5598         struct btrfs_space_info *space_info;
5599         u64 flags;
5600
5601         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
5602                 if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
5603                         flags = BTRFS_BLOCK_GROUP_SYSTEM;
5604                 else
5605                         flags = BTRFS_BLOCK_GROUP_METADATA;
5606         } else {
5607                 flags = BTRFS_BLOCK_GROUP_DATA;
5608         }
5609
5610         space_info = __find_space_info(fs_info, flags);
5611         BUG_ON(!space_info); /* Logic bug */
5612         percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
5613 }
5614
5615
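/*
 * Drop @refs_to_drop references to an extent.  This looks up the backref
 * and the extent item, removes the backref, and decrements the extent
 * item's reference count; once the count hits zero the extent item itself
 * (and, for data extents, the csums) is deleted and the space is returned
 * to the block group accounting via update_block_group().
 */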
5616 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
5617                                 struct btrfs_root *root,
5618                                 u64 bytenr, u64 num_bytes, u64 parent,
5619                                 u64 root_objectid, u64 owner_objectid,
5620                                 u64 owner_offset, int refs_to_drop,
5621                                 struct btrfs_delayed_extent_op *extent_op)
5622 {
5623         struct btrfs_key key;
5624         struct btrfs_path *path;
5625         struct btrfs_fs_info *info = root->fs_info;
5626         struct btrfs_root *extent_root = info->extent_root;
5627         struct extent_buffer *leaf;
5628         struct btrfs_extent_item *ei;
5629         struct btrfs_extent_inline_ref *iref;
5630         int ret;
5631         int is_data;
5632         int extent_slot = 0;
5633         int found_extent = 0;
5634         int num_to_del = 1;
5635         u32 item_size;
5636         u64 refs;
5637         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
5638                                                  SKINNY_METADATA);
5639
5640         path = btrfs_alloc_path();
5641         if (!path)
5642                 return -ENOMEM;
5643
5644         path->reada = 1;
5645         path->leave_spinning = 1;
5646
5647         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
5648         BUG_ON(!is_data && refs_to_drop != 1);
5649
5650         if (is_data)
5651                 skinny_metadata = 0;
5652
5653         ret = lookup_extent_backref(trans, extent_root, path, &iref,
5654                                     bytenr, num_bytes, parent,
5655                                     root_objectid, owner_objectid,
5656                                     owner_offset);
5657         if (ret == 0) {
5658                 extent_slot = path->slots[0];
5659                 while (extent_slot >= 0) {
5660                         btrfs_item_key_to_cpu(path->nodes[0], &key,
5661                                               extent_slot);
5662                         if (key.objectid != bytenr)
5663                                 break;
5664                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
5665                             key.offset == num_bytes) {
5666                                 found_extent = 1;
5667                                 break;
5668                         }
5669                         if (key.type == BTRFS_METADATA_ITEM_KEY &&
5670                             key.offset == owner_objectid) {
5671                                 found_extent = 1;
5672                                 break;
5673                         }
5674                         if (path->slots[0] - extent_slot > 5)
5675                                 break;
5676                         extent_slot--;
5677                 }
5678 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5679                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
5680                 if (found_extent && item_size < sizeof(*ei))
5681                         found_extent = 0;
5682 #endif
5683                 if (!found_extent) {
5684                         BUG_ON(iref);
5685                         ret = remove_extent_backref(trans, extent_root, path,
5686                                                     NULL, refs_to_drop,
5687                                                     is_data);
5688                         if (ret) {
5689                                 btrfs_abort_transaction(trans, extent_root, ret);
5690                                 goto out;
5691                         }
5692                         btrfs_release_path(path);
5693                         path->leave_spinning = 1;
5694
5695                         key.objectid = bytenr;
5696                         key.type = BTRFS_EXTENT_ITEM_KEY;
5697                         key.offset = num_bytes;
5698
5699                         if (!is_data && skinny_metadata) {
5700                                 key.type = BTRFS_METADATA_ITEM_KEY;
5701                                 key.offset = owner_objectid;
5702                         }
5703
5704                         ret = btrfs_search_slot(trans, extent_root,
5705                                                 &key, path, -1, 1);
5706                         if (ret > 0 && skinny_metadata && path->slots[0]) {
5707                                 /*
5708                                  * Couldn't find our skinny metadata item,
5709                                  * see if we have ye olde extent item.
5710                                  */
5711                                 path->slots[0]--;
5712                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
5713                                                       path->slots[0]);
5714                                 if (key.objectid == bytenr &&
5715                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
5716                                     key.offset == num_bytes)
5717                                         ret = 0;
5718                         }
5719
5720                         if (ret > 0 && skinny_metadata) {
5721                                 skinny_metadata = false;
5722                                 key.type = BTRFS_EXTENT_ITEM_KEY;
5723                                 key.offset = num_bytes;
5724                                 btrfs_release_path(path);
5725                                 ret = btrfs_search_slot(trans, extent_root,
5726                                                         &key, path, -1, 1);
5727                         }
5728
5729                         if (ret) {
5730                                 btrfs_err(info, "umm, got %d back from search, was looking for %llu",
5731                                         ret, bytenr);
5732                                 if (ret > 0)
5733                                         btrfs_print_leaf(extent_root,
5734                                                          path->nodes[0]);
5735                         }
5736                         if (ret < 0) {
5737                                 btrfs_abort_transaction(trans, extent_root, ret);
5738                                 goto out;
5739                         }
5740                         extent_slot = path->slots[0];
5741                 }
5742         } else if (WARN_ON(ret == -ENOENT)) {
5743                 btrfs_print_leaf(extent_root, path->nodes[0]);
5744                 btrfs_err(info,
5745                 "unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu",
5746                         bytenr, parent, root_objectid, owner_objectid,
5747                         owner_offset);
5748                 btrfs_abort_transaction(trans, extent_root, ret);
5749                 goto out;
5750         } else {
5751                 btrfs_abort_transaction(trans, extent_root, ret);
5752                 goto out;
5753         }
5754
5755         leaf = path->nodes[0];
5756         item_size = btrfs_item_size_nr(leaf, extent_slot);
5757 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5758         if (item_size < sizeof(*ei)) {
5759                 BUG_ON(found_extent || extent_slot != path->slots[0]);
5760                 ret = convert_extent_item_v0(trans, extent_root, path,
5761                                              owner_objectid, 0);
5762                 if (ret < 0) {
5763                         btrfs_abort_transaction(trans, extent_root, ret);
5764                         goto out;
5765                 }
5766
5767                 btrfs_release_path(path);
5768                 path->leave_spinning = 1;
5769
5770                 key.objectid = bytenr;
5771                 key.type = BTRFS_EXTENT_ITEM_KEY;
5772                 key.offset = num_bytes;
5773
5774                 ret = btrfs_search_slot(trans, extent_root, &key, path,
5775                                         -1, 1);
5776                 if (ret) {
5777                         btrfs_err(info, "umm, got %d back from search, was looking for %llu",
5778                                 ret, bytenr);
5779                         btrfs_print_leaf(extent_root, path->nodes[0]);
5780                 }
5781                 if (ret < 0) {
5782                         btrfs_abort_transaction(trans, extent_root, ret);
5783                         goto out;
5784                 }
5785
5786                 extent_slot = path->slots[0];
5787                 leaf = path->nodes[0];
5788                 item_size = btrfs_item_size_nr(leaf, extent_slot);
5789         }
5790 #endif
5791         BUG_ON(item_size < sizeof(*ei));
5792         ei = btrfs_item_ptr(leaf, extent_slot,
5793                             struct btrfs_extent_item);
5794         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
5795             key.type == BTRFS_EXTENT_ITEM_KEY) {
5796                 struct btrfs_tree_block_info *bi;
5797                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
5798                 bi = (struct btrfs_tree_block_info *)(ei + 1);
5799                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
5800         }
5801
5802         refs = btrfs_extent_refs(leaf, ei);
5803         if (refs < refs_to_drop) {
5804                 btrfs_err(info, "trying to drop %d refs but we only have %Lu "
5805                           "for bytenr %Lu", refs_to_drop, refs, bytenr);
5806                 ret = -EINVAL;
5807                 btrfs_abort_transaction(trans, extent_root, ret);
5808                 goto out;
5809         }
5810         refs -= refs_to_drop;
5811
5812         if (refs > 0) {
5813                 if (extent_op)
5814                         __run_delayed_extent_op(extent_op, leaf, ei);
5815                 /*
5816                  * In the case of inline back ref, reference count will
5817                  * be updated by remove_extent_backref
5818                  */
5819                 if (iref) {
5820                         BUG_ON(!found_extent);
5821                 } else {
5822                         btrfs_set_extent_refs(leaf, ei, refs);
5823                         btrfs_mark_buffer_dirty(leaf);
5824                 }
5825                 if (found_extent) {
5826                         ret = remove_extent_backref(trans, extent_root, path,
5827                                                     iref, refs_to_drop,
5828                                                     is_data);
5829                         if (ret) {
5830                                 btrfs_abort_transaction(trans, extent_root, ret);
5831                                 goto out;
5832                         }
5833                 }
5834                 add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
5835                                  root_objectid);
5836         } else {
5837                 if (found_extent) {
5838                         BUG_ON(is_data && refs_to_drop !=
5839                                extent_data_ref_count(root, path, iref));
5840                         if (iref) {
5841                                 BUG_ON(path->slots[0] != extent_slot);
5842                         } else {
5843                                 BUG_ON(path->slots[0] != extent_slot + 1);
5844                                 path->slots[0] = extent_slot;
5845                                 num_to_del = 2;
5846                         }
5847                 }
5848
5849                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
5850                                       num_to_del);
5851                 if (ret) {
5852                         btrfs_abort_transaction(trans, extent_root, ret);
5853                         goto out;
5854                 }
5855                 btrfs_release_path(path);
5856
5857                 if (is_data) {
5858                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
5859                         if (ret) {
5860                                 btrfs_abort_transaction(trans, extent_root, ret);
5861                                 goto out;
5862                         }
5863                 }
5864
5865                 ret = update_block_group(root, bytenr, num_bytes, 0);
5866                 if (ret) {
5867                         btrfs_abort_transaction(trans, extent_root, ret);
5868                         goto out;
5869                 }
5870         }
5871 out:
5872         btrfs_free_path(path);
5873         return ret;
5874 }
5875
5876 /*
5877  * when we free a block, it is possible (and likely) that we free the last
5878  * delayed ref for that extent as well.  This searches the delayed ref tree for
5879  * a given extent, and if there are no other delayed refs to be processed, it
5880  * removes it from the tree.
5881  */
5882 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
5883                                       struct btrfs_root *root, u64 bytenr)
5884 {
5885         struct btrfs_delayed_ref_head *head;
5886         struct btrfs_delayed_ref_root *delayed_refs;
5887         int ret = 0;
5888
5889         delayed_refs = &trans->transaction->delayed_refs;
5890         spin_lock(&delayed_refs->lock);
5891         head = btrfs_find_delayed_ref_head(trans, bytenr);
5892         if (!head)
5893                 goto out_delayed_unlock;
5894
5895         spin_lock(&head->lock);
5896         if (rb_first(&head->ref_root))
5897                 goto out;
5898
5899         if (head->extent_op) {
5900                 if (!head->must_insert_reserved)
5901                         goto out;
5902                 btrfs_free_delayed_extent_op(head->extent_op);
5903                 head->extent_op = NULL;
5904         }
5905
5906         /*
5907          * waiting for the lock here would deadlock.  If someone else has it
5908          * locked, they are already in the process of dropping it anyway.
5909          */
5910         if (!mutex_trylock(&head->mutex))
5911                 goto out;
5912
5913         /*
5914          * at this point we have a head with no other entries.  Go
5915          * ahead and process it.
5916          */
5917         head->node.in_tree = 0;
5918         rb_erase(&head->href_node, &delayed_refs->href_root);
5919
5920         atomic_dec(&delayed_refs->num_entries);
5921
5922         /*
5923          * we don't take a ref on the node because we're removing it from the
5924          * tree, so we just steal the ref the tree was holding.
5925          */
5926         delayed_refs->num_heads--;
5927         if (head->processing == 0)
5928                 delayed_refs->num_heads_ready--;
5929         head->processing = 0;
5930         spin_unlock(&head->lock);
5931         spin_unlock(&delayed_refs->lock);
5932
5933         BUG_ON(head->extent_op);
5934         if (head->must_insert_reserved)
5935                 ret = 1;
5936
5937         mutex_unlock(&head->mutex);
5938         btrfs_put_delayed_ref(&head->node);
5939         return ret;
5940 out:
5941         spin_unlock(&head->lock);
5942
5943 out_delayed_unlock:
5944         spin_unlock(&delayed_refs->lock);
5945         return 0;
5946 }
5947
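/*
 * Free a tree block.  A drop of its delayed ref is queued first (log tree
 * blocks skip the delayed ref machinery).  If this was the last reference
 * and the block was allocated in the running transaction, we try to
 * reclaim it immediately: a block that was never written out goes straight
 * back to the free space cache, anything else stays pinned until the
 * transaction commits.
 */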
5948 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
5949                            struct btrfs_root *root,
5950                            struct extent_buffer *buf,
5951                            u64 parent, int last_ref)
5952 {
5953         struct btrfs_block_group_cache *cache = NULL;
5954         int pin = 1;
5955         int ret;
5956
5957         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5958                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
5959                                         buf->start, buf->len,
5960                                         parent, root->root_key.objectid,
5961                                         btrfs_header_level(buf),
5962                                         BTRFS_DROP_DELAYED_REF, NULL, 0);
5963                 BUG_ON(ret); /* -ENOMEM */
5964         }
5965
5966         if (!last_ref)
5967                 return;
5968
5969         cache = btrfs_lookup_block_group(root->fs_info, buf->start);
5970
5971         if (btrfs_header_generation(buf) == trans->transid) {
5972                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5973                         ret = check_ref_cleanup(trans, root, buf->start);
5974                         if (!ret)
5975                                 goto out;
5976                 }
5977
5978                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
5979                         pin_down_extent(root, cache, buf->start, buf->len, 1);
5980                         goto out;
5981                 }
5982
5983                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
5984
5985                 btrfs_add_free_space(cache, buf->start, buf->len);
5986                 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE);
5987                 trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
5988                 pin = 0;
5989         }
5990 out:
5991         if (pin)
5992                 add_pinned_bytes(root->fs_info, buf->len,
5993                                  btrfs_header_level(buf),
5994                                  root->root_key.objectid);
5995
5996         /*
5997          * Deleting the buffer, clear the corrupt flag since it doesn't matter
5998          * anymore.
5999          */
6000         clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
6001         btrfs_put_block_group(cache);
6002 }
6003
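/*
 * Drop one reference on an extent.  Tree log blocks never have entries in
 * the extent tree, so they are pinned right away; everything else is
 * queued as a delayed ref and processed later.
 */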
6004 /* Can return -ENOMEM */
6005 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
6006                       u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
6007                       u64 owner, u64 offset, int for_cow)
6008 {
6009         int ret;
6010         struct btrfs_fs_info *fs_info = root->fs_info;
6011
6012         add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);
6013
6014         /*
6015          * tree log blocks never actually go into the extent allocation
6016          * tree, just update pinning info and exit early.
6017          */
6018         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
6019                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
6020                 /* unlocks the pinned mutex */
6021                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
6022                 ret = 0;
6023         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6024                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
6025                                         num_bytes,
6026                                         parent, root_objectid, (int)owner,
6027                                         BTRFS_DROP_DELAYED_REF, NULL, for_cow);
6028         } else {
6029                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
6030                                                 num_bytes,
6031                                                 parent, root_objectid, owner,
6032                                                 offset, BTRFS_DROP_DELAYED_REF,
6033                                                 NULL, for_cow);
6034         }
6035         return ret;
6036 }
6037
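/* Round an allocation offset up to the filesystem's stripe alignment. */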
6038 static u64 stripe_align(struct btrfs_root *root,
6039                         struct btrfs_block_group_cache *cache,
6040                         u64 val, u64 num_bytes)
6041 {
6042         u64 ret = ALIGN(val, root->stripesize);
6043         return ret;
6044 }
6045
6046 /*
6047  * when we wait for progress in the block group caching, it's because
6048  * our allocation attempt failed at least once.  So, we must sleep
6049  * and let some progress happen before we try again.
6050  *
6051  * This function will sleep at least once waiting for new free space to
6052  * show up, and then it will check the block group free space numbers
6053  * for our min num_bytes.  Another option is to have it go ahead
6054  * and look in the rbtree for a free extent of a given size, but this
6055  * is a good start.
6056  *
6057  * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
6058  * any of the information in this block group.
6059  */
6060 static noinline void
6061 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
6062                                 u64 num_bytes)
6063 {
6064         struct btrfs_caching_control *caching_ctl;
6065
6066         caching_ctl = get_caching_control(cache);
6067         if (!caching_ctl)
6068                 return;
6069
6070         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
6071                    (cache->free_space_ctl->free_space >= num_bytes));
6072
6073         put_caching_control(caching_ctl);
6074 }
6075
6076 static noinline int
6077 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
6078 {
6079         struct btrfs_caching_control *caching_ctl;
6080         int ret = 0;
6081
6082         caching_ctl = get_caching_control(cache);
6083         if (!caching_ctl)
6084                 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
6085
6086         wait_event(caching_ctl->wait, block_group_cache_done(cache));
6087         if (cache->cached == BTRFS_CACHE_ERROR)
6088                 ret = -EIO;
6089         put_caching_control(caching_ctl);
6090         return ret;
6091 }
6092
6093 int __get_raid_index(u64 flags)
6094 {
6095         if (flags & BTRFS_BLOCK_GROUP_RAID10)
6096                 return BTRFS_RAID_RAID10;
6097         else if (flags & BTRFS_BLOCK_GROUP_RAID1)
6098                 return BTRFS_RAID_RAID1;
6099         else if (flags & BTRFS_BLOCK_GROUP_DUP)
6100                 return BTRFS_RAID_DUP;
6101         else if (flags & BTRFS_BLOCK_GROUP_RAID0)
6102                 return BTRFS_RAID_RAID0;
6103         else if (flags & BTRFS_BLOCK_GROUP_RAID5)
6104                 return BTRFS_RAID_RAID5;
6105         else if (flags & BTRFS_BLOCK_GROUP_RAID6)
6106                 return BTRFS_RAID_RAID6;
6107
6108         return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
6109 }
6110
6111 int get_block_group_index(struct btrfs_block_group_cache *cache)
6112 {
6113         return __get_raid_index(cache->flags);
6114 }
6115
6116 static const char *btrfs_raid_type_names[BTRFS_NR_RAID_TYPES] = {
6117         [BTRFS_RAID_RAID10]     = "raid10",
6118         [BTRFS_RAID_RAID1]      = "raid1",
6119         [BTRFS_RAID_DUP]        = "dup",
6120         [BTRFS_RAID_RAID0]      = "raid0",
6121         [BTRFS_RAID_SINGLE]     = "single",
6122         [BTRFS_RAID_RAID5]      = "raid5",
6123         [BTRFS_RAID_RAID6]      = "raid6",
6124 };
6125
6126 static const char *get_raid_name(enum btrfs_raid_types type)
6127 {
6128         if (type >= BTRFS_NR_RAID_TYPES)
6129                 return NULL;
6130
6131         return btrfs_raid_type_names[type];
6132 }
6133
6134 enum btrfs_loop_type {
6135         LOOP_CACHING_NOWAIT = 0,
6136         LOOP_CACHING_WAIT = 1,
6137         LOOP_ALLOC_CHUNK = 2,
6138         LOOP_NO_EMPTY_SIZE = 3,
6139 };
6140
6141 /*
6142  * walks the btree of allocated extents and finds a hole of a given size.
6143  * The key ins is changed to record the hole:
6144  * ins->objectid == start position
6145  * ins->type == BTRFS_EXTENT_ITEM_KEY
6146  * ins->offset == the size of the hole.
6147  * Any available blocks before search_start are skipped.
6148  *
6149  * If there is no suitable free space, we record the max size of the
6150  * largest free space extent we encountered.
6151  */
6152 static noinline int find_free_extent(struct btrfs_root *orig_root,
6153                                      u64 num_bytes, u64 empty_size,
6154                                      u64 hint_byte, struct btrfs_key *ins,
6155                                      u64 flags)
6156 {
6157         int ret = 0;
6158         struct btrfs_root *root = orig_root->fs_info->extent_root;
6159         struct btrfs_free_cluster *last_ptr = NULL;
6160         struct btrfs_block_group_cache *block_group = NULL;
6161         u64 search_start = 0;
6162         u64 max_extent_size = 0;
6163         int empty_cluster = 2 * 1024 * 1024;
6164         struct btrfs_space_info *space_info;
6165         int loop = 0;
6166         int index = __get_raid_index(flags);
6167         int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
6168                 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
6169         bool failed_cluster_refill = false;
6170         bool failed_alloc = false;
6171         bool use_cluster = true;
6172         bool have_caching_bg = false;
6173
6174         WARN_ON(num_bytes < root->sectorsize);
6175         btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
6176         ins->objectid = 0;
6177         ins->offset = 0;
6178
6179         trace_find_free_extent(orig_root, num_bytes, empty_size, flags);
6180
6181         space_info = __find_space_info(root->fs_info, flags);
6182         if (!space_info) {
6183                 btrfs_err(root->fs_info, "No space info for %llu", flags);
6184                 return -ENOSPC;
6185         }
6186
6187         /*
6188          * If the space info is for both data and metadata it means we have a
6189          * small filesystem and we can't use the clustering stuff.
6190          */
6191         if (btrfs_mixed_space_info(space_info))
6192                 use_cluster = false;
6193
6194         if (flags & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
6195                 last_ptr = &root->fs_info->meta_alloc_cluster;
6196                 if (!btrfs_test_opt(root, SSD))
6197                         empty_cluster = 64 * 1024;
6198         }
6199
6200         if ((flags & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
6201             btrfs_test_opt(root, SSD)) {
6202                 last_ptr = &root->fs_info->data_alloc_cluster;
6203         }
6204
6205         if (last_ptr) {
6206                 spin_lock(&last_ptr->lock);
6207                 if (last_ptr->block_group)
6208                         hint_byte = last_ptr->window_start;
6209                 spin_unlock(&last_ptr->lock);
6210         }
6211
6212         search_start = max(search_start, first_logical_byte(root, 0));
6213         search_start = max(search_start, hint_byte);
6214
6215         if (!last_ptr)
6216                 empty_cluster = 0;
6217
6218         if (search_start == hint_byte) {
6219                 block_group = btrfs_lookup_block_group(root->fs_info,
6220                                                        search_start);
6221                 /*
6222                  * we don't want to use the block group if it doesn't match our
6223                  * allocation bits, or if it's not cached.
6224                  *
6225                  * However if we are re-searching with an ideal block group
6226                  * picked out then we don't care that the block group is cached.
6227                  */
6228                 if (block_group && block_group_bits(block_group, flags) &&
6229                     block_group->cached != BTRFS_CACHE_NO) {
6230                         down_read(&space_info->groups_sem);
6231                         if (list_empty(&block_group->list) ||
6232                             block_group->ro) {
6233                                 /*
6234                                  * someone is removing this block group,
6235                                  * we can't jump into the have_block_group
6236                                  * target because our list pointers are not
6237                                  * valid
6238                                  */
6239                                 btrfs_put_block_group(block_group);
6240                                 up_read(&space_info->groups_sem);
6241                         } else {
6242                                 index = get_block_group_index(block_group);
6243                                 goto have_block_group;
6244                         }
6245                 } else if (block_group) {
6246                         btrfs_put_block_group(block_group);
6247                 }
6248         }
6249 search:
6250         have_caching_bg = false;
6251         down_read(&space_info->groups_sem);
6252         list_for_each_entry(block_group, &space_info->block_groups[index],
6253                             list) {
6254                 u64 offset;
6255                 int cached;
6256
6257                 btrfs_get_block_group(block_group);
6258                 search_start = block_group->key.objectid;
6259
6260                 /*
6261                  * this can happen if we end up cycling through all the
6262                  * raid types, but we want to make sure we only allocate
6263                  * for the proper type.
6264                  */
6265                 if (!block_group_bits(block_group, flags)) {
6266                         u64 extra = BTRFS_BLOCK_GROUP_DUP |
6267                                     BTRFS_BLOCK_GROUP_RAID1 |
6268                                     BTRFS_BLOCK_GROUP_RAID5 |
6269                                     BTRFS_BLOCK_GROUP_RAID6 |
6270                                     BTRFS_BLOCK_GROUP_RAID10;
6271
6272                         /*
6273                          * if they asked for extra copies and this block group
6274                          * doesn't provide them, bail.  This does allow us to
6275                          * fill raid0 from raid1.
6276                          */
6277                         if ((flags & extra) && !(block_group->flags & extra))
6278                                 goto loop;
6279                 }
6280
6281 have_block_group:
6282                 cached = block_group_cache_done(block_group);
6283                 if (unlikely(!cached)) {
6284                         ret = cache_block_group(block_group, 0);
6285                         BUG_ON(ret < 0);
6286                         ret = 0;
6287                 }
6288
6289                 if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
6290                         goto loop;
6291                 if (unlikely(block_group->ro))
6292                         goto loop;
6293
6294                 /*
6295                  * Ok we want to try and use the cluster allocator, so
6296                  * let's look there
6297                  */
6298                 if (last_ptr) {
6299                         struct btrfs_block_group_cache *used_block_group;
6300                         unsigned long aligned_cluster;
6301                         /*
6302                          * the refill lock keeps out other
6303                          * people trying to start a new cluster
6304                          */
6305                         spin_lock(&last_ptr->refill_lock);
6306                         used_block_group = last_ptr->block_group;
6307                         if (used_block_group != block_group &&
6308                             (!used_block_group ||
6309                              used_block_group->ro ||
6310                              !block_group_bits(used_block_group, flags)))
6311                                 goto refill_cluster;
6312
6313                         if (used_block_group != block_group)
6314                                 btrfs_get_block_group(used_block_group);
6315
6316                         offset = btrfs_alloc_from_cluster(used_block_group,
6317                                                 last_ptr,
6318                                                 num_bytes,
6319                                                 used_block_group->key.objectid,
6320                                                 &max_extent_size);
6321                         if (offset) {
6322                                 /* we have a block, we're done */
6323                                 spin_unlock(&last_ptr->refill_lock);
6324                                 trace_btrfs_reserve_extent_cluster(root,
6325                                                 used_block_group,
6326                                                 search_start, num_bytes);
6327                                 if (used_block_group != block_group) {
6328                                         btrfs_put_block_group(block_group);
6329                                         block_group = used_block_group;
6330                                 }
6331                                 goto checks;
6332                         }
6333
6334                         WARN_ON(last_ptr->block_group != used_block_group);
6335                         if (used_block_group != block_group)
6336                                 btrfs_put_block_group(used_block_group);
6337 refill_cluster:
6338                         /* If we are on LOOP_NO_EMPTY_SIZE, we can't
6339                          * set up a new cluster, so let's just skip it
6340                          * and let the allocator find whatever block
6341                          * it can find.  If we reach this point, we
6342                          * will have tried the cluster allocator
6343                          * plenty of times and not have found
6344                          * anything, so we are likely way too
6345                          * fragmented for the clustering stuff to find
6346                          * anything.
6347                          *
6348                          * However, if the cluster is taken from the
6349                          * current block group, release the cluster
6350                          * first, so that we stand a better chance of
6351                          * succeeding in the unclustered
6352                          * allocation.  */
6353                         if (loop >= LOOP_NO_EMPTY_SIZE &&
6354                             last_ptr->block_group != block_group) {
6355                                 spin_unlock(&last_ptr->refill_lock);
6356                                 goto unclustered_alloc;
6357                         }
6358
6359                         /*
6360                          * this cluster didn't work out, free it and
6361                          * start over
6362                          */
6363                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
6364
6365                         if (loop >= LOOP_NO_EMPTY_SIZE) {
6366                                 spin_unlock(&last_ptr->refill_lock);
6367                                 goto unclustered_alloc;
6368                         }
6369
6370                         aligned_cluster = max_t(unsigned long,
6371                                                 empty_cluster + empty_size,
6372                                               block_group->full_stripe_len);
6373
6374                         /* allocate a cluster in this block group */
6375                         ret = btrfs_find_space_cluster(root, block_group,
6376                                                        last_ptr, search_start,
6377                                                        num_bytes,
6378                                                        aligned_cluster);
6379                         if (ret == 0) {
6380                                 /*
6381                                  * now pull our allocation out of this
6382                                  * cluster
6383                                  */
6384                                 offset = btrfs_alloc_from_cluster(block_group,
6385                                                         last_ptr,
6386                                                         num_bytes,
6387                                                         search_start,
6388                                                         &max_extent_size);
6389                                 if (offset) {
6390                                         /* we found one, proceed */
6391                                         spin_unlock(&last_ptr->refill_lock);
6392                                         trace_btrfs_reserve_extent_cluster(root,
6393                                                 block_group, search_start,
6394                                                 num_bytes);
6395                                         goto checks;
6396                                 }
6397                         } else if (!cached && loop > LOOP_CACHING_NOWAIT
6398                                    && !failed_cluster_refill) {
6399                                 spin_unlock(&last_ptr->refill_lock);
6400
6401                                 failed_cluster_refill = true;
6402                                 wait_block_group_cache_progress(block_group,
6403                                        num_bytes + empty_cluster + empty_size);
6404                                 goto have_block_group;
6405                         }
6406
6407                         /*
6408                          * at this point we either didn't find a cluster
6409                          * or we weren't able to allocate a block from our
6410                          * cluster.  Free the cluster we've been trying
6411                          * to use, and go to the next block group
6412                          */
6413                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
6414                         spin_unlock(&last_ptr->refill_lock);
6415                         goto loop;
6416                 }
6417
6418 unclustered_alloc:
6419                 spin_lock(&block_group->free_space_ctl->tree_lock);
6420                 if (cached &&
6421                     block_group->free_space_ctl->free_space <
6422                     num_bytes + empty_cluster + empty_size) {
6423                         if (block_group->free_space_ctl->free_space >
6424                             max_extent_size)
6425                                 max_extent_size =
6426                                         block_group->free_space_ctl->free_space;
6427                         spin_unlock(&block_group->free_space_ctl->tree_lock);
6428                         goto loop;
6429                 }
6430                 spin_unlock(&block_group->free_space_ctl->tree_lock);
6431
6432                 offset = btrfs_find_space_for_alloc(block_group, search_start,
6433                                                     num_bytes, empty_size,
6434                                                     &max_extent_size);
6435                 /*
6436                  * If we didn't find a chunk, and we haven't failed on this
6437                  * block group before, and this block group is in the middle of
6438                  * caching and we are ok with waiting, then go ahead and wait
6439                  * for progress to be made, and set failed_alloc to true.
6440                  *
6441                  * If failed_alloc is true then we've already waited on this
6442                  * block group once and should move on to the next block group.
6443                  */
6444                 if (!offset && !failed_alloc && !cached &&
6445                     loop > LOOP_CACHING_NOWAIT) {
6446                         wait_block_group_cache_progress(block_group,
6447                                                 num_bytes + empty_size);
6448                         failed_alloc = true;
6449                         goto have_block_group;
6450                 } else if (!offset) {
6451                         if (!cached)
6452                                 have_caching_bg = true;
6453                         goto loop;
6454                 }
6455 checks:
6456                 search_start = stripe_align(root, block_group,
6457                                             offset, num_bytes);
6458
6459                 /* move on to the next group */
6460                 if (search_start + num_bytes >
6461                     block_group->key.objectid + block_group->key.offset) {
6462                         btrfs_add_free_space(block_group, offset, num_bytes);
6463                         goto loop;
6464                 }
6465
6466                 if (offset < search_start)
6467                         btrfs_add_free_space(block_group, offset,
6468                                              search_start - offset);
6469                 BUG_ON(offset > search_start);
6470
6471                 ret = btrfs_update_reserved_bytes(block_group, num_bytes,
6472                                                   alloc_type);
6473                 if (ret == -EAGAIN) {
6474                         btrfs_add_free_space(block_group, offset, num_bytes);
6475                         goto loop;
6476                 }
6477
6478                 /* we are all good, lets return */
6479                 ins->objectid = search_start;
6480                 ins->offset = num_bytes;
6481
6482                 trace_btrfs_reserve_extent(orig_root, block_group,
6483                                            search_start, num_bytes);
6484                 btrfs_put_block_group(block_group);
6485                 break;
6486 loop:
6487                 failed_cluster_refill = false;
6488                 failed_alloc = false;
6489                 BUG_ON(index != get_block_group_index(block_group));
6490                 btrfs_put_block_group(block_group);
6491         }
6492         up_read(&space_info->groups_sem);
6493
6494         if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
6495                 goto search;
6496
6497         if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
6498                 goto search;
6499
6500         /*
6501          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
6502          *                      caching kthreads as we move along
6503          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
6504          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
6505          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
6506          *                      again
6507          */
6508         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
6509                 index = 0;
6510                 loop++;
6511                 if (loop == LOOP_ALLOC_CHUNK) {
6512                         struct btrfs_trans_handle *trans;
6513
6514                         trans = btrfs_join_transaction(root);
6515                         if (IS_ERR(trans)) {
6516                                 ret = PTR_ERR(trans);
6517                                 goto out;
6518                         }
6519
6520                         ret = do_chunk_alloc(trans, root, flags,
6521                                              CHUNK_ALLOC_FORCE);
6522                         /*
6523                          * Do not bail out on ENOSPC since we
6524                          * can do more things.
6525                          */
6526                         if (ret < 0 && ret != -ENOSPC)
6527                                 btrfs_abort_transaction(trans,
6528                                                         root, ret);
6529                         else
6530                                 ret = 0;
6531                         btrfs_end_transaction(trans, root);
6532                         if (ret)
6533                                 goto out;
6534                 }
6535
6536                 if (loop == LOOP_NO_EMPTY_SIZE) {
6537                         empty_size = 0;
6538                         empty_cluster = 0;
6539                 }
6540
6541                 goto search;
6542         } else if (!ins->objectid) {
6543                 ret = -ENOSPC;
6544         } else {
6545                 ret = 0;
6546         }
6547 out:
6548         if (ret == -ENOSPC)
6549                 ins->offset = max_extent_size;
6550         return ret;
6551 }
6552
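/*
 * ENOSPC debugging helper: print the counters of a space_info and,
 * if @dump_block_groups is set, every block group belonging to it.
 */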
6553 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
6554                             int dump_block_groups)
6555 {
6556         struct btrfs_block_group_cache *cache;
6557         int index = 0;
6558
6559         spin_lock(&info->lock);
6560         printk(KERN_INFO "BTRFS: space_info %llu has %llu free, is %sfull\n",
6561                info->flags,
6562                info->total_bytes - info->bytes_used - info->bytes_pinned -
6563                info->bytes_reserved - info->bytes_readonly,
6564                (info->full) ? "" : "not ");
6565         printk(KERN_INFO "BTRFS: space_info total=%llu, used=%llu, pinned=%llu, "
6566                "reserved=%llu, may_use=%llu, readonly=%llu\n",
6567                info->total_bytes, info->bytes_used, info->bytes_pinned,
6568                info->bytes_reserved, info->bytes_may_use,
6569                info->bytes_readonly);
6570         spin_unlock(&info->lock);
6571
6572         if (!dump_block_groups)
6573                 return;
6574
6575         down_read(&info->groups_sem);
6576 again:
6577         list_for_each_entry(cache, &info->block_groups[index], list) {
6578                 spin_lock(&cache->lock);
6579                 printk(KERN_INFO "BTRFS: "
6580                            "block group %llu has %llu bytes, "
6581                            "%llu used %llu pinned %llu reserved %s\n",
6582                        cache->key.objectid, cache->key.offset,
6583                        btrfs_block_group_used(&cache->item), cache->pinned,
6584                        cache->reserved, cache->ro ? "[readonly]" : "");
6585                 btrfs_dump_free_space(cache, bytes);
6586                 spin_unlock(&cache->lock);
6587         }
6588         if (++index < BTRFS_NR_RAID_TYPES)
6589                 goto again;
6590         up_read(&info->groups_sem);
6591 }
6592
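/*
 * Reserve a free extent of at least @min_alloc_size bytes, preferring
 * @num_bytes.  On ENOSPC the request is retried with smaller sizes (halved
 * and clamped to the largest free extent seen) until it shrinks down to
 * @min_alloc_size.  A minimal illustrative call, not taken from this file:
 *
 *	struct btrfs_key ins;
 *	int ret = btrfs_reserve_extent(root, num_bytes, root->sectorsize,
 *				       0, 0, &ins, 1);
 *
 * On success ins.objectid holds the start of the reserved range and
 * ins.offset its length.
 */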
6593 int btrfs_reserve_extent(struct btrfs_root *root,
6594                          u64 num_bytes, u64 min_alloc_size,
6595                          u64 empty_size, u64 hint_byte,
6596                          struct btrfs_key *ins, int is_data)
6597 {
6598         bool final_tried = false;
6599         u64 flags;
6600         int ret;
6601
6602         flags = btrfs_get_alloc_profile(root, is_data);
6603 again:
6604         WARN_ON(num_bytes < root->sectorsize);
6605         ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins,
6606                                flags);
6607
6608         if (ret == -ENOSPC) {
6609                 if (!final_tried && ins->offset) {
6610                         num_bytes = min(num_bytes >> 1, ins->offset);
6611                         num_bytes = round_down(num_bytes, root->sectorsize);
6612                         num_bytes = max(num_bytes, min_alloc_size);
6613                         if (num_bytes == min_alloc_size)
6614                                 final_tried = true;
6615                         goto again;
6616                 } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
6617                         struct btrfs_space_info *sinfo;
6618
6619                         sinfo = __find_space_info(root->fs_info, flags);
6620                         btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
6621                                 flags, num_bytes);
6622                         if (sinfo)
6623                                 dump_space_info(sinfo, num_bytes, 1);
6624                 }
6625         }
6626
6627         return ret;
6628 }
6629
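/*
 * Return a reserved extent to its block group.  With @pin set the range
 * is pinned and only becomes allocatable again once the transaction
 * commits; otherwise it goes straight back into the free space cache
 * and the block group's reserved counter is dropped.
 */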
6630 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
6631                                         u64 start, u64 len, int pin)
6632 {
6633         struct btrfs_block_group_cache *cache;
6634         int ret = 0;
6635
6636         cache = btrfs_lookup_block_group(root->fs_info, start);
6637         if (!cache) {
6638                 btrfs_err(root->fs_info, "Unable to find block group for %llu",
6639                         start);
6640                 return -ENOSPC;
6641         }
6642
6643         if (btrfs_test_opt(root, DISCARD))
6644                 ret = btrfs_discard_extent(root, start, len, NULL);
6645
6646         if (pin)
6647                 pin_down_extent(root, cache, start, len, 1);
6648         else {
6649                 btrfs_add_free_space(cache, start, len);
6650                 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
6651         }
6652         btrfs_put_block_group(cache);
6653
6654         trace_btrfs_reserved_extent_free(root, start, len);
6655
6656         return ret;
6657 }
6658
6659 int btrfs_free_reserved_extent(struct btrfs_root *root,
6660                                         u64 start, u64 len)
6661 {
6662         return __btrfs_free_reserved_extent(root, start, len, 0);
6663 }
6664
6665 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
6666                                        u64 start, u64 len)
6667 {
6668         return __btrfs_free_reserved_extent(root, start, len, 1);
6669 }
6670
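/*
 * Insert the extent item for a newly allocated data extent.  The first
 * backref is stored inline right behind the extent item in the same
 * leaf item: a shared data ref keyed on @parent when one is given, or
 * a keyed data ref (root/owner/offset) otherwise.
 */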
6671 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6672                                       struct btrfs_root *root,
6673                                       u64 parent, u64 root_objectid,
6674                                       u64 flags, u64 owner, u64 offset,
6675                                       struct btrfs_key *ins, int ref_mod)
6676 {
6677         int ret;
6678         struct btrfs_fs_info *fs_info = root->fs_info;
6679         struct btrfs_extent_item *extent_item;
6680         struct btrfs_extent_inline_ref *iref;
6681         struct btrfs_path *path;
6682         struct extent_buffer *leaf;
6683         int type;
6684         u32 size;
6685
6686         if (parent > 0)
6687                 type = BTRFS_SHARED_DATA_REF_KEY;
6688         else
6689                 type = BTRFS_EXTENT_DATA_REF_KEY;
6690
6691         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
6692
6693         path = btrfs_alloc_path();
6694         if (!path)
6695                 return -ENOMEM;
6696
6697         path->leave_spinning = 1;
6698         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6699                                       ins, size);
6700         if (ret) {
6701                 btrfs_free_path(path);
6702                 return ret;
6703         }
6704
6705         leaf = path->nodes[0];
6706         extent_item = btrfs_item_ptr(leaf, path->slots[0],
6707                                      struct btrfs_extent_item);
6708         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
6709         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6710         btrfs_set_extent_flags(leaf, extent_item,
6711                                flags | BTRFS_EXTENT_FLAG_DATA);
6712
6713         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
6714         btrfs_set_extent_inline_ref_type(leaf, iref, type);
6715         if (parent > 0) {
6716                 struct btrfs_shared_data_ref *ref;
6717                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
6718                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6719                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
6720         } else {
6721                 struct btrfs_extent_data_ref *ref;
6722                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
6723                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
6724                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
6725                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
6726                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
6727         }
6728
6729         btrfs_mark_buffer_dirty(path->nodes[0]);
6730         btrfs_free_path(path);
6731
6732         ret = update_block_group(root, ins->objectid, ins->offset, 1);
6733         if (ret) { /* -ENOENT, logic error */
6734                 btrfs_err(fs_info, "update block group failed for %llu %llu",
6735                         ins->objectid, ins->offset);
6736                 BUG();
6737         }
6738         trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
6739         return ret;
6740 }
6741
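/*
 * Insert the extent item for a newly allocated tree block.  On
 * filesystems with the SKINNY_METADATA incompat bit the separate
 * btrfs_tree_block_info is omitted (the key carries the level), so the
 * item is just the extent item plus one inline ref.
 */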
6742 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
6743                                      struct btrfs_root *root,
6744                                      u64 parent, u64 root_objectid,
6745                                      u64 flags, struct btrfs_disk_key *key,
6746                                      int level, struct btrfs_key *ins)
6747 {
6748         int ret;
6749         struct btrfs_fs_info *fs_info = root->fs_info;
6750         struct btrfs_extent_item *extent_item;
6751         struct btrfs_tree_block_info *block_info;
6752         struct btrfs_extent_inline_ref *iref;
6753         struct btrfs_path *path;
6754         struct extent_buffer *leaf;
6755         u32 size = sizeof(*extent_item) + sizeof(*iref);
6756         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
6757                                                  SKINNY_METADATA);
6758
6759         if (!skinny_metadata)
6760                 size += sizeof(*block_info);
6761
6762         path = btrfs_alloc_path();
6763         if (!path) {
6764                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
6765                                                    root->leafsize);
6766                 return -ENOMEM;
6767         }
6768
6769         path->leave_spinning = 1;
6770         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6771                                       ins, size);
6772         if (ret) {
6773                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
6774                                                    root->leafsize);
6775                 btrfs_free_path(path);
6776                 return ret;
6777         }
6778
6779         leaf = path->nodes[0];
6780         extent_item = btrfs_item_ptr(leaf, path->slots[0],
6781                                      struct btrfs_extent_item);
6782         btrfs_set_extent_refs(leaf, extent_item, 1);
6783         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6784         btrfs_set_extent_flags(leaf, extent_item,
6785                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
6786
6787         if (skinny_metadata) {
6788                 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
6789         } else {
6790                 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
6791                 btrfs_set_tree_block_key(leaf, block_info, key);
6792                 btrfs_set_tree_block_level(leaf, block_info, level);
6793                 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
6794         }
6795
6796         if (parent > 0) {
6797                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
6798                 btrfs_set_extent_inline_ref_type(leaf, iref,
6799                                                  BTRFS_SHARED_BLOCK_REF_KEY);
6800                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6801         } else {
6802                 btrfs_set_extent_inline_ref_type(leaf, iref,
6803                                                  BTRFS_TREE_BLOCK_REF_KEY);
6804                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
6805         }
6806
6807         btrfs_mark_buffer_dirty(leaf);
6808         btrfs_free_path(path);
6809
6810         ret = update_block_group(root, ins->objectid, root->leafsize, 1);
6811         if (ret) { /* -ENOENT, logic error */
6812                 btrfs_err(fs_info, "update block group failed for %llu %llu",
6813                         ins->objectid, (u64)root->leafsize);
6814                 BUG();
6815         }
6816
6817         trace_btrfs_reserved_extent_alloc(root, ins->objectid, root->leafsize);
6818         return ret;
6819 }
6820
6821 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6822                                      struct btrfs_root *root,
6823                                      u64 root_objectid, u64 owner,
6824                                      u64 offset, struct btrfs_key *ins)
6825 {
6826         int ret;
6827
6828         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
6829
6830         ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
6831                                          ins->offset, 0,
6832                                          root_objectid, owner, offset,
6833                                          BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
6834         return ret;
6835 }
6836
6837 /*
6838  * this is used by the tree logging recovery code.  It records that
6839  * an extent has been allocated and makes sure to clear the free
6840  * space cache bits as well
6841  */
6842 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
6843                                    struct btrfs_root *root,
6844                                    u64 root_objectid, u64 owner, u64 offset,
6845                                    struct btrfs_key *ins)
6846 {
6847         int ret;
6848         struct btrfs_block_group_cache *block_group;
6849
6850         /*
6851          * Mixed block groups are excluded before the log is processed, so
6852          * we only need to do the exclude dance if this fs isn't mixed.
6853          */
6854         if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
6855                 ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
6856                 if (ret)
6857                         return ret;
6858         }
6859
6860         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
6861         if (!block_group)
6862                 return -EINVAL;
6863
6864         ret = btrfs_update_reserved_bytes(block_group, ins->offset,
6865                                           RESERVE_ALLOC_NO_ACCOUNT);
6866         BUG_ON(ret); /* logic error */
6867         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
6868                                          0, owner, offset, ins, 1);
6869         btrfs_put_block_group(block_group);
6870         return ret;
6871 }
6872
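/*
 * Set up a freshly allocated tree block: create the extent buffer,
 * stamp the transid, and mark the range dirty in the right extent io
 * tree so it is written out with the transaction (or with the matching
 * log transaction for log tree blocks).  Returns the buffer
 * write-locked and blocking.
 */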
6873 static struct extent_buffer *
6874 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
6875                       u64 bytenr, u32 blocksize, int level)
6876 {
6877         struct extent_buffer *buf;
6878
6879         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
6880         if (!buf)
6881                 return ERR_PTR(-ENOMEM);
6882         btrfs_set_header_generation(buf, trans->transid);
6883         btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
6884         btrfs_tree_lock(buf);
6885         clean_tree_block(trans, root, buf);
6886         clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
6887
6888         btrfs_set_lock_blocking(buf);
6889         btrfs_set_buffer_uptodate(buf);
6890
6891         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
6892                 /*
6893                  * we allow two log transactions at a time, so use different
6894                  * EXTENT bits to differentiate dirty pages.
6895                  */
6896                 if (root->log_transid % 2 == 0)
6897                         set_extent_dirty(&root->dirty_log_pages, buf->start,
6898                                         buf->start + buf->len - 1, GFP_NOFS);
6899                 else
6900                         set_extent_new(&root->dirty_log_pages, buf->start,
6901                                         buf->start + buf->len - 1, GFP_NOFS);
6902         } else {
6903                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
6904                          buf->start + buf->len - 1, GFP_NOFS);
6905         }
6906         trans->blocks_used++;
6907         /* this returns a buffer locked for blocking */
6908         return buf;
6909 }
6910
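/*
 * Pick the block reservation backing a new tree block and consume
 * @blocksize bytes from it.  The fallback order is roughly:
 *
 *   1) the rsv attached to the transaction/root;
 *   2) refresh the global rsv accounting once and retry (global rsv only);
 *   3) a fresh BTRFS_RESERVE_NO_FLUSH metadata reservation;
 *   4) steal from the global rsv when the space_info matches.
 *
 * Returns the rsv that was actually charged, or an ERR_PTR.
 */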
6911 static struct btrfs_block_rsv *
6912 use_block_rsv(struct btrfs_trans_handle *trans,
6913               struct btrfs_root *root, u32 blocksize)
6914 {
6915         struct btrfs_block_rsv *block_rsv;
6916         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
6917         int ret;
6918         bool global_updated = false;
6919
6920         block_rsv = get_block_rsv(trans, root);
6921
6922         if (unlikely(block_rsv->size == 0))
6923                 goto try_reserve;
6924 again:
6925         ret = block_rsv_use_bytes(block_rsv, blocksize);
6926         if (!ret)
6927                 return block_rsv;
6928
6929         if (block_rsv->failfast)
6930                 return ERR_PTR(ret);
6931
6932         if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
6933                 global_updated = true;
6934                 update_global_block_rsv(root->fs_info);
6935                 goto again;
6936         }
6937
6938         if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
6939                 static DEFINE_RATELIMIT_STATE(_rs,
6940                                 DEFAULT_RATELIMIT_INTERVAL * 10,
6941                                 /*DEFAULT_RATELIMIT_BURST*/ 1);
6942                 if (__ratelimit(&_rs))
6943                         WARN(1, KERN_DEBUG
6944                                 "BTRFS: block rsv returned %d\n", ret);
6945         }
6946 try_reserve:
6947         ret = reserve_metadata_bytes(root, block_rsv, blocksize,
6948                                      BTRFS_RESERVE_NO_FLUSH);
6949         if (!ret)
6950                 return block_rsv;
6951         /*
6952          * If we couldn't reserve metadata bytes, try to use some from the
6953          * global reserve, provided this rsv shares the global reserve's
6954          * space_info.
6955          */
6956         if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
6957             block_rsv->space_info == global_rsv->space_info) {
6958                 ret = block_rsv_use_bytes(global_rsv, blocksize);
6959                 if (!ret)
6960                         return global_rsv;
6961         }
6962         return ERR_PTR(ret);
6963 }
6964
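/*
 * Undo use_block_rsv(): give @blocksize bytes back to @block_rsv and
 * release anything reserved above the rsv's size to the space_info.
 */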
6965 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
6966                             struct btrfs_block_rsv *block_rsv, u32 blocksize)
6967 {
6968         block_rsv_add_bytes(block_rsv, blocksize, 0);
6969         block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
6970 }
6971
6972 /*
6973  * finds a free extent and does all the dirty work required for allocation.
6974  * returns the key for the extent through ins, and a tree buffer for
6975  * the first block of the extent.
6976  *
6977  * returns the tree buffer or an ERR_PTR on error.
6978  */
6979 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
6980                                         struct btrfs_root *root, u32 blocksize,
6981                                         u64 parent, u64 root_objectid,
6982                                         struct btrfs_disk_key *key, int level,
6983                                         u64 hint, u64 empty_size)
6984 {
6985         struct btrfs_key ins;
6986         struct btrfs_block_rsv *block_rsv;
6987         struct extent_buffer *buf;
6988         u64 flags = 0;
6989         int ret;
6990         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
6991                                                  SKINNY_METADATA);
6992
6993         block_rsv = use_block_rsv(trans, root, blocksize);
6994         if (IS_ERR(block_rsv))
6995                 return ERR_CAST(block_rsv);
6996
6997         ret = btrfs_reserve_extent(root, blocksize, blocksize,
6998                                    empty_size, hint, &ins, 0);
6999         if (ret) {
7000                 unuse_block_rsv(root->fs_info, block_rsv, blocksize);
7001                 return ERR_PTR(ret);
7002         }
7003
7004         buf = btrfs_init_new_buffer(trans, root, ins.objectid,
7005                                     blocksize, level);
7006         BUG_ON(IS_ERR(buf)); /* -ENOMEM */
7007
7008         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
7009                 if (parent == 0)
7010                         parent = ins.objectid;
7011                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
7012         } else
7013                 BUG_ON(parent > 0);
7014
7015         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
7016                 struct btrfs_delayed_extent_op *extent_op;
7017                 extent_op = btrfs_alloc_delayed_extent_op();
7018                 BUG_ON(!extent_op); /* -ENOMEM */
7019                 if (key)
7020                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
7021                 else
7022                         memset(&extent_op->key, 0, sizeof(extent_op->key));
7023                 extent_op->flags_to_set = flags;
7024                 if (skinny_metadata)
7025                         extent_op->update_key = 0;
7026                 else
7027                         extent_op->update_key = 1;
7028                 extent_op->update_flags = 1;
7029                 extent_op->is_data = 0;
7030                 extent_op->level = level;
7031
7032                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
7033                                         ins.objectid,
7034                                         ins.offset, parent, root_objectid,
7035                                         level, BTRFS_ADD_DELAYED_EXTENT,
7036                                         extent_op, 0);
7037                 BUG_ON(ret); /* -ENOMEM */
7038         }
7039         return buf;
7040 }
7041
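/*
 * State for walking a subvolume tree top-down while dropping it.  The
 * walk runs in one of two stages: DROP_REFERENCE frees blocks that are
 * only referenced by this tree, while UPDATE_BACKREF rewrites backrefs
 * for a shared subtree before the walk switches back to dropping.
 * refs[] and flags[] cache the extent refcount and flags per level,
 * and update_progress remembers where the backref update left off.
 */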
7042 struct walk_control {
7043         u64 refs[BTRFS_MAX_LEVEL];
7044         u64 flags[BTRFS_MAX_LEVEL];
7045         struct btrfs_key update_progress;
7046         int stage;
7047         int level;
7048         int shared_level;
7049         int update_ref;
7050         int keep_locks;
7051         int reada_slot;
7052         int reada_count;
7053         int for_reloc;
7054 };
7055
7056 #define DROP_REFERENCE  1
7057 #define UPDATE_BACKREF  2
7058
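/*
 * Read ahead the children of the node at path->slots[wc->level].  The
 * window is adaptive: if we re-enter before the previous window was
 * consumed it shrinks to 2/3, otherwise it grows by 3/2, capped at the
 * number of pointers in a node.  E.g. a window of 32 becomes 21 after
 * an early re-entry and 48 after a fully consumed one.
 */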
7059 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
7060                                      struct btrfs_root *root,
7061                                      struct walk_control *wc,
7062                                      struct btrfs_path *path)
7063 {
7064         u64 bytenr;
7065         u64 generation;
7066         u64 refs;
7067         u64 flags;
7068         u32 nritems;
7069         u32 blocksize;
7070         struct btrfs_key key;
7071         struct extent_buffer *eb;
7072         int ret;
7073         int slot;
7074         int nread = 0;
7075
7076         if (path->slots[wc->level] < wc->reada_slot) {
7077                 wc->reada_count = wc->reada_count * 2 / 3;
7078                 wc->reada_count = max(wc->reada_count, 2);
7079         } else {
7080                 wc->reada_count = wc->reada_count * 3 / 2;
7081                 wc->reada_count = min_t(int, wc->reada_count,
7082                                         BTRFS_NODEPTRS_PER_BLOCK(root));
7083         }
7084
7085         eb = path->nodes[wc->level];
7086         nritems = btrfs_header_nritems(eb);
7087         blocksize = btrfs_level_size(root, wc->level - 1);
7088
7089         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
7090                 if (nread >= wc->reada_count)
7091                         break;
7092
7093                 cond_resched();
7094                 bytenr = btrfs_node_blockptr(eb, slot);
7095                 generation = btrfs_node_ptr_generation(eb, slot);
7096
7097                 if (slot == path->slots[wc->level])
7098                         goto reada;
7099
7100                 if (wc->stage == UPDATE_BACKREF &&
7101                     generation <= root->root_key.offset)
7102                         continue;
7103
7104                 /* We don't lock the tree block, it's OK to be racy here */
7105                 ret = btrfs_lookup_extent_info(trans, root, bytenr,
7106                                                wc->level - 1, 1, &refs,
7107                                                &flags);
7108                 /* We don't care about errors in readahead. */
7109                 if (ret < 0)
7110                         continue;
7111                 BUG_ON(refs == 0);
7112
7113                 if (wc->stage == DROP_REFERENCE) {
7114                         if (refs == 1)
7115                                 goto reada;
7116
7117                         if (wc->level == 1 &&
7118                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7119                                 continue;
7120                         if (!wc->update_ref ||
7121                             generation <= root->root_key.offset)
7122                                 continue;
7123                         btrfs_node_key_to_cpu(eb, &key, slot);
7124                         ret = btrfs_comp_cpu_keys(&key,
7125                                                   &wc->update_progress);
7126                         if (ret < 0)
7127                                 continue;
7128                 } else {
7129                         if (wc->level == 1 &&
7130                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7131                                 continue;
7132                 }
7133 reada:
7134                 ret = readahead_tree_block(root, bytenr, blocksize,
7135                                            generation);
7136                 if (ret)
7137                         break;
7138                 nread++;
7139         }
7140         wc->reada_slot = slot;
7141 }
7142
7143 /*
7144  * helper to process tree block while walking down the tree.
7145  *
7146  * when wc->stage == UPDATE_BACKREF, this function updates
7147  * back refs for pointers in the block.
7148  *
7149  * NOTE: return value 1 means we should stop walking down.
7150  */
7151 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
7152                                    struct btrfs_root *root,
7153                                    struct btrfs_path *path,
7154                                    struct walk_control *wc, int lookup_info)
7155 {
7156         int level = wc->level;
7157         struct extent_buffer *eb = path->nodes[level];
7158         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
7159         int ret;
7160
7161         if (wc->stage == UPDATE_BACKREF &&
7162             btrfs_header_owner(eb) != root->root_key.objectid)
7163                 return 1;
7164
7165         /*
7166          * when reference count of tree block is 1, it won't increase
7167          * again. once full backref flag is set, we never clear it.
7168          */
7169         if (lookup_info &&
7170             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
7171              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
7172                 BUG_ON(!path->locks[level]);
7173                 ret = btrfs_lookup_extent_info(trans, root,
7174                                                eb->start, level, 1,
7175                                                &wc->refs[level],
7176                                                &wc->flags[level]);
7177                 BUG_ON(ret == -ENOMEM);
7178                 if (ret)
7179                         return ret;
7180                 BUG_ON(wc->refs[level] == 0);
7181         }
7182
7183         if (wc->stage == DROP_REFERENCE) {
7184                 if (wc->refs[level] > 1)
7185                         return 1;
7186
7187                 if (path->locks[level] && !wc->keep_locks) {
7188                         btrfs_tree_unlock_rw(eb, path->locks[level]);
7189                         path->locks[level] = 0;
7190                 }
7191                 return 0;
7192         }
7193
7194         /* wc->stage == UPDATE_BACKREF */
7195         if (!(wc->flags[level] & flag)) {
7196                 BUG_ON(!path->locks[level]);
7197                 ret = btrfs_inc_ref(trans, root, eb, 1, wc->for_reloc);
7198                 BUG_ON(ret); /* -ENOMEM */
7199                 ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc);
7200                 BUG_ON(ret); /* -ENOMEM */
7201                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
7202                                                   eb->len, flag,
7203                                                   btrfs_header_level(eb), 0);
7204                 BUG_ON(ret); /* -ENOMEM */
7205                 wc->flags[level] |= flag;
7206         }
7207
7208         /*
7209          * the block is shared by multiple trees, so it's not good to
7210          * keep the tree lock
7211          */
7212         if (path->locks[level] && level > 0) {
7213                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7214                 path->locks[level] = 0;
7215         }
7216         return 0;
7217 }
7218
7219 /*
7220  * helper to process tree block pointer.
7221  *
7222  * when wc->stage == DROP_REFERENCE, this function checks
7223  * reference count of the block pointed to. if the block
7224  * is shared and we need to update back refs for the subtree
7225  * rooted at the block, this function changes wc->stage to
7226  * UPDATE_BACKREF. if the block is shared and there is no
7227  * need to update backrefs, this function drops the reference
7228  * to the block.
7229  *
7230  * NOTE: return value 1 means we should stop walking down.
7231  */
7232 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
7233                                  struct btrfs_root *root,
7234                                  struct btrfs_path *path,
7235                                  struct walk_control *wc, int *lookup_info)
7236 {
7237         u64 bytenr;
7238         u64 generation;
7239         u64 parent;
7240         u32 blocksize;
7241         struct btrfs_key key;
7242         struct extent_buffer *next;
7243         int level = wc->level;
7244         int reada = 0;
7245         int ret = 0;
7246
7247         generation = btrfs_node_ptr_generation(path->nodes[level],
7248                                                path->slots[level]);
7249         /*
7250          * if the lower level block was created before the snapshot
7251          * was created, we know there is no need to update back refs
7252          * for the subtree
7253          */
7254         if (wc->stage == UPDATE_BACKREF &&
7255             generation <= root->root_key.offset) {
7256                 *lookup_info = 1;
7257                 return 1;
7258         }
7259
7260         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
7261         blocksize = btrfs_level_size(root, level - 1);
7262
7263         next = btrfs_find_tree_block(root, bytenr, blocksize);
7264         if (!next) {
7265                 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
7266                 if (!next)
7267                         return -ENOMEM;
7268                 btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
7269                                                level - 1);
7270                 reada = 1;
7271         }
7272         btrfs_tree_lock(next);
7273         btrfs_set_lock_blocking(next);
7274
7275         ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
7276                                        &wc->refs[level - 1],
7277                                        &wc->flags[level - 1]);
7278         if (ret < 0) {
7279                 btrfs_tree_unlock(next);
7280                 return ret;
7281         }
7282
7283         if (unlikely(wc->refs[level - 1] == 0)) {
7284                 btrfs_err(root->fs_info, "Missing references.");
7285                 BUG();
7286         }
7287         *lookup_info = 0;
7288
7289         if (wc->stage == DROP_REFERENCE) {
7290                 if (wc->refs[level - 1] > 1) {
7291                         if (level == 1 &&
7292                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7293                                 goto skip;
7294
7295                         if (!wc->update_ref ||
7296                             generation <= root->root_key.offset)
7297                                 goto skip;
7298
7299                         btrfs_node_key_to_cpu(path->nodes[level], &key,
7300                                               path->slots[level]);
7301                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
7302                         if (ret < 0)
7303                                 goto skip;
7304
7305                         wc->stage = UPDATE_BACKREF;
7306                         wc->shared_level = level - 1;
7307                 }
7308         } else {
7309                 if (level == 1 &&
7310                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7311                         goto skip;
7312         }
7313
7314         if (!btrfs_buffer_uptodate(next, generation, 0)) {
7315                 btrfs_tree_unlock(next);
7316                 free_extent_buffer(next);
7317                 next = NULL;
7318                 *lookup_info = 1;
7319         }
7320
7321         if (!next) {
7322                 if (reada && level == 1)
7323                         reada_walk_down(trans, root, wc, path);
7324                 next = read_tree_block(root, bytenr, blocksize, generation);
7325                 if (!next || !extent_buffer_uptodate(next)) {
7326                         free_extent_buffer(next);
7327                         return -EIO;
7328                 }
7329                 btrfs_tree_lock(next);
7330                 btrfs_set_lock_blocking(next);
7331         }
7332
7333         level--;
7334         BUG_ON(level != btrfs_header_level(next));
7335         path->nodes[level] = next;
7336         path->slots[level] = 0;
7337         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7338         wc->level = level;
7339         if (wc->level == 1)
7340                 wc->reada_slot = 0;
7341         return 0;
7342 skip:
7343         wc->refs[level - 1] = 0;
7344         wc->flags[level - 1] = 0;
7345         if (wc->stage == DROP_REFERENCE) {
7346                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
7347                         parent = path->nodes[level]->start;
7348                 } else {
7349                         BUG_ON(root->root_key.objectid !=
7350                                btrfs_header_owner(path->nodes[level]));
7351                         parent = 0;
7352                 }
7353
7354                 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
7355                                 root->root_key.objectid, level - 1, 0, 0);
7356                 BUG_ON(ret); /* -ENOMEM */
7357         }
7358         btrfs_tree_unlock(next);
7359         free_extent_buffer(next);
7360         *lookup_info = 1;
7361         return 1;
7362 }
7363
7364 /*
7365  * helper to process tree block while walking up the tree.
7366  *
7367  * when wc->stage == DROP_REFERENCE, this function drops
7368  * reference count on the block.
7369  *
7370  * when wc->stage == UPDATE_BACKREF, this function changes
7371  * wc->stage back to DROP_REFERENCE if we changed wc->stage
7372  * to UPDATE_BACKREF previously while processing the block.
7373  *
7374  * NOTE: return value 1 means we should stop walking up.
7375  */
7376 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
7377                                  struct btrfs_root *root,
7378                                  struct btrfs_path *path,
7379                                  struct walk_control *wc)
7380 {
7381         int ret;
7382         int level = wc->level;
7383         struct extent_buffer *eb = path->nodes[level];
7384         u64 parent = 0;
7385
7386         if (wc->stage == UPDATE_BACKREF) {
7387                 BUG_ON(wc->shared_level < level);
7388                 if (level < wc->shared_level)
7389                         goto out;
7390
7391                 ret = find_next_key(path, level + 1, &wc->update_progress);
7392                 if (ret > 0)
7393                         wc->update_ref = 0;
7394
7395                 wc->stage = DROP_REFERENCE;
7396                 wc->shared_level = -1;
7397                 path->slots[level] = 0;
7398
7399                 /*
7400                  * check reference count again if the block isn't locked.
7401                  * we should start walking down the tree again if reference
7402                  * count is one.
7403                  */
7404                 if (!path->locks[level]) {
7405                         BUG_ON(level == 0);
7406                         btrfs_tree_lock(eb);
7407                         btrfs_set_lock_blocking(eb);
7408                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7409
7410                         ret = btrfs_lookup_extent_info(trans, root,
7411                                                        eb->start, level, 1,
7412                                                        &wc->refs[level],
7413                                                        &wc->flags[level]);
7414                         if (ret < 0) {
7415                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7416                                 path->locks[level] = 0;
7417                                 return ret;
7418                         }
7419                         BUG_ON(wc->refs[level] == 0);
7420                         if (wc->refs[level] == 1) {
7421                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7422                                 path->locks[level] = 0;
7423                                 return 1;
7424                         }
7425                 }
7426         }
7427
7428         /* wc->stage == DROP_REFERENCE */
7429         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
7430
7431         if (wc->refs[level] == 1) {
7432                 if (level == 0) {
7433                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7434                                 ret = btrfs_dec_ref(trans, root, eb, 1,
7435                                                     wc->for_reloc);
7436                         else
7437                                 ret = btrfs_dec_ref(trans, root, eb, 0,
7438                                                     wc->for_reloc);
7439                         BUG_ON(ret); /* -ENOMEM */
7440                 }
7441                 /* make block locked assertion in clean_tree_block happy */
7442                 if (!path->locks[level] &&
7443                     btrfs_header_generation(eb) == trans->transid) {
7444                         btrfs_tree_lock(eb);
7445                         btrfs_set_lock_blocking(eb);
7446                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7447                 }
7448                 clean_tree_block(trans, root, eb);
7449         }
7450
7451         if (eb == root->node) {
7452                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7453                         parent = eb->start;
7454                 else
7455                         BUG_ON(root->root_key.objectid !=
7456                                btrfs_header_owner(eb));
7457         } else {
7458                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7459                         parent = path->nodes[level + 1]->start;
7460                 else
7461                         BUG_ON(root->root_key.objectid !=
7462                                btrfs_header_owner(path->nodes[level + 1]));
7463         }
7464
7465         btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
7466 out:
7467         wc->refs[level] = 0;
7468         wc->flags[level] = 0;
7469         return 0;
7470 }
7471
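/*
 * Walk down from wc->level towards the leaves: walk_down_proc() handles
 * the current block and do_walk_down() descends one level, skipping
 * child blocks it fully dropped.  Stops at level 0, when the slots in
 * the current node are exhausted, or when a helper asks us to stop.
 */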
7472 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
7473                                    struct btrfs_root *root,
7474                                    struct btrfs_path *path,
7475                                    struct walk_control *wc)
7476 {
7477         int level = wc->level;
7478         int lookup_info = 1;
7479         int ret;
7480
7481         while (level >= 0) {
7482                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
7483                 if (ret > 0)
7484                         break;
7485
7486                 if (level == 0)
7487                         break;
7488
7489                 if (path->slots[level] >=
7490                     btrfs_header_nritems(path->nodes[level]))
7491                         break;
7492
7493                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
7494                 if (ret > 0) {
7495                         path->slots[level]++;
7496                         continue;
7497                 } else if (ret < 0)
7498                         return ret;
7499                 level = wc->level;
7500         }
7501         return 0;
7502 }
7503
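/*
 * Walk back up from wc->level towards @max_level, releasing each fully
 * processed node via walk_up_proc().  Returns 0 when a sibling slot was
 * found to continue the walk down from, and 1 once the whole (sub)tree
 * has been processed.
 */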
7504 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
7505                                  struct btrfs_root *root,
7506                                  struct btrfs_path *path,
7507                                  struct walk_control *wc, int max_level)
7508 {
7509         int level = wc->level;
7510         int ret;
7511
7512         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
7513         while (level < max_level && path->nodes[level]) {
7514                 wc->level = level;
7515                 if (path->slots[level] + 1 <
7516                     btrfs_header_nritems(path->nodes[level])) {
7517                         path->slots[level]++;
7518                         return 0;
7519                 } else {
7520                         ret = walk_up_proc(trans, root, path, wc);
7521                         if (ret > 0)
7522                                 return 0;
7523
7524                         if (path->locks[level]) {
7525                                 btrfs_tree_unlock_rw(path->nodes[level],
7526                                                      path->locks[level]);
7527                                 path->locks[level] = 0;
7528                         }
7529                         free_extent_buffer(path->nodes[level]);
7530                         path->nodes[level] = NULL;
7531                         level++;
7532                 }
7533         }
7534         return 1;
7535 }
7536
7537 /*
7538  * drop a subvolume tree.
7539  *
7540  * this function traverses the tree freeing any blocks that are only
7541  * referenced by the tree.
7542  *
7543  * when a shared tree block is found, this function decreases its
7544  * reference count by one. if update_ref is true, this function
7545  * also makes sure backrefs for the shared block and all lower level
7546  * blocks are properly updated.
7547  *
7548  * If called with for_reloc == 0, may exit early with -EAGAIN
7549  */
7550 int btrfs_drop_snapshot(struct btrfs_root *root,
7551                          struct btrfs_block_rsv *block_rsv, int update_ref,
7552                          int for_reloc)
7553 {
7554         struct btrfs_path *path;
7555         struct btrfs_trans_handle *trans;
7556         struct btrfs_root *tree_root = root->fs_info->tree_root;
7557         struct btrfs_root_item *root_item = &root->root_item;
7558         struct walk_control *wc;
7559         struct btrfs_key key;
7560         int err = 0;
7561         int ret;
7562         int level;
7563         bool root_dropped = false;
7564
7565         path = btrfs_alloc_path();
7566         if (!path) {
7567                 err = -ENOMEM;
7568                 goto out;
7569         }
7570
7571         wc = kzalloc(sizeof(*wc), GFP_NOFS);
7572         if (!wc) {
7573                 btrfs_free_path(path);
7574                 err = -ENOMEM;
7575                 goto out;
7576         }
7577
7578         trans = btrfs_start_transaction(tree_root, 0);
7579         if (IS_ERR(trans)) {
7580                 err = PTR_ERR(trans);
7581                 goto out_free;
7582         }
7583
7584         if (block_rsv)
7585                 trans->block_rsv = block_rsv;
7586
7587         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
7588                 level = btrfs_header_level(root->node);
7589                 path->nodes[level] = btrfs_lock_root_node(root);
7590                 btrfs_set_lock_blocking(path->nodes[level]);
7591                 path->slots[level] = 0;
7592                 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7593                 memset(&wc->update_progress, 0,
7594                        sizeof(wc->update_progress));
7595         } else {
7596                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
7597                 memcpy(&wc->update_progress, &key,
7598                        sizeof(wc->update_progress));
7599
7600                 level = root_item->drop_level;
7601                 BUG_ON(level == 0);
7602                 path->lowest_level = level;
7603                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
7604                 path->lowest_level = 0;
7605                 if (ret < 0) {
7606                         err = ret;
7607                         goto out_end_trans;
7608                 }
7609                 WARN_ON(ret > 0);
7610
7611                 /*
7612                  * unlock our path, this is safe because only this
7613                  * function is allowed to delete this snapshot
7614                  */
7615                 btrfs_unlock_up_safe(path, 0);
7616
7617                 level = btrfs_header_level(root->node);
7618                 while (1) {
7619                         btrfs_tree_lock(path->nodes[level]);
7620                         btrfs_set_lock_blocking(path->nodes[level]);
7621                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7622
7623                         ret = btrfs_lookup_extent_info(trans, root,
7624                                                 path->nodes[level]->start,
7625                                                 level, 1, &wc->refs[level],
7626                                                 &wc->flags[level]);
7627                         if (ret < 0) {
7628                                 err = ret;
7629                                 goto out_end_trans;
7630                         }
7631                         BUG_ON(wc->refs[level] == 0);
7632
7633                         if (level == root_item->drop_level)
7634                                 break;
7635
7636                         btrfs_tree_unlock(path->nodes[level]);
7637                         path->locks[level] = 0;
7638                         WARN_ON(wc->refs[level] != 1);
7639                         level--;
7640                 }
7641         }
7642
7643         wc->level = level;
7644         wc->shared_level = -1;
7645         wc->stage = DROP_REFERENCE;
7646         wc->update_ref = update_ref;
7647         wc->keep_locks = 0;
7648         wc->for_reloc = for_reloc;
7649         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
7650
7651         while (1) {
7652
7653                 ret = walk_down_tree(trans, root, path, wc);
7654                 if (ret < 0) {
7655                         err = ret;
7656                         break;
7657                 }
7658
7659                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
7660                 if (ret < 0) {
7661                         err = ret;
7662                         break;
7663                 }
7664
7665                 if (ret > 0) {
7666                         BUG_ON(wc->stage != DROP_REFERENCE);
7667                         break;
7668                 }
7669
7670                 if (wc->stage == DROP_REFERENCE) {
7671                         level = wc->level;
7672                         btrfs_node_key(path->nodes[level],
7673                                        &root_item->drop_progress,
7674                                        path->slots[level]);
7675                         root_item->drop_level = level;
7676                 }
7677
7678                 BUG_ON(wc->level == 0);
7679                 if (btrfs_should_end_transaction(trans, tree_root) ||
7680                     (!for_reloc && btrfs_need_cleaner_sleep(root))) {
7681                         ret = btrfs_update_root(trans, tree_root,
7682                                                 &root->root_key,
7683                                                 root_item);
7684                         if (ret) {
7685                                 btrfs_abort_transaction(trans, tree_root, ret);
7686                                 err = ret;
7687                                 goto out_end_trans;
7688                         }
7689
7690                         btrfs_end_transaction_throttle(trans, tree_root);
7691                         if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
7692                                 pr_debug("BTRFS: drop snapshot early exit\n");
7693                                 err = -EAGAIN;
7694                                 goto out_free;
7695                         }
7696
7697                         trans = btrfs_start_transaction(tree_root, 0);
7698                         if (IS_ERR(trans)) {
7699                                 err = PTR_ERR(trans);
7700                                 goto out_free;
7701                         }
7702                         if (block_rsv)
7703                                 trans->block_rsv = block_rsv;
7704                 }
7705         }
7706         btrfs_release_path(path);
7707         if (err)
7708                 goto out_end_trans;
7709
7710         ret = btrfs_del_root(trans, tree_root, &root->root_key);
7711         if (ret) {
7712                 btrfs_abort_transaction(trans, tree_root, ret);
7713                 goto out_end_trans;
7714         }
7715
7716         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
7717                 ret = btrfs_find_root(tree_root, &root->root_key, path,
7718                                       NULL, NULL);
7719                 if (ret < 0) {
7720                         btrfs_abort_transaction(trans, tree_root, ret);
7721                         err = ret;
7722                         goto out_end_trans;
7723                 } else if (ret > 0) {
7724                         /* if we fail to delete the orphan item this time
7725                          * around, it'll get picked up the next time.
7726                          *
7727                          * The most common failure here is just -ENOENT.
7728                          */
7729                         btrfs_del_orphan_item(trans, tree_root,
7730                                               root->root_key.objectid);
7731                 }
7732         }
7733
7734         if (root->in_radix) {
7735                 btrfs_drop_and_free_fs_root(tree_root->fs_info, root);
7736         } else {
7737                 free_extent_buffer(root->node);
7738                 free_extent_buffer(root->commit_root);
7739                 btrfs_put_fs_root(root);
7740         }
7741         root_dropped = true;
7742 out_end_trans:
7743         btrfs_end_transaction_throttle(trans, tree_root);
7744 out_free:
7745         kfree(wc);
7746         btrfs_free_path(path);
7747 out:
7748         /*
7749          * So if we need to stop dropping the snapshot for whatever reason, we
7750          * need to make sure to add it back to the dead root list so that we
7751          * keep trying to do the work later.  This also cleans up roots if we
7752          * don't have it in the radix (like when we recover after a power fail
7753          * or unmount) so we don't leak memory.
7754          */
7755         if (!for_reloc && root_dropped == false)
7756                 btrfs_add_dead_root(root);
7757         if (err && err != -EAGAIN)
7758                 btrfs_std_error(root->fs_info, err);
7759         return err;
7760 }
7761
7762 /*
7763  * drop subtree rooted at tree block 'node'.
7764  *
7765  * NOTE: this function will unlock and release tree block 'node'.
7766  * only used by the relocation code.
7767  */
7768 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
7769                         struct btrfs_root *root,
7770                         struct extent_buffer *node,
7771                         struct extent_buffer *parent)
7772 {
7773         struct btrfs_path *path;
7774         struct walk_control *wc;
7775         int level;
7776         int parent_level;
7777         int ret = 0;
7778         int wret;
7779
7780         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
7781
7782         path = btrfs_alloc_path();
7783         if (!path)
7784                 return -ENOMEM;
7785
7786         wc = kzalloc(sizeof(*wc), GFP_NOFS);
7787         if (!wc) {
7788                 btrfs_free_path(path);
7789                 return -ENOMEM;
7790         }
7791
7792         btrfs_assert_tree_locked(parent);
7793         parent_level = btrfs_header_level(parent);
7794         extent_buffer_get(parent);
7795         path->nodes[parent_level] = parent;
7796         path->slots[parent_level] = btrfs_header_nritems(parent);
7797
7798         btrfs_assert_tree_locked(node);
7799         level = btrfs_header_level(node);
7800         path->nodes[level] = node;
7801         path->slots[level] = 0;
7802         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7803
7804         wc->refs[parent_level] = 1;
7805         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
7806         wc->level = level;
7807         wc->shared_level = -1;
7808         wc->stage = DROP_REFERENCE;
7809         wc->update_ref = 0;
7810         wc->keep_locks = 1;
7811         wc->for_reloc = 1;
7812         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
7813
7814         while (1) {
7815                 wret = walk_down_tree(trans, root, path, wc);
7816                 if (wret < 0) {
7817                         ret = wret;
7818                         break;
7819                 }
7820
7821                 wret = walk_up_tree(trans, root, path, wc, parent_level);
7822                 if (wret < 0)
7823                         ret = wret;
7824                 if (wret != 0)
7825                         break;
7826         }
7827
7828         kfree(wc);
7829         btrfs_free_path(path);
7830         return ret;
7831 }
7832
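/*
 * Compute the block group profile to use when relocating a chunk,
 * degrading or upgrading redundancy to match the device count.  For
 * example, on a single-device fs RAID1/RAID10 becomes DUP and RAID0
 * becomes single, while on a multi-device fs DUP is upgraded to RAID1.
 */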
7833 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
7834 {
7835         u64 num_devices;
7836         u64 stripped;
7837
7838         /*
7839  * if restripe for this chunk_type is on, pick the target profile and
7840  * return, otherwise do the usual balance
7841          */
7842         stripped = get_restripe_target(root->fs_info, flags);
7843         if (stripped)
7844                 return extended_to_chunk(stripped);
7845
7846         /*
7847          * we add in the count of missing devices because we want
7848          * to make sure that any RAID levels on a degraded FS
7849          * continue to be honored.
7850          */
7851         num_devices = root->fs_info->fs_devices->rw_devices +
7852                 root->fs_info->fs_devices->missing_devices;
7853
7854         stripped = BTRFS_BLOCK_GROUP_RAID0 |
7855                 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
7856                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
7857
7858         if (num_devices == 1) {
7859                 stripped |= BTRFS_BLOCK_GROUP_DUP;
7860                 stripped = flags & ~stripped;
7861
7862                 /* turn raid0 into single device chunks */
7863                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
7864                         return stripped;
7865
7866                 /* turn mirroring into duplication */
7867                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
7868                              BTRFS_BLOCK_GROUP_RAID10))
7869                         return stripped | BTRFS_BLOCK_GROUP_DUP;
7870         } else {
7871                 /* they already had raid on here, just return */
7872                 if (flags & stripped)
7873                         return flags;
7874
7875                 stripped |= BTRFS_BLOCK_GROUP_DUP;
7876                 stripped = flags & ~stripped;
7877
7878                 /* switch duplicated blocks with raid1 */
7879                 if (flags & BTRFS_BLOCK_GROUP_DUP)
7880                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
7881
7882                 /* this is drive concat, leave it alone */
7883         }
7884
7885         return flags;
7886 }
7887
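/*
 * Mark a block group read-only if the space_info can absorb its unused
 * bytes as bytes_readonly without overcommitting; with @force set the
 * metadata/system headroom requirement is waived.  Returns 0 on
 * success, -ENOSPC otherwise.
 */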
7888 static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
7889 {
7890         struct btrfs_space_info *sinfo = cache->space_info;
7891         u64 num_bytes;
7892         u64 min_allocable_bytes;
7893         int ret = -ENOSPC;
7894
7896         /*
7897          * We need some metadata space and system metadata space for
7898          * allocating chunks in some corner cases, so don't mark the
7899          * group read-only unless we are forced to.
7900          */
7901         if ((sinfo->flags &
7902              (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
7903             !force)
7904                 min_allocable_bytes = 1 * 1024 * 1024;
7905         else
7906                 min_allocable_bytes = 0;
7907
7908         spin_lock(&sinfo->lock);
7909         spin_lock(&cache->lock);
7910
7911         if (cache->ro) {
7912                 ret = 0;
7913                 goto out;
7914         }
7915
7916         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7917                     cache->bytes_super - btrfs_block_group_used(&cache->item);
7918
7919         if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
7920             sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
7921             min_allocable_bytes <= sinfo->total_bytes) {
7922                 sinfo->bytes_readonly += num_bytes;
7923                 cache->ro = 1;
7924                 ret = 0;
7925         }
7926 out:
7927         spin_unlock(&cache->lock);
7928         spin_unlock(&sinfo->lock);
7929         return ret;
7930 }
7931
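/*
 * Make @cache read-only, allocating a replacement chunk first when the
 * relocation target profile differs from the current one, and retrying
 * once more after a forced chunk allocation if the first attempt hits
 * -ENOSPC.
 */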
7932 int btrfs_set_block_group_ro(struct btrfs_root *root,
7933                              struct btrfs_block_group_cache *cache)
7935 {
7936         struct btrfs_trans_handle *trans;
7937         u64 alloc_flags;
7938         int ret;
7939
7940         BUG_ON(cache->ro);
7941
7942         trans = btrfs_join_transaction(root);
7943         if (IS_ERR(trans))
7944                 return PTR_ERR(trans);
7945
7946         alloc_flags = update_block_group_flags(root, cache->flags);
7947         if (alloc_flags != cache->flags) {
7948                 ret = do_chunk_alloc(trans, root, alloc_flags,
7949                                      CHUNK_ALLOC_FORCE);
7950                 if (ret < 0)
7951                         goto out;
7952         }
7953
7954         ret = set_block_group_ro(cache, 0);
7955         if (!ret)
7956                 goto out;
7957         alloc_flags = get_alloc_profile(root, cache->space_info->flags);
7958         ret = do_chunk_alloc(trans, root, alloc_flags,
7959                              CHUNK_ALLOC_FORCE);
7960         if (ret < 0)
7961                 goto out;
7962         ret = set_block_group_ro(cache, 0);
7963 out:
7964         btrfs_end_transaction(trans, root);
7965         return ret;
7966 }
7967
7968 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
7969                             struct btrfs_root *root, u64 type)
7970 {
7971         u64 alloc_flags = get_alloc_profile(root, type);
7972         return do_chunk_alloc(trans, root, alloc_flags,
7973                               CHUNK_ALLOC_FORCE);
7974 }
7975
7976 /*
7977  * helper to account the unused space of all the readonly block groups in
7978  * the list. takes mirrors into account.
7979  */
7980 static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
7981 {
7982         struct btrfs_block_group_cache *block_group;
7983         u64 free_bytes = 0;
7984         int factor;
7985
7986         list_for_each_entry(block_group, groups_list, list) {
7987                 spin_lock(&block_group->lock);
7988
7989                 if (!block_group->ro) {
7990                         spin_unlock(&block_group->lock);
7991                         continue;
7992                 }
7993
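                     /* mirrored profiles store every byte on disk twice */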
7994                 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
7995                                           BTRFS_BLOCK_GROUP_RAID10 |
7996                                           BTRFS_BLOCK_GROUP_DUP))
7997                         factor = 2;
7998                 else
7999                         factor = 1;
8000
8001                 free_bytes += (block_group->key.offset -
8002                                btrfs_block_group_used(&block_group->item)) *
8003                                factor;
8004
8005                 spin_unlock(&block_group->lock);
8006         }
8007
8008         return free_bytes;
8009 }
8010
8011 /*
8012  * helper to account the unused space of all the readonly block groups in
8013  * the space_info. takes mirrors into account.
8014  */
8015 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
8016 {
8017         int i;
8018         u64 free_bytes = 0;
8019
8020         spin_lock(&sinfo->lock);
8021
8022         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
8023                 if (!list_empty(&sinfo->block_groups[i]))
8024                         free_bytes += __btrfs_get_ro_block_group_free_space(
8025                                                 &sinfo->block_groups[i]);
8026
8027         spin_unlock(&sinfo->lock);
8028
8029         return free_bytes;
8030 }
8031
8032 void btrfs_set_block_group_rw(struct btrfs_root *root,
8033                               struct btrfs_block_group_cache *cache)
8034 {
8035         struct btrfs_space_info *sinfo = cache->space_info;
8036         u64 num_bytes;
8037
8038         BUG_ON(!cache->ro);
8039
8040         spin_lock(&sinfo->lock);
8041         spin_lock(&cache->lock);
8042         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
8043                     cache->bytes_super - btrfs_block_group_used(&cache->item);
8044         sinfo->bytes_readonly -= num_bytes;
8045         cache->ro = 0;
8046         spin_unlock(&cache->lock);
8047         spin_unlock(&sinfo->lock);
8048 }
8049
8050 /*
8051  * checks to see if it's even possible to relocate this block group.
8052  *
8053  * @return - -1 if it's not a good idea to relocate this block group, 0 if
8054  * it's ok to go ahead and try.
8055  */
8056 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
8057 {
8058         struct btrfs_block_group_cache *block_group;
8059         struct btrfs_space_info *space_info;
8060         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
8061         struct btrfs_device *device;
8062         struct btrfs_trans_handle *trans;
8063         u64 min_free;
8064         u64 dev_min = 1;
8065         u64 dev_nr = 0;
8066         u64 target;
8067         int index;
8068         int full = 0;
8069         int ret = 0;
8070
8071         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
8072
8073         /* odd, couldn't find the block group, leave it alone */
8074         if (!block_group)
8075                 return -1;
8076
8077         min_free = btrfs_block_group_used(&block_group->item);
8078
8079         /* no bytes used, we're good */
8080         if (!min_free)
8081                 goto out;
8082
8083         space_info = block_group->space_info;
8084         spin_lock(&space_info->lock);
8085
8086         full = space_info->full;
8087
8088         /*
8089          * if this is the last block group we have in this space, we can't
8090          * relocate it unless we're able to allocate a new chunk below.
8091          *
8092          * Otherwise, we need to make sure we have room in the space to handle
8093          * all of the extents from this block group.  If we can, we're good.
8094          */
8095         if ((space_info->total_bytes != block_group->key.offset) &&
8096             (space_info->bytes_used + space_info->bytes_reserved +
8097              space_info->bytes_pinned + space_info->bytes_readonly +
8098              min_free < space_info->total_bytes)) {
8099                 spin_unlock(&space_info->lock);
8100                 goto out;
8101         }
8102         spin_unlock(&space_info->lock);
8103
8104         /*
8105          * ok, we don't have enough space, but maybe we have free space on
8106          * our devices to allocate new chunks for relocation, so loop through
8107          * our alloc devices and guess if we have enough space.  If this
8108          * block group is going to be restriped, run checks against the
8109          * target profile instead of the current one.
8110          */
8111         ret = -1;
8112
8113         /*
8114          * index:
8115          *      0: raid10
8116          *      1: raid1
8117          *      2: dup
8118          *      3: raid0
8119          *      4: single
8120          */
8121         target = get_restripe_target(root->fs_info, block_group->flags);
8122         if (target) {
8123                 index = __get_raid_index(extended_to_chunk(target));
8124         } else {
8125                 /*
8126                  * this is just a balance, so if we were marked as full
8127                  * we know there is no space for a new chunk
8128                  */
8129                 if (full)
8130                         goto out;
8131
8132                 index = get_block_group_index(block_group);
8133         }
8134
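             /*
              * Estimate how much contiguous free space each device must
              * provide for a replacement chunk: e.g. a raid10 group with
              * 1GB used needs at least 4 devices with 512MB free each,
              * dup needs 2GB free on a single device, and raid0 spreads
              * the 1GB evenly across all writable devices.
              */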
8135         if (index == BTRFS_RAID_RAID10) {
8136                 dev_min = 4;
8137                 /* Divide by 2 */
8138                 min_free >>= 1;
8139         } else if (index == BTRFS_RAID_RAID1) {
8140                 dev_min = 2;
8141         } else if (index == BTRFS_RAID_DUP) {
8142                 /* Multiply by 2 */
8143                 min_free <<= 1;
8144         } else if (index == BTRFS_RAID_RAID0) {
8145                 dev_min = fs_devices->rw_devices;
8146                 do_div(min_free, dev_min);
8147         }
8148
8149         /* We need to do this so that we can look at pending chunks */
8150         trans = btrfs_join_transaction(root);
8151         if (IS_ERR(trans)) {
8152                 ret = PTR_ERR(trans);
8153                 goto out;
8154         }
8155
8156         mutex_lock(&root->fs_info->chunk_mutex);
8157         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
8158                 u64 dev_offset;
8159
8160                 /*
8161                  * check to make sure we can actually find a chunk with enough
8162                  * space to fit our block group in.
8163                  */
8164                 if (device->total_bytes > device->bytes_used + min_free &&
8165                     !device->is_tgtdev_for_dev_replace) {
8166                         ret = find_free_dev_extent(trans, device, min_free,
8167                                                    &dev_offset, NULL);
8168                         if (!ret)
8169                                 dev_nr++;
8170
8171                         if (dev_nr >= dev_min)
8172                                 break;
8173
8174                         ret = -1;
8175                 }
8176         }
8177         mutex_unlock(&root->fs_info->chunk_mutex);
8178         btrfs_end_transaction(trans, root);
8179 out:
8180         btrfs_put_block_group(block_group);
8181         return ret;
8182 }
8183
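     /*
      * find the first BTRFS_BLOCK_GROUP_ITEM_KEY item at or after
      * key->objectid.  returns 0 with @path positioned on the item,
      * > 0 if no such item exists, < 0 on error.
      */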
8184 static int find_first_block_group(struct btrfs_root *root,
8185                 struct btrfs_path *path, struct btrfs_key *key)
8186 {
8187         int ret = 0;
8188         struct btrfs_key found_key;
8189         struct extent_buffer *leaf;
8190         int slot;
8191
8192         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
8193         if (ret < 0)
8194                 goto out;
8195
8196         while (1) {
8197                 slot = path->slots[0];
8198                 leaf = path->nodes[0];
8199                 if (slot >= btrfs_header_nritems(leaf)) {
8200                         ret = btrfs_next_leaf(root, path);
8201                         if (ret == 0)
8202                                 continue;
8203                         if (ret < 0)
8204                                 goto out;
8205                         break;
8206                 }
8207                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
8208
8209                 if (found_key.objectid >= key->objectid &&
8210                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
8211                         ret = 0;
8212                         goto out;
8213                 }
8214                 path->slots[0]++;
8215         }
8216 out:
8217         return ret;
8218 }
8219
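     /*
      * drop the inode reference (iref) that each block group holds on
      * its free space cache inode.
      */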
8220 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
8221 {
8222         struct btrfs_block_group_cache *block_group;
8223         u64 last = 0;
8224
8225         while (1) {
8226                 struct inode *inode;
8227
8228                 block_group = btrfs_lookup_first_block_group(info, last);
8229                 while (block_group) {
8230                         spin_lock(&block_group->lock);
8231                         if (block_group->iref)
8232                                 break;
8233                         spin_unlock(&block_group->lock);
8234                         block_group = next_block_group(info->tree_root,
8235                                                        block_group);
8236                 }
8237                 if (!block_group) {
8238                         if (last == 0)
8239                                 break;
8240                         last = 0;
8241                         continue;
8242                 }
8243
8244                 inode = block_group->inode;
8245                 block_group->iref = 0;
8246                 block_group->inode = NULL;
8247                 spin_unlock(&block_group->lock);
8248                 iput(inode);
8249                 last = block_group->key.objectid + block_group->key.offset;
8250                 btrfs_put_block_group(block_group);
8251         }
8252 }
8253
8254 int btrfs_free_block_groups(struct btrfs_fs_info *info)
8255 {
8256         struct btrfs_block_group_cache *block_group;
8257         struct btrfs_space_info *space_info;
8258         struct btrfs_caching_control *caching_ctl;
8259         struct rb_node *n;
8260
8261         down_write(&info->commit_root_sem);
8262         while (!list_empty(&info->caching_block_groups)) {
8263                 caching_ctl = list_entry(info->caching_block_groups.next,
8264                                          struct btrfs_caching_control, list);
8265                 list_del(&caching_ctl->list);
8266                 put_caching_control(caching_ctl);
8267         }
8268         up_write(&info->commit_root_sem);
8269
8270         spin_lock(&info->block_group_cache_lock);
8271         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
8272                 block_group = rb_entry(n, struct btrfs_block_group_cache,
8273                                        cache_node);
8274                 rb_erase(&block_group->cache_node,
8275                          &info->block_group_cache_tree);
8276                 spin_unlock(&info->block_group_cache_lock);
8277
8278                 down_write(&block_group->space_info->groups_sem);
8279                 list_del(&block_group->list);
8280                 up_write(&block_group->space_info->groups_sem);
8281
8282                 if (block_group->cached == BTRFS_CACHE_STARTED)
8283                         wait_block_group_cache_done(block_group);
8284
8285                 /*
8286                  * We haven't cached this block group, which means we could
8287                  * possibly have excluded extents on this block group.
8288                  */
8289                 if (block_group->cached == BTRFS_CACHE_NO ||
8290                     block_group->cached == BTRFS_CACHE_ERROR)
8291                         free_excluded_extents(info->extent_root, block_group);
8292
8293                 btrfs_remove_free_space_cache(block_group);
8294                 btrfs_put_block_group(block_group);
8295
8296                 spin_lock(&info->block_group_cache_lock);
8297         }
8298         spin_unlock(&info->block_group_cache_lock);
8299
8300         /* now that all the block groups are freed, go through and
8301          * free all the space_info structs.  This is only called during
8302          * the final stages of unmount, and so we know nobody is
8303          * using them.  We call synchronize_rcu() once before we start,
8304          * just to be on the safe side.
8305          */
8306         synchronize_rcu();
8307
8308         release_global_block_rsv(info);
8309
8310         while (!list_empty(&info->space_info)) {
8311                 int i;
8312
8313                 space_info = list_entry(info->space_info.next,
8314                                         struct btrfs_space_info,
8315                                         list);
8316                 if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
8317                         if (WARN_ON(space_info->bytes_pinned > 0 ||
8318                             space_info->bytes_reserved > 0 ||
8319                             space_info->bytes_may_use > 0)) {
8320                                 dump_space_info(space_info, 0, 0);
8321                         }
8322                 }
8323                 list_del(&space_info->list);
8324                 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
8325                         struct kobject *kobj;
8326                         kobj = &space_info->block_group_kobjs[i];
8327                         if (kobj->parent) {
8328                                 kobject_del(kobj);
8329                                 kobject_put(kobj);
8330                         }
8331                 }
8332                 kobject_del(&space_info->kobj);
8333                 kobject_put(&space_info->kobj);
8334         }
8335         return 0;
8336 }
8337
8338 static void __link_block_group(struct btrfs_space_info *space_info,
8339                                struct btrfs_block_group_cache *cache)
8340 {
8341         int index = get_block_group_index(cache);
8342         bool first = false;
8343
8344         down_write(&space_info->groups_sem);
8345         if (list_empty(&space_info->block_groups[index]))
8346                 first = true;
8347         list_add_tail(&cache->list, &space_info->block_groups[index]);
8348         up_write(&space_info->groups_sem);
8349
8350         if (first) {
8351                 struct kobject *kobj = &space_info->block_group_kobjs[index];
8352                 int ret;
8353
8354                 kobject_get(&space_info->kobj); /* put in release */
8355                 ret = kobject_add(kobj, &space_info->kobj, "%s",
8356                                   get_raid_name(index));
8357                 if (ret) {
8358                         pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
8359                         kobject_put(&space_info->kobj);
8360                 }
8361         }
8362 }
8363
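     /*
      * allocate and initialize an in-memory block group descriptor for
      * the chunk covering [start, start + size).
      */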
8364 static struct btrfs_block_group_cache *
8365 btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
8366 {
8367         struct btrfs_block_group_cache *cache;
8368
8369         cache = kzalloc(sizeof(*cache), GFP_NOFS);
8370         if (!cache)
8371                 return NULL;
8372
8373         cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
8374                                         GFP_NOFS);
8375         if (!cache->free_space_ctl) {
8376                 kfree(cache);
8377                 return NULL;
8378         }
8379
8380         cache->key.objectid = start;
8381         cache->key.offset = size;
8382         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
8383
8384         cache->sectorsize = root->sectorsize;
8385         cache->fs_info = root->fs_info;
8386         cache->full_stripe_len = btrfs_full_stripe_len(root,
8387                                                &root->fs_info->mapping_tree,
8388                                                start);
8389         atomic_set(&cache->count, 1);
8390         spin_lock_init(&cache->lock);
8391         INIT_LIST_HEAD(&cache->list);
8392         INIT_LIST_HEAD(&cache->cluster_list);
8393         INIT_LIST_HEAD(&cache->new_bg_list);
8394         btrfs_init_free_space_ctl(cache);
8395
8396         return cache;
8397 }
8398
8399 int btrfs_read_block_groups(struct btrfs_root *root)
8400 {
8401         struct btrfs_path *path;
8402         int ret;
8403         struct btrfs_block_group_cache *cache;
8404         struct btrfs_fs_info *info = root->fs_info;
8405         struct btrfs_space_info *space_info;
8406         struct btrfs_key key;
8407         struct btrfs_key found_key;
8408         struct extent_buffer *leaf;
8409         int need_clear = 0;
8410         u64 cache_gen;
8411
8412         root = info->extent_root;
8413         key.objectid = 0;
8414         key.offset = 0;
8415         btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
8416         path = btrfs_alloc_path();
8417         if (!path)
8418                 return -ENOMEM;
8419         path->reada = 1;
8420
8421         cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
8422         if (btrfs_test_opt(root, SPACE_CACHE) &&
8423             btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
8424                 need_clear = 1;
8425         if (btrfs_test_opt(root, CLEAR_CACHE))
8426                 need_clear = 1;
8427
8428         while (1) {
8429                 ret = find_first_block_group(root, path, &key);
8430                 if (ret > 0)
8431                         break;
8432                 if (ret != 0)
8433                         goto error;
8434
8435                 leaf = path->nodes[0];
8436                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
8437
8438                 cache = btrfs_create_block_group_cache(root, found_key.objectid,
8439                                                        found_key.offset);
8440                 if (!cache) {
8441                         ret = -ENOMEM;
8442                         goto error;
8443                 }
8444
8445                 if (need_clear) {
8446                         /*
8447                          * When we mount with an old space cache, we need
8448                          * to set BTRFS_DC_CLEAR and set the dirty flag.
8449                          *
8450                          * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
8451                          *    truncate the old free space cache inode and
8452                          *    set up a new one.
8453                          * b) Setting the dirty flag makes sure that we
8454                          *    flush the new space cache info onto disk.
8455                          */
8456                         cache->disk_cache_state = BTRFS_DC_CLEAR;
8457                         if (btrfs_test_opt(root, SPACE_CACHE))
8458                                 cache->dirty = 1;
8459                 }
8460
8461                 read_extent_buffer(leaf, &cache->item,
8462                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
8463                                    sizeof(cache->item));
8464                 cache->flags = btrfs_block_group_flags(&cache->item);
8465
8466                 key.objectid = found_key.objectid + found_key.offset;
8467                 btrfs_release_path(path);
8468
8469                 /*
8470                  * We need to exclude the super stripes now so that the space
8471                  * info has super bytes accounted for, otherwise we'll think
8472                  * we have more space than we actually do.
8473                  */
8474                 ret = exclude_super_stripes(root, cache);
8475                 if (ret) {
8476                         /*
8477                          * We may have excluded something, so call this just in
8478                          * case.
8479                          */
8480                         free_excluded_extents(root, cache);
8481                         btrfs_put_block_group(cache);
8482                         goto error;
8483                 }
8484
8485                 /*
8486                  * check for two cases: either we are full, and therefore
8487                  * don't need to bother with the caching work since we won't
8488                  * find any space, or we are empty, and we can just add all
8489                  * the space in and be done with it.  This saves us a lot of
8490                  * time, particularly in the full case.
8491                  */
8492                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
8493                         cache->last_byte_to_unpin = (u64)-1;
8494                         cache->cached = BTRFS_CACHE_FINISHED;
8495                         free_excluded_extents(root, cache);
8496                 } else if (btrfs_block_group_used(&cache->item) == 0) {
8497                         cache->last_byte_to_unpin = (u64)-1;
8498                         cache->cached = BTRFS_CACHE_FINISHED;
8499                         add_new_free_space(cache, root->fs_info,
8500                                            found_key.objectid,
8501                                            found_key.objectid +
8502                                            found_key.offset);
8503                         free_excluded_extents(root, cache);
8504                 }
8505
8506                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
8507                 if (ret) {
8508                         btrfs_remove_free_space_cache(cache);
8509                         btrfs_put_block_group(cache);
8510                         goto error;
8511                 }
8512
8513                 ret = update_space_info(info, cache->flags, found_key.offset,
8514                                         btrfs_block_group_used(&cache->item),
8515                                         &space_info);
8516                 if (ret) {
8517                         btrfs_remove_free_space_cache(cache);
8518                         spin_lock(&info->block_group_cache_lock);
8519                         rb_erase(&cache->cache_node,
8520                                  &info->block_group_cache_tree);
8521                         spin_unlock(&info->block_group_cache_lock);
8522                         btrfs_put_block_group(cache);
8523                         goto error;
8524                 }
8525
8526                 cache->space_info = space_info;
8527                 spin_lock(&cache->space_info->lock);
8528                 cache->space_info->bytes_readonly += cache->bytes_super;
8529                 spin_unlock(&cache->space_info->lock);
8530
8531                 __link_block_group(space_info, cache);
8532
8533                 set_avail_alloc_bits(root->fs_info, cache->flags);
8534                 if (btrfs_chunk_readonly(root, cache->key.objectid))
8535                         set_block_group_ro(cache, 1);
8536         }
8537
8538         list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
8539                 if (!(get_alloc_profile(root, space_info->flags) &
8540                       (BTRFS_BLOCK_GROUP_RAID10 |
8541                        BTRFS_BLOCK_GROUP_RAID1 |
8542                        BTRFS_BLOCK_GROUP_RAID5 |
8543                        BTRFS_BLOCK_GROUP_RAID6 |
8544                        BTRFS_BLOCK_GROUP_DUP)))
8545                         continue;
8546                 /*
8547                  * avoid allocating from un-mirrored block groups if there
8548                  * are mirrored block groups.
8549                  */
8550                 list_for_each_entry(cache,
8551                                 &space_info->block_groups[BTRFS_RAID_RAID0],
8552                                 list)
8553                         set_block_group_ro(cache, 1);
8554                 list_for_each_entry(cache,
8555                                 &space_info->block_groups[BTRFS_RAID_SINGLE],
8556                                 list)
8557                         set_block_group_ro(cache, 1);
8558         }
8559
8560         init_global_block_rsv(info);
8561         ret = 0;
8562 error:
8563         btrfs_free_path(path);
8564         return ret;
8565 }
8566
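     /*
      * insert the block group items for all the chunks created during
      * this transaction.
      */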
8567 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
8568                                        struct btrfs_root *root)
8569 {
8570         struct btrfs_block_group_cache *block_group, *tmp;
8571         struct btrfs_root *extent_root = root->fs_info->extent_root;
8572         struct btrfs_block_group_item item;
8573         struct btrfs_key key;
8574         int ret = 0;
8575
8576         list_for_each_entry_safe(block_group, tmp, &trans->new_bgs,
8577                                  new_bg_list) {
8578                 list_del_init(&block_group->new_bg_list);
8579
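                     /* on error, keep emptying new_bg_list but skip the work */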
8580                 if (ret)
8581                         continue;
8582
8583                 spin_lock(&block_group->lock);
8584                 memcpy(&item, &block_group->item, sizeof(item));
8585                 memcpy(&key, &block_group->key, sizeof(key));
8586                 spin_unlock(&block_group->lock);
8587
8588                 ret = btrfs_insert_item(trans, extent_root, &key, &item,
8589                                         sizeof(item));
8590                 if (ret)
8591                         btrfs_abort_transaction(trans, extent_root, ret);
8592                 ret = btrfs_finish_chunk_alloc(trans, extent_root,
8593                                                key.objectid, key.offset);
8594                 if (ret)
8595                         btrfs_abort_transaction(trans, extent_root, ret);
8596         }
8597 }
8598
8599 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
8600                            struct btrfs_root *root, u64 bytes_used,
8601                            u64 type, u64 chunk_objectid, u64 chunk_offset,
8602                            u64 size)
8603 {
8604         int ret;
8605         struct btrfs_root *extent_root;
8606         struct btrfs_block_group_cache *cache;
8607
8608         extent_root = root->fs_info->extent_root;
8609
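             /*
              * adding a new block group makes the tree-log code fall back
              * to a full transaction commit for this transaction.
              */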
8610         root->fs_info->last_trans_log_full_commit = trans->transid;
8611
8612         cache = btrfs_create_block_group_cache(root, chunk_offset, size);
8613         if (!cache)
8614                 return -ENOMEM;
8615
8616         btrfs_set_block_group_used(&cache->item, bytes_used);
8617         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
8618         btrfs_set_block_group_flags(&cache->item, type);
8619
8620         cache->flags = type;
8621         cache->last_byte_to_unpin = (u64)-1;
8622         cache->cached = BTRFS_CACHE_FINISHED;
8623         ret = exclude_super_stripes(root, cache);
8624         if (ret) {
8625                 /*
8626                  * We may have excluded something, so call this just in
8627                  * case.
8628                  */
8629                 free_excluded_extents(root, cache);
8630                 btrfs_put_block_group(cache);
8631                 return ret;
8632         }
8633
8634         add_new_free_space(cache, root->fs_info, chunk_offset,
8635                            chunk_offset + size);
8636
8637         free_excluded_extents(root, cache);
8638
8639         ret = btrfs_add_block_group_cache(root->fs_info, cache);
8640         if (ret) {
8641                 btrfs_remove_free_space_cache(cache);
8642                 btrfs_put_block_group(cache);
8643                 return ret;
8644         }
8645
8646         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
8647                                 &cache->space_info);
8648         if (ret) {
8649                 btrfs_remove_free_space_cache(cache);
8650                 spin_lock(&root->fs_info->block_group_cache_lock);
8651                 rb_erase(&cache->cache_node,
8652                          &root->fs_info->block_group_cache_tree);
8653                 spin_unlock(&root->fs_info->block_group_cache_lock);
8654                 btrfs_put_block_group(cache);
8655                 return ret;
8656         }
8657         update_global_block_rsv(root->fs_info);
8658
8659         spin_lock(&cache->space_info->lock);
8660         cache->space_info->bytes_readonly += cache->bytes_super;
8661         spin_unlock(&cache->space_info->lock);
8662
8663         __link_block_group(cache->space_info, cache);
8664
8665         list_add_tail(&cache->new_bg_list, &trans->new_bgs);
8666
8667         set_avail_alloc_bits(extent_root->fs_info, type);
8668
8669         return 0;
8670 }
8671
8672 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
8673 {
8674         u64 extra_flags = chunk_to_extended(flags) &
8675                                 BTRFS_EXTENDED_PROFILE_MASK;
8676
8677         write_seqlock(&fs_info->profiles_lock);
8678         if (flags & BTRFS_BLOCK_GROUP_DATA)
8679                 fs_info->avail_data_alloc_bits &= ~extra_flags;
8680         if (flags & BTRFS_BLOCK_GROUP_METADATA)
8681                 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
8682         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
8683                 fs_info->avail_system_alloc_bits &= ~extra_flags;
8684         write_sequnlock(&fs_info->profiles_lock);
8685 }
8686
8687 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
8688                              struct btrfs_root *root, u64 group_start)
8689 {
8690         struct btrfs_path *path;
8691         struct btrfs_block_group_cache *block_group;
8692         struct btrfs_free_cluster *cluster;
8693         struct btrfs_root *tree_root = root->fs_info->tree_root;
8694         struct btrfs_key key;
8695         struct inode *inode;
8696         int ret;
8697         int index;
8698         int factor;
8699
8700         root = root->fs_info->extent_root;
8701
8702         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
8703         BUG_ON(!block_group);
8704         BUG_ON(!block_group->ro);
8705
8706         /*
8707          * Free the reserved super bytes from this block group before
8708          * removing it.
8709          */
8710         free_excluded_extents(root, block_group);
8711
8712         memcpy(&key, &block_group->key, sizeof(key));
8713         index = get_block_group_index(block_group);
8714         if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
8715                                   BTRFS_BLOCK_GROUP_RAID1 |
8716                                   BTRFS_BLOCK_GROUP_RAID10))
8717                 factor = 2;
8718         else
8719                 factor = 1;
8720
8721         /* make sure this block group isn't part of an allocation cluster */
8722         cluster = &root->fs_info->data_alloc_cluster;
8723         spin_lock(&cluster->refill_lock);
8724         btrfs_return_cluster_to_free_space(block_group, cluster);
8725         spin_unlock(&cluster->refill_lock);
8726
8727         /*
8728          * make sure this block group isn't part of a metadata
8729          * allocation cluster
8730          */
8731         cluster = &root->fs_info->meta_alloc_cluster;
8732         spin_lock(&cluster->refill_lock);
8733         btrfs_return_cluster_to_free_space(block_group, cluster);
8734         spin_unlock(&cluster->refill_lock);
8735
8736         path = btrfs_alloc_path();
8737         if (!path) {
8738                 ret = -ENOMEM;
8739                 goto out;
8740         }
8741
8742         inode = lookup_free_space_inode(tree_root, block_group, path);
8743         if (!IS_ERR(inode)) {
8744                 ret = btrfs_orphan_add(trans, inode);
8745                 if (ret) {
8746                         btrfs_add_delayed_iput(inode);
8747                         goto out;
8748                 }
8749                 clear_nlink(inode);
8750                 /* One for the block group's ref */
8751                 spin_lock(&block_group->lock);
8752                 if (block_group->iref) {
8753                         block_group->iref = 0;
8754                         block_group->inode = NULL;
8755                         spin_unlock(&block_group->lock);
8756                         iput(inode);
8757                 } else {
8758                         spin_unlock(&block_group->lock);
8759                 }
8760                 /* One for our lookup ref */
8761                 btrfs_add_delayed_iput(inode);
8762         }
8763
8764         key.objectid = BTRFS_FREE_SPACE_OBJECTID;
8765         key.offset = block_group->key.objectid;
8766         key.type = 0;
8767
8768         ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
8769         if (ret < 0)
8770                 goto out;
8771         if (ret > 0)
8772                 btrfs_release_path(path);
8773         if (ret == 0) {
8774                 ret = btrfs_del_item(trans, tree_root, path);
8775                 if (ret)
8776                         goto out;
8777                 btrfs_release_path(path);
8778         }
8779
8780         spin_lock(&root->fs_info->block_group_cache_lock);
8781         rb_erase(&block_group->cache_node,
8782                  &root->fs_info->block_group_cache_tree);
8783
8784         if (root->fs_info->first_logical_byte == block_group->key.objectid)
8785                 root->fs_info->first_logical_byte = (u64)-1;
8786         spin_unlock(&root->fs_info->block_group_cache_lock);
8787
8788         down_write(&block_group->space_info->groups_sem);
8789         /*
8790          * we must use list_del_init so people can check to see if they
8791          * are still on the list after taking the semaphore
8792          */
8793         list_del_init(&block_group->list);
8794         if (list_empty(&block_group->space_info->block_groups[index])) {
8795                 kobject_del(&block_group->space_info->block_group_kobjs[index]);
8796                 kobject_put(&block_group->space_info->block_group_kobjs[index]);
8797                 clear_avail_alloc_bits(root->fs_info, block_group->flags);
8798         }
8799         up_write(&block_group->space_info->groups_sem);
8800
8801         if (block_group->cached == BTRFS_CACHE_STARTED)
8802                 wait_block_group_cache_done(block_group);
8803
8804         btrfs_remove_free_space_cache(block_group);
8805
8806         spin_lock(&block_group->space_info->lock);
8807         block_group->space_info->total_bytes -= block_group->key.offset;
8808         block_group->space_info->bytes_readonly -= block_group->key.offset;
8809         block_group->space_info->disk_total -= block_group->key.offset * factor;
8810         spin_unlock(&block_group->space_info->lock);
8811
8812         memcpy(&key, &block_group->key, sizeof(key));
8813
8814         btrfs_clear_space_info_full(root->fs_info);
8815
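             /* once for our lookup ref, once for the ref the rbtree held */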
8816         btrfs_put_block_group(block_group);
8817         btrfs_put_block_group(block_group);
8818
8819         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
8820         if (ret > 0)
8821                 ret = -EIO;
8822         if (ret < 0)
8823                 goto out;
8824
8825         ret = btrfs_del_item(trans, root, path);
8826 out:
8827         btrfs_free_path(path);
8828         return ret;
8829 }
8830
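     /*
      * create the initial empty space_info structs: system, plus either
      * one mixed metadata+data space_info or separate metadata and data
      * ones.
      */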
8831 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
8832 {
8833         struct btrfs_space_info *space_info;
8834         struct btrfs_super_block *disk_super;
8835         u64 features;
8836         u64 flags;
8837         int mixed = 0;
8838         int ret;
8839
8840         disk_super = fs_info->super_copy;
8841         if (!btrfs_super_root(disk_super))
8842                 return 1;
8843
8844         features = btrfs_super_incompat_flags(disk_super);
8845         if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
8846                 mixed = 1;
8847
8848         flags = BTRFS_BLOCK_GROUP_SYSTEM;
8849         ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8850         if (ret)
8851                 goto out;
8852
8853         if (mixed) {
8854                 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
8855                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8856         } else {
8857                 flags = BTRFS_BLOCK_GROUP_METADATA;
8858                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8859                 if (ret)
8860                         goto out;
8861
8862                 flags = BTRFS_BLOCK_GROUP_DATA;
8863                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8864         }
8865 out:
8866         return ret;
8867 }
8868
8869 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
8870 {
8871         return unpin_extent_range(root, start, end);
8872 }
8873
8874 int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
8875                                u64 num_bytes, u64 *actual_bytes)
8876 {
8877         return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
8878 }
8879
8880 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
8881 {
8882         struct btrfs_fs_info *fs_info = root->fs_info;
8883         struct btrfs_block_group_cache *cache = NULL;
8884         u64 group_trimmed;
8885         u64 start;
8886         u64 end;
8887         u64 trimmed = 0;
8888         u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
8889         int ret = 0;
8890
8891         /*
8892          * try to trim all FS space; the first block group may start at a non-zero offset.
8893          */
8894         if (range->len == total_bytes)
8895                 cache = btrfs_lookup_first_block_group(fs_info, range->start);
8896         else
8897                 cache = btrfs_lookup_block_group(fs_info, range->start);
8898
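             /*
              * walk every block group overlapping the range and trim the
              * intersection of [start, end) with that group.
              */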
8899         while (cache) {
8900                 if (cache->key.objectid >= (range->start + range->len)) {
8901                         btrfs_put_block_group(cache);
8902                         break;
8903                 }
8904
8905                 start = max(range->start, cache->key.objectid);
8906                 end = min(range->start + range->len,
8907                                 cache->key.objectid + cache->key.offset);
8908
8909                 if (end - start >= range->minlen) {
8910                         if (!block_group_cache_done(cache)) {
8911                                 ret = cache_block_group(cache, 0);
8912                                 if (ret) {
8913                                         btrfs_put_block_group(cache);
8914                                         break;
8915                                 }
8916                                 ret = wait_block_group_cache_done(cache);
8917                                 if (ret) {
8918                                         btrfs_put_block_group(cache);
8919                                         break;
8920                                 }
8921                         }
8922                         ret = btrfs_trim_block_group(cache,
8923                                                      &group_trimmed,
8924                                                      start,
8925                                                      end,
8926                                                      range->minlen);
8927
8928                         trimmed += group_trimmed;
8929                         if (ret) {
8930                                 btrfs_put_block_group(cache);
8931                                 break;
8932                         }
8933                 }
8934
8935                 cache = next_block_group(fs_info->tree_root, cache);
8936         }
8937
8938         range->len = trimmed;
8939         return ret;
8940 }
8941
8942 /*
8943  * btrfs_{start,end}_nocow_write() are similar to mnt_{want,drop}_write();
8944  * they are used to prevent tasks from writing data into the page cache
8945  * via nocow before the subvolume is snapshotted, and to make sure the
8946  * data is flushed onto disk after the snapshot creation.
8947  */
8948 void btrfs_end_nocow_write(struct btrfs_root *root)
8949 {
8950         percpu_counter_dec(&root->subv_writers->counter);
8951         /*
8952          * Make sure counter is updated before we wake up
8953          * waiters.
8954          */
8955         smp_mb();
8956         if (waitqueue_active(&root->subv_writers->wait))
8957                 wake_up(&root->subv_writers->wait);
8958 }
8959
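     /*
      * returns 1 if nocow writing may proceed, 0 if a snapshot of this
      * root is pending and the caller must fall back to COW.
      */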
8960 int btrfs_start_nocow_write(struct btrfs_root *root)
8961 {
8962         if (unlikely(atomic_read(&root->will_be_snapshoted)))
8963                 return 0;
8964
8965         percpu_counter_inc(&root->subv_writers->counter);
8966         /*
8967          * Make sure counter is updated before we check for snapshot creation.
8968          */
8969         smp_mb();
8970         if (unlikely(atomic_read(&root->will_be_snapshoted))) {
8971                 btrfs_end_nocow_write(root);
8972                 return 0;
8973         }
8974         return 1;
8975 }