x86: LLVMLinux: Fix "incomplete type const struct x86cpu_device_id"
[linux.git] / fs / btrfs / extent-tree.c
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 021110-1307, USA.
17  */
18 #include <linux/sched.h>
19 #include <linux/pagemap.h>
20 #include <linux/writeback.h>
21 #include <linux/blkdev.h>
22 #include <linux/sort.h>
23 #include <linux/rcupdate.h>
24 #include <linux/kthread.h>
25 #include <linux/slab.h>
26 #include <linux/ratelimit.h>
27 #include <linux/percpu_counter.h>
28 #include "hash.h"
29 #include "ctree.h"
30 #include "disk-io.h"
31 #include "print-tree.h"
32 #include "transaction.h"
33 #include "volumes.h"
34 #include "raid56.h"
35 #include "locking.h"
36 #include "free-space-cache.h"
37 #include "math.h"
38 #include "sysfs.h"
39
40 #undef SCRAMBLE_DELAYED_REFS
41
42 /*
43  * control flags for do_chunk_alloc's force field
44  * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
45  * if we really need one.
46  *
47  * CHUNK_ALLOC_LIMITED means to only try and allocate one
48  * if we have very few chunks already allocated.  This is
49  * used as part of the clustering code to help make sure
50  * we have a good pool of storage to cluster in, without
51  * filling the FS with empty chunks
52  *
53  * CHUNK_ALLOC_FORCE means it must try to allocate one
54  *
55  */
56 enum {
57         CHUNK_ALLOC_NO_FORCE = 0,
58         CHUNK_ALLOC_LIMITED = 1,
59         CHUNK_ALLOC_FORCE = 2,
60 };
61
62 /*
63  * Control how reservations are dealt with.
64  *
65  * RESERVE_FREE - freeing a reservation.
66  * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
67  *   ENOSPC accounting
68  * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
69  *   bytes_may_use as the ENOSPC accounting is done elsewhere
70  */
71 enum {
72         RESERVE_FREE = 0,
73         RESERVE_ALLOC = 1,
74         RESERVE_ALLOC_NO_ACCOUNT = 2,
75 };
76
77 static int update_block_group(struct btrfs_root *root,
78                               u64 bytenr, u64 num_bytes, int alloc);
79 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
80                                 struct btrfs_root *root,
81                                 u64 bytenr, u64 num_bytes, u64 parent,
82                                 u64 root_objectid, u64 owner_objectid,
83                                 u64 owner_offset, int refs_to_drop,
84                                 struct btrfs_delayed_extent_op *extra_op);
85 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
86                                     struct extent_buffer *leaf,
87                                     struct btrfs_extent_item *ei);
88 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
89                                       struct btrfs_root *root,
90                                       u64 parent, u64 root_objectid,
91                                       u64 flags, u64 owner, u64 offset,
92                                       struct btrfs_key *ins, int ref_mod);
93 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
94                                      struct btrfs_root *root,
95                                      u64 parent, u64 root_objectid,
96                                      u64 flags, struct btrfs_disk_key *key,
97                                      int level, struct btrfs_key *ins);
98 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
99                           struct btrfs_root *extent_root, u64 flags,
100                           int force);
101 static int find_next_key(struct btrfs_path *path, int level,
102                          struct btrfs_key *key);
103 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
104                             int dump_block_groups);
105 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
106                                        u64 num_bytes, int reserve);
107 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
108                                u64 num_bytes);
109 int btrfs_pin_extent(struct btrfs_root *root,
110                      u64 bytenr, u64 num_bytes, int reserved);
111
112 static noinline int
113 block_group_cache_done(struct btrfs_block_group_cache *cache)
114 {
115         smp_mb();
116         return cache->cached == BTRFS_CACHE_FINISHED ||
117                 cache->cached == BTRFS_CACHE_ERROR;
118 }
119
120 static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
121 {
122         return (cache->flags & bits) == bits;
123 }
124
125 static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
126 {
127         atomic_inc(&cache->count);
128 }
129
130 void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
131 {
132         if (atomic_dec_and_test(&cache->count)) {
133                 WARN_ON(cache->pinned > 0);
134                 WARN_ON(cache->reserved > 0);
135                 kfree(cache->free_space_ctl);
136                 kfree(cache);
137         }
138 }
139
140 /*
141  * this adds the block group to the fs_info rb tree for the block group
142  * cache
143  */
144 static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
145                                 struct btrfs_block_group_cache *block_group)
146 {
147         struct rb_node **p;
148         struct rb_node *parent = NULL;
149         struct btrfs_block_group_cache *cache;
150
151         spin_lock(&info->block_group_cache_lock);
152         p = &info->block_group_cache_tree.rb_node;
153
154         while (*p) {
155                 parent = *p;
156                 cache = rb_entry(parent, struct btrfs_block_group_cache,
157                                  cache_node);
158                 if (block_group->key.objectid < cache->key.objectid) {
159                         p = &(*p)->rb_left;
160                 } else if (block_group->key.objectid > cache->key.objectid) {
161                         p = &(*p)->rb_right;
162                 } else {
163                         spin_unlock(&info->block_group_cache_lock);
164                         return -EEXIST;
165                 }
166         }
167
168         rb_link_node(&block_group->cache_node, parent, p);
169         rb_insert_color(&block_group->cache_node,
170                         &info->block_group_cache_tree);
171
172         if (info->first_logical_byte > block_group->key.objectid)
173                 info->first_logical_byte = block_group->key.objectid;
174
175         spin_unlock(&info->block_group_cache_lock);
176
177         return 0;
178 }
179
180 /*
181  * This will return the block group at or after bytenr if contains is 0, else
182  * it will return the block group that contains the bytenr
183  */
184 static struct btrfs_block_group_cache *
185 block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
186                               int contains)
187 {
188         struct btrfs_block_group_cache *cache, *ret = NULL;
189         struct rb_node *n;
190         u64 end, start;
191
192         spin_lock(&info->block_group_cache_lock);
193         n = info->block_group_cache_tree.rb_node;
194
195         while (n) {
196                 cache = rb_entry(n, struct btrfs_block_group_cache,
197                                  cache_node);
198                 end = cache->key.objectid + cache->key.offset - 1;
199                 start = cache->key.objectid;
200
201                 if (bytenr < start) {
202                         if (!contains && (!ret || start < ret->key.objectid))
203                                 ret = cache;
204                         n = n->rb_left;
205                 } else if (bytenr > start) {
206                         if (contains && bytenr <= end) {
207                                 ret = cache;
208                                 break;
209                         }
210                         n = n->rb_right;
211                 } else {
212                         ret = cache;
213                         break;
214                 }
215         }
216         if (ret) {
217                 btrfs_get_block_group(ret);
218                 if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
219                         info->first_logical_byte = ret->key.objectid;
220         }
221         spin_unlock(&info->block_group_cache_lock);
222
223         return ret;
224 }
225
226 static int add_excluded_extent(struct btrfs_root *root,
227                                u64 start, u64 num_bytes)
228 {
229         u64 end = start + num_bytes - 1;
230         set_extent_bits(&root->fs_info->freed_extents[0],
231                         start, end, EXTENT_UPTODATE, GFP_NOFS);
232         set_extent_bits(&root->fs_info->freed_extents[1],
233                         start, end, EXTENT_UPTODATE, GFP_NOFS);
234         return 0;
235 }
236
237 static void free_excluded_extents(struct btrfs_root *root,
238                                   struct btrfs_block_group_cache *cache)
239 {
240         u64 start, end;
241
242         start = cache->key.objectid;
243         end = start + cache->key.offset - 1;
244
245         clear_extent_bits(&root->fs_info->freed_extents[0],
246                           start, end, EXTENT_UPTODATE, GFP_NOFS);
247         clear_extent_bits(&root->fs_info->freed_extents[1],
248                           start, end, EXTENT_UPTODATE, GFP_NOFS);
249 }
250
251 static int exclude_super_stripes(struct btrfs_root *root,
252                                  struct btrfs_block_group_cache *cache)
253 {
254         u64 bytenr;
255         u64 *logical;
256         int stripe_len;
257         int i, nr, ret;
258
259         if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
260                 stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
261                 cache->bytes_super += stripe_len;
262                 ret = add_excluded_extent(root, cache->key.objectid,
263                                           stripe_len);
264                 if (ret)
265                         return ret;
266         }
267
268         for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
269                 bytenr = btrfs_sb_offset(i);
270                 ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
271                                        cache->key.objectid, bytenr,
272                                        0, &logical, &nr, &stripe_len);
273                 if (ret)
274                         return ret;
275
276                 while (nr--) {
277                         u64 start, len;
278
279                         if (logical[nr] > cache->key.objectid +
280                             cache->key.offset)
281                                 continue;
282
283                         if (logical[nr] + stripe_len <= cache->key.objectid)
284                                 continue;
285
286                         start = logical[nr];
287                         if (start < cache->key.objectid) {
288                                 start = cache->key.objectid;
289                                 len = (logical[nr] + stripe_len) - start;
290                         } else {
291                                 len = min_t(u64, stripe_len,
292                                             cache->key.objectid +
293                                             cache->key.offset - start);
294                         }
295
296                         cache->bytes_super += len;
297                         ret = add_excluded_extent(root, start, len);
298                         if (ret) {
299                                 kfree(logical);
300                                 return ret;
301                         }
302                 }
303
304                 kfree(logical);
305         }
306         return 0;
307 }
308
309 static struct btrfs_caching_control *
310 get_caching_control(struct btrfs_block_group_cache *cache)
311 {
312         struct btrfs_caching_control *ctl;
313
314         spin_lock(&cache->lock);
315         if (cache->cached != BTRFS_CACHE_STARTED) {
316                 spin_unlock(&cache->lock);
317                 return NULL;
318         }
319
320         /* We're loading it the fast way, so we don't have a caching_ctl. */
321         if (!cache->caching_ctl) {
322                 spin_unlock(&cache->lock);
323                 return NULL;
324         }
325
326         ctl = cache->caching_ctl;
327         atomic_inc(&ctl->count);
328         spin_unlock(&cache->lock);
329         return ctl;
330 }
331
332 static void put_caching_control(struct btrfs_caching_control *ctl)
333 {
334         if (atomic_dec_and_test(&ctl->count))
335                 kfree(ctl);
336 }
337
338 /*
339  * this is only called by cache_block_group, since we could have freed extents
340  * we need to check the pinned_extents for any extents that can't be used yet
341  * since their free space will be released as soon as the transaction commits.
342  */
343 static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
344                               struct btrfs_fs_info *info, u64 start, u64 end)
345 {
346         u64 extent_start, extent_end, size, total_added = 0;
347         int ret;
348
349         while (start < end) {
350                 ret = find_first_extent_bit(info->pinned_extents, start,
351                                             &extent_start, &extent_end,
352                                             EXTENT_DIRTY | EXTENT_UPTODATE,
353                                             NULL);
354                 if (ret)
355                         break;
356
357                 if (extent_start <= start) {
358                         start = extent_end + 1;
359                 } else if (extent_start > start && extent_start < end) {
360                         size = extent_start - start;
361                         total_added += size;
362                         ret = btrfs_add_free_space(block_group, start,
363                                                    size);
364                         BUG_ON(ret); /* -ENOMEM or logic error */
365                         start = extent_end + 1;
366                 } else {
367                         break;
368                 }
369         }
370
371         if (start < end) {
372                 size = end - start;
373                 total_added += size;
374                 ret = btrfs_add_free_space(block_group, start, size);
375                 BUG_ON(ret); /* -ENOMEM or logic error */
376         }
377
378         return total_added;
379 }
380
381 static noinline void caching_thread(struct btrfs_work *work)
382 {
383         struct btrfs_block_group_cache *block_group;
384         struct btrfs_fs_info *fs_info;
385         struct btrfs_caching_control *caching_ctl;
386         struct btrfs_root *extent_root;
387         struct btrfs_path *path;
388         struct extent_buffer *leaf;
389         struct btrfs_key key;
390         u64 total_found = 0;
391         u64 last = 0;
392         u32 nritems;
393         int ret = -ENOMEM;
394
395         caching_ctl = container_of(work, struct btrfs_caching_control, work);
396         block_group = caching_ctl->block_group;
397         fs_info = block_group->fs_info;
398         extent_root = fs_info->extent_root;
399
400         path = btrfs_alloc_path();
401         if (!path)
402                 goto out;
403
404         last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
405
406         /*
407          * We don't want to deadlock with somebody trying to allocate a new
408          * extent for the extent root while also trying to search the extent
409          * root to add free space.  So we skip locking and search the commit
410          * root, since its read-only
411          */
412         path->skip_locking = 1;
413         path->search_commit_root = 1;
414         path->reada = 1;
415
416         key.objectid = last;
417         key.offset = 0;
418         key.type = BTRFS_EXTENT_ITEM_KEY;
419 again:
420         mutex_lock(&caching_ctl->mutex);
421         /* need to make sure the commit_root doesn't disappear */
422         down_read(&fs_info->extent_commit_sem);
423
424 next:
425         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
426         if (ret < 0)
427                 goto err;
428
429         leaf = path->nodes[0];
430         nritems = btrfs_header_nritems(leaf);
431
432         while (1) {
433                 if (btrfs_fs_closing(fs_info) > 1) {
434                         last = (u64)-1;
435                         break;
436                 }
437
438                 if (path->slots[0] < nritems) {
439                         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
440                 } else {
441                         ret = find_next_key(path, 0, &key);
442                         if (ret)
443                                 break;
444
445                         if (need_resched() ||
446                             rwsem_is_contended(&fs_info->extent_commit_sem)) {
447                                 caching_ctl->progress = last;
448                                 btrfs_release_path(path);
449                                 up_read(&fs_info->extent_commit_sem);
450                                 mutex_unlock(&caching_ctl->mutex);
451                                 cond_resched();
452                                 goto again;
453                         }
454
455                         ret = btrfs_next_leaf(extent_root, path);
456                         if (ret < 0)
457                                 goto err;
458                         if (ret)
459                                 break;
460                         leaf = path->nodes[0];
461                         nritems = btrfs_header_nritems(leaf);
462                         continue;
463                 }
464
465                 if (key.objectid < last) {
466                         key.objectid = last;
467                         key.offset = 0;
468                         key.type = BTRFS_EXTENT_ITEM_KEY;
469
470                         caching_ctl->progress = last;
471                         btrfs_release_path(path);
472                         goto next;
473                 }
474
475                 if (key.objectid < block_group->key.objectid) {
476                         path->slots[0]++;
477                         continue;
478                 }
479
480                 if (key.objectid >= block_group->key.objectid +
481                     block_group->key.offset)
482                         break;
483
484                 if (key.type == BTRFS_EXTENT_ITEM_KEY ||
485                     key.type == BTRFS_METADATA_ITEM_KEY) {
486                         total_found += add_new_free_space(block_group,
487                                                           fs_info, last,
488                                                           key.objectid);
489                         if (key.type == BTRFS_METADATA_ITEM_KEY)
490                                 last = key.objectid +
491                                         fs_info->tree_root->leafsize;
492                         else
493                                 last = key.objectid + key.offset;
494
495                         if (total_found > (1024 * 1024 * 2)) {
496                                 total_found = 0;
497                                 wake_up(&caching_ctl->wait);
498                         }
499                 }
500                 path->slots[0]++;
501         }
502         ret = 0;
503
504         total_found += add_new_free_space(block_group, fs_info, last,
505                                           block_group->key.objectid +
506                                           block_group->key.offset);
507         caching_ctl->progress = (u64)-1;
508
509         spin_lock(&block_group->lock);
510         block_group->caching_ctl = NULL;
511         block_group->cached = BTRFS_CACHE_FINISHED;
512         spin_unlock(&block_group->lock);
513
514 err:
515         btrfs_free_path(path);
516         up_read(&fs_info->extent_commit_sem);
517
518         free_excluded_extents(extent_root, block_group);
519
520         mutex_unlock(&caching_ctl->mutex);
521 out:
522         if (ret) {
523                 spin_lock(&block_group->lock);
524                 block_group->caching_ctl = NULL;
525                 block_group->cached = BTRFS_CACHE_ERROR;
526                 spin_unlock(&block_group->lock);
527         }
528         wake_up(&caching_ctl->wait);
529
530         put_caching_control(caching_ctl);
531         btrfs_put_block_group(block_group);
532 }
533
534 static int cache_block_group(struct btrfs_block_group_cache *cache,
535                              int load_cache_only)
536 {
537         DEFINE_WAIT(wait);
538         struct btrfs_fs_info *fs_info = cache->fs_info;
539         struct btrfs_caching_control *caching_ctl;
540         int ret = 0;
541
542         caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
543         if (!caching_ctl)
544                 return -ENOMEM;
545
546         INIT_LIST_HEAD(&caching_ctl->list);
547         mutex_init(&caching_ctl->mutex);
548         init_waitqueue_head(&caching_ctl->wait);
549         caching_ctl->block_group = cache;
550         caching_ctl->progress = cache->key.objectid;
551         atomic_set(&caching_ctl->count, 1);
552         btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL);
553
554         spin_lock(&cache->lock);
555         /*
556          * This should be a rare occasion, but this could happen I think in the
557          * case where one thread starts to load the space cache info, and then
558          * some other thread starts a transaction commit which tries to do an
559          * allocation while the other thread is still loading the space cache
560          * info.  The previous loop should have kept us from choosing this block
561          * group, but if we've moved to the state where we will wait on caching
562          * block groups we need to first check if we're doing a fast load here,
563          * so we can wait for it to finish, otherwise we could end up allocating
564          * from a block group who's cache gets evicted for one reason or
565          * another.
566          */
567         while (cache->cached == BTRFS_CACHE_FAST) {
568                 struct btrfs_caching_control *ctl;
569
570                 ctl = cache->caching_ctl;
571                 atomic_inc(&ctl->count);
572                 prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
573                 spin_unlock(&cache->lock);
574
575                 schedule();
576
577                 finish_wait(&ctl->wait, &wait);
578                 put_caching_control(ctl);
579                 spin_lock(&cache->lock);
580         }
581
582         if (cache->cached != BTRFS_CACHE_NO) {
583                 spin_unlock(&cache->lock);
584                 kfree(caching_ctl);
585                 return 0;
586         }
587         WARN_ON(cache->caching_ctl);
588         cache->caching_ctl = caching_ctl;
589         cache->cached = BTRFS_CACHE_FAST;
590         spin_unlock(&cache->lock);
591
592         if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
593                 ret = load_free_space_cache(fs_info, cache);
594
595                 spin_lock(&cache->lock);
596                 if (ret == 1) {
597                         cache->caching_ctl = NULL;
598                         cache->cached = BTRFS_CACHE_FINISHED;
599                         cache->last_byte_to_unpin = (u64)-1;
600                 } else {
601                         if (load_cache_only) {
602                                 cache->caching_ctl = NULL;
603                                 cache->cached = BTRFS_CACHE_NO;
604                         } else {
605                                 cache->cached = BTRFS_CACHE_STARTED;
606                         }
607                 }
608                 spin_unlock(&cache->lock);
609                 wake_up(&caching_ctl->wait);
610                 if (ret == 1) {
611                         put_caching_control(caching_ctl);
612                         free_excluded_extents(fs_info->extent_root, cache);
613                         return 0;
614                 }
615         } else {
616                 /*
617                  * We are not going to do the fast caching, set cached to the
618                  * appropriate value and wakeup any waiters.
619                  */
620                 spin_lock(&cache->lock);
621                 if (load_cache_only) {
622                         cache->caching_ctl = NULL;
623                         cache->cached = BTRFS_CACHE_NO;
624                 } else {
625                         cache->cached = BTRFS_CACHE_STARTED;
626                 }
627                 spin_unlock(&cache->lock);
628                 wake_up(&caching_ctl->wait);
629         }
630
631         if (load_cache_only) {
632                 put_caching_control(caching_ctl);
633                 return 0;
634         }
635
636         down_write(&fs_info->extent_commit_sem);
637         atomic_inc(&caching_ctl->count);
638         list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
639         up_write(&fs_info->extent_commit_sem);
640
641         btrfs_get_block_group(cache);
642
643         btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
644
645         return ret;
646 }
647
648 /*
649  * return the block group that starts at or after bytenr
650  */
651 static struct btrfs_block_group_cache *
652 btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
653 {
654         struct btrfs_block_group_cache *cache;
655
656         cache = block_group_cache_tree_search(info, bytenr, 0);
657
658         return cache;
659 }
660
661 /*
662  * return the block group that contains the given bytenr
663  */
664 struct btrfs_block_group_cache *btrfs_lookup_block_group(
665                                                  struct btrfs_fs_info *info,
666                                                  u64 bytenr)
667 {
668         struct btrfs_block_group_cache *cache;
669
670         cache = block_group_cache_tree_search(info, bytenr, 1);
671
672         return cache;
673 }
674
675 static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
676                                                   u64 flags)
677 {
678         struct list_head *head = &info->space_info;
679         struct btrfs_space_info *found;
680
681         flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;
682
683         rcu_read_lock();
684         list_for_each_entry_rcu(found, head, list) {
685                 if (found->flags & flags) {
686                         rcu_read_unlock();
687                         return found;
688                 }
689         }
690         rcu_read_unlock();
691         return NULL;
692 }
693
694 /*
695  * after adding space to the filesystem, we need to clear the full flags
696  * on all the space infos.
697  */
698 void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
699 {
700         struct list_head *head = &info->space_info;
701         struct btrfs_space_info *found;
702
703         rcu_read_lock();
704         list_for_each_entry_rcu(found, head, list)
705                 found->full = 0;
706         rcu_read_unlock();
707 }
708
709 /* simple helper to search for an existing extent at a given offset */
710 int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
711 {
712         int ret;
713         struct btrfs_key key;
714         struct btrfs_path *path;
715
716         path = btrfs_alloc_path();
717         if (!path)
718                 return -ENOMEM;
719
720         key.objectid = start;
721         key.offset = len;
722         key.type = BTRFS_EXTENT_ITEM_KEY;
723         ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
724                                 0, 0);
725         if (ret > 0) {
726                 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
727                 if (key.objectid == start &&
728                     key.type == BTRFS_METADATA_ITEM_KEY)
729                         ret = 0;
730         }
731         btrfs_free_path(path);
732         return ret;
733 }
734
735 /*
736  * helper function to lookup reference count and flags of a tree block.
737  *
738  * the head node for delayed ref is used to store the sum of all the
739  * reference count modifications queued up in the rbtree. the head
740  * node may also store the extent flags to set. This way you can check
741  * to see what the reference count and extent flags would be if all of
742  * the delayed refs are not processed.
743  */
744 int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
745                              struct btrfs_root *root, u64 bytenr,
746                              u64 offset, int metadata, u64 *refs, u64 *flags)
747 {
748         struct btrfs_delayed_ref_head *head;
749         struct btrfs_delayed_ref_root *delayed_refs;
750         struct btrfs_path *path;
751         struct btrfs_extent_item *ei;
752         struct extent_buffer *leaf;
753         struct btrfs_key key;
754         u32 item_size;
755         u64 num_refs;
756         u64 extent_flags;
757         int ret;
758
759         /*
760          * If we don't have skinny metadata, don't bother doing anything
761          * different
762          */
763         if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
764                 offset = root->leafsize;
765                 metadata = 0;
766         }
767
768         path = btrfs_alloc_path();
769         if (!path)
770                 return -ENOMEM;
771
772         if (!trans) {
773                 path->skip_locking = 1;
774                 path->search_commit_root = 1;
775         }
776
777 search_again:
778         key.objectid = bytenr;
779         key.offset = offset;
780         if (metadata)
781                 key.type = BTRFS_METADATA_ITEM_KEY;
782         else
783                 key.type = BTRFS_EXTENT_ITEM_KEY;
784
785 again:
786         ret = btrfs_search_slot(trans, root->fs_info->extent_root,
787                                 &key, path, 0, 0);
788         if (ret < 0)
789                 goto out_free;
790
791         if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
792                 if (path->slots[0]) {
793                         path->slots[0]--;
794                         btrfs_item_key_to_cpu(path->nodes[0], &key,
795                                               path->slots[0]);
796                         if (key.objectid == bytenr &&
797                             key.type == BTRFS_EXTENT_ITEM_KEY &&
798                             key.offset == root->leafsize)
799                                 ret = 0;
800                 }
801                 if (ret) {
802                         key.objectid = bytenr;
803                         key.type = BTRFS_EXTENT_ITEM_KEY;
804                         key.offset = root->leafsize;
805                         btrfs_release_path(path);
806                         goto again;
807                 }
808         }
809
810         if (ret == 0) {
811                 leaf = path->nodes[0];
812                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
813                 if (item_size >= sizeof(*ei)) {
814                         ei = btrfs_item_ptr(leaf, path->slots[0],
815                                             struct btrfs_extent_item);
816                         num_refs = btrfs_extent_refs(leaf, ei);
817                         extent_flags = btrfs_extent_flags(leaf, ei);
818                 } else {
819 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
820                         struct btrfs_extent_item_v0 *ei0;
821                         BUG_ON(item_size != sizeof(*ei0));
822                         ei0 = btrfs_item_ptr(leaf, path->slots[0],
823                                              struct btrfs_extent_item_v0);
824                         num_refs = btrfs_extent_refs_v0(leaf, ei0);
825                         /* FIXME: this isn't correct for data */
826                         extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
827 #else
828                         BUG();
829 #endif
830                 }
831                 BUG_ON(num_refs == 0);
832         } else {
833                 num_refs = 0;
834                 extent_flags = 0;
835                 ret = 0;
836         }
837
838         if (!trans)
839                 goto out;
840
841         delayed_refs = &trans->transaction->delayed_refs;
842         spin_lock(&delayed_refs->lock);
843         head = btrfs_find_delayed_ref_head(trans, bytenr);
844         if (head) {
845                 if (!mutex_trylock(&head->mutex)) {
846                         atomic_inc(&head->node.refs);
847                         spin_unlock(&delayed_refs->lock);
848
849                         btrfs_release_path(path);
850
851                         /*
852                          * Mutex was contended, block until it's released and try
853                          * again
854                          */
855                         mutex_lock(&head->mutex);
856                         mutex_unlock(&head->mutex);
857                         btrfs_put_delayed_ref(&head->node);
858                         goto search_again;
859                 }
860                 spin_lock(&head->lock);
861                 if (head->extent_op && head->extent_op->update_flags)
862                         extent_flags |= head->extent_op->flags_to_set;
863                 else
864                         BUG_ON(num_refs == 0);
865
866                 num_refs += head->node.ref_mod;
867                 spin_unlock(&head->lock);
868                 mutex_unlock(&head->mutex);
869         }
870         spin_unlock(&delayed_refs->lock);
871 out:
872         WARN_ON(num_refs == 0);
873         if (refs)
874                 *refs = num_refs;
875         if (flags)
876                 *flags = extent_flags;
877 out_free:
878         btrfs_free_path(path);
879         return ret;
880 }
881
882 /*
883  * Back reference rules.  Back refs have three main goals:
884  *
885  * 1) differentiate between all holders of references to an extent so that
886  *    when a reference is dropped we can make sure it was a valid reference
887  *    before freeing the extent.
888  *
889  * 2) Provide enough information to quickly find the holders of an extent
890  *    if we notice a given block is corrupted or bad.
891  *
892  * 3) Make it easy to migrate blocks for FS shrinking or storage pool
893  *    maintenance.  This is actually the same as #2, but with a slightly
894  *    different use case.
895  *
896  * There are two kinds of back refs. The implicit back refs is optimized
897  * for pointers in non-shared tree blocks. For a given pointer in a block,
898  * back refs of this kind provide information about the block's owner tree
899  * and the pointer's key. These information allow us to find the block by
900  * b-tree searching. The full back refs is for pointers in tree blocks not
901  * referenced by their owner trees. The location of tree block is recorded
902  * in the back refs. Actually the full back refs is generic, and can be
903  * used in all cases the implicit back refs is used. The major shortcoming
904  * of the full back refs is its overhead. Every time a tree block gets
905  * COWed, we have to update back refs entry for all pointers in it.
906  *
907  * For a newly allocated tree block, we use implicit back refs for
908  * pointers in it. This means most tree related operations only involve
909  * implicit back refs. For a tree block created in old transaction, the
910  * only way to drop a reference to it is COW it. So we can detect the
911  * event that tree block loses its owner tree's reference and do the
912  * back refs conversion.
913  *
914  * When a tree block is COW'd through a tree, there are four cases:
915  *
916  * The reference count of the block is one and the tree is the block's
917  * owner tree. Nothing to do in this case.
918  *
919  * The reference count of the block is one and the tree is not the
920  * block's owner tree. In this case, full back refs is used for pointers
921  * in the block. Remove these full back refs, add implicit back refs for
922  * every pointers in the new block.
923  *
924  * The reference count of the block is greater than one and the tree is
925  * the block's owner tree. In this case, implicit back refs is used for
926  * pointers in the block. Add full back refs for every pointers in the
927  * block, increase lower level extents' reference counts. The original
928  * implicit back refs are entailed to the new block.
929  *
930  * The reference count of the block is greater than one and the tree is
931  * not the block's owner tree. Add implicit back refs for every pointer in
932  * the new block, increase lower level extents' reference count.
933  *
934  * Back Reference Key composing:
935  *
936  * The key objectid corresponds to the first byte in the extent,
937  * The key type is used to differentiate between types of back refs.
938  * There are different meanings of the key offset for different types
939  * of back refs.
940  *
941  * File extents can be referenced by:
942  *
943  * - multiple snapshots, subvolumes, or different generations in one subvol
944  * - different files inside a single subvolume
945  * - different offsets inside a file (bookend extents in file.c)
946  *
947  * The extent ref structure for the implicit back refs has fields for:
948  *
949  * - Objectid of the subvolume root
950  * - objectid of the file holding the reference
951  * - original offset in the file
952  * - how many bookend extents
953  *
954  * The key offset for the implicit back refs is hash of the first
955  * three fields.
956  *
957  * The extent ref structure for the full back refs has field for:
958  *
959  * - number of pointers in the tree leaf
960  *
961  * The key offset for the implicit back refs is the first byte of
962  * the tree leaf
963  *
964  * When a file extent is allocated, The implicit back refs is used.
965  * the fields are filled in:
966  *
967  *     (root_key.objectid, inode objectid, offset in file, 1)
968  *
969  * When a file extent is removed file truncation, we find the
970  * corresponding implicit back refs and check the following fields:
971  *
972  *     (btrfs_header_owner(leaf), inode objectid, offset in file)
973  *
974  * Btree extents can be referenced by:
975  *
976  * - Different subvolumes
977  *
978  * Both the implicit back refs and the full back refs for tree blocks
979  * only consist of key. The key offset for the implicit back refs is
980  * objectid of block's owner tree. The key offset for the full back refs
981  * is the first byte of parent block.
982  *
983  * When implicit back refs is used, information about the lowest key and
984  * level of the tree block are required. These information are stored in
985  * tree block info structure.
986  */
987
988 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
989 static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
990                                   struct btrfs_root *root,
991                                   struct btrfs_path *path,
992                                   u64 owner, u32 extra_size)
993 {
994         struct btrfs_extent_item *item;
995         struct btrfs_extent_item_v0 *ei0;
996         struct btrfs_extent_ref_v0 *ref0;
997         struct btrfs_tree_block_info *bi;
998         struct extent_buffer *leaf;
999         struct btrfs_key key;
1000         struct btrfs_key found_key;
1001         u32 new_size = sizeof(*item);
1002         u64 refs;
1003         int ret;
1004
1005         leaf = path->nodes[0];
1006         BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));
1007
1008         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1009         ei0 = btrfs_item_ptr(leaf, path->slots[0],
1010                              struct btrfs_extent_item_v0);
1011         refs = btrfs_extent_refs_v0(leaf, ei0);
1012
1013         if (owner == (u64)-1) {
1014                 while (1) {
1015                         if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1016                                 ret = btrfs_next_leaf(root, path);
1017                                 if (ret < 0)
1018                                         return ret;
1019                                 BUG_ON(ret > 0); /* Corruption */
1020                                 leaf = path->nodes[0];
1021                         }
1022                         btrfs_item_key_to_cpu(leaf, &found_key,
1023                                               path->slots[0]);
1024                         BUG_ON(key.objectid != found_key.objectid);
1025                         if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
1026                                 path->slots[0]++;
1027                                 continue;
1028                         }
1029                         ref0 = btrfs_item_ptr(leaf, path->slots[0],
1030                                               struct btrfs_extent_ref_v0);
1031                         owner = btrfs_ref_objectid_v0(leaf, ref0);
1032                         break;
1033                 }
1034         }
1035         btrfs_release_path(path);
1036
1037         if (owner < BTRFS_FIRST_FREE_OBJECTID)
1038                 new_size += sizeof(*bi);
1039
1040         new_size -= sizeof(*ei0);
1041         ret = btrfs_search_slot(trans, root, &key, path,
1042                                 new_size + extra_size, 1);
1043         if (ret < 0)
1044                 return ret;
1045         BUG_ON(ret); /* Corruption */
1046
1047         btrfs_extend_item(root, path, new_size);
1048
1049         leaf = path->nodes[0];
1050         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1051         btrfs_set_extent_refs(leaf, item, refs);
1052         /* FIXME: get real generation */
1053         btrfs_set_extent_generation(leaf, item, 0);
1054         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1055                 btrfs_set_extent_flags(leaf, item,
1056                                        BTRFS_EXTENT_FLAG_TREE_BLOCK |
1057                                        BTRFS_BLOCK_FLAG_FULL_BACKREF);
1058                 bi = (struct btrfs_tree_block_info *)(item + 1);
1059                 /* FIXME: get first key of the block */
1060                 memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
1061                 btrfs_set_tree_block_level(leaf, bi, (int)owner);
1062         } else {
1063                 btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
1064         }
1065         btrfs_mark_buffer_dirty(leaf);
1066         return 0;
1067 }
1068 #endif
1069
1070 static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
1071 {
1072         u32 high_crc = ~(u32)0;
1073         u32 low_crc = ~(u32)0;
1074         __le64 lenum;
1075
1076         lenum = cpu_to_le64(root_objectid);
1077         high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
1078         lenum = cpu_to_le64(owner);
1079         low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
1080         lenum = cpu_to_le64(offset);
1081         low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
1082
1083         return ((u64)high_crc << 31) ^ (u64)low_crc;
1084 }
1085
1086 static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
1087                                      struct btrfs_extent_data_ref *ref)
1088 {
1089         return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
1090                                     btrfs_extent_data_ref_objectid(leaf, ref),
1091                                     btrfs_extent_data_ref_offset(leaf, ref));
1092 }
1093
1094 static int match_extent_data_ref(struct extent_buffer *leaf,
1095                                  struct btrfs_extent_data_ref *ref,
1096                                  u64 root_objectid, u64 owner, u64 offset)
1097 {
1098         if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
1099             btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
1100             btrfs_extent_data_ref_offset(leaf, ref) != offset)
1101                 return 0;
1102         return 1;
1103 }
1104
1105 static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
1106                                            struct btrfs_root *root,
1107                                            struct btrfs_path *path,
1108                                            u64 bytenr, u64 parent,
1109                                            u64 root_objectid,
1110                                            u64 owner, u64 offset)
1111 {
1112         struct btrfs_key key;
1113         struct btrfs_extent_data_ref *ref;
1114         struct extent_buffer *leaf;
1115         u32 nritems;
1116         int ret;
1117         int recow;
1118         int err = -ENOENT;
1119
1120         key.objectid = bytenr;
1121         if (parent) {
1122                 key.type = BTRFS_SHARED_DATA_REF_KEY;
1123                 key.offset = parent;
1124         } else {
1125                 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1126                 key.offset = hash_extent_data_ref(root_objectid,
1127                                                   owner, offset);
1128         }
1129 again:
1130         recow = 0;
1131         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1132         if (ret < 0) {
1133                 err = ret;
1134                 goto fail;
1135         }
1136
1137         if (parent) {
1138                 if (!ret)
1139                         return 0;
1140 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1141                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1142                 btrfs_release_path(path);
1143                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1144                 if (ret < 0) {
1145                         err = ret;
1146                         goto fail;
1147                 }
1148                 if (!ret)
1149                         return 0;
1150 #endif
1151                 goto fail;
1152         }
1153
1154         leaf = path->nodes[0];
1155         nritems = btrfs_header_nritems(leaf);
1156         while (1) {
1157                 if (path->slots[0] >= nritems) {
1158                         ret = btrfs_next_leaf(root, path);
1159                         if (ret < 0)
1160                                 err = ret;
1161                         if (ret)
1162                                 goto fail;
1163
1164                         leaf = path->nodes[0];
1165                         nritems = btrfs_header_nritems(leaf);
1166                         recow = 1;
1167                 }
1168
1169                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1170                 if (key.objectid != bytenr ||
1171                     key.type != BTRFS_EXTENT_DATA_REF_KEY)
1172                         goto fail;
1173
1174                 ref = btrfs_item_ptr(leaf, path->slots[0],
1175                                      struct btrfs_extent_data_ref);
1176
1177                 if (match_extent_data_ref(leaf, ref, root_objectid,
1178                                           owner, offset)) {
1179                         if (recow) {
1180                                 btrfs_release_path(path);
1181                                 goto again;
1182                         }
1183                         err = 0;
1184                         break;
1185                 }
1186                 path->slots[0]++;
1187         }
1188 fail:
1189         return err;
1190 }
1191
1192 static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
1193                                            struct btrfs_root *root,
1194                                            struct btrfs_path *path,
1195                                            u64 bytenr, u64 parent,
1196                                            u64 root_objectid, u64 owner,
1197                                            u64 offset, int refs_to_add)
1198 {
1199         struct btrfs_key key;
1200         struct extent_buffer *leaf;
1201         u32 size;
1202         u32 num_refs;
1203         int ret;
1204
1205         key.objectid = bytenr;
1206         if (parent) {
1207                 key.type = BTRFS_SHARED_DATA_REF_KEY;
1208                 key.offset = parent;
1209                 size = sizeof(struct btrfs_shared_data_ref);
1210         } else {
1211                 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1212                 key.offset = hash_extent_data_ref(root_objectid,
1213                                                   owner, offset);
1214                 size = sizeof(struct btrfs_extent_data_ref);
1215         }
1216
1217         ret = btrfs_insert_empty_item(trans, root, path, &key, size);
1218         if (ret && ret != -EEXIST)
1219                 goto fail;
1220
1221         leaf = path->nodes[0];
1222         if (parent) {
1223                 struct btrfs_shared_data_ref *ref;
1224                 ref = btrfs_item_ptr(leaf, path->slots[0],
1225                                      struct btrfs_shared_data_ref);
1226                 if (ret == 0) {
1227                         btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
1228                 } else {
1229                         num_refs = btrfs_shared_data_ref_count(leaf, ref);
1230                         num_refs += refs_to_add;
1231                         btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
1232                 }
1233         } else {
1234                 struct btrfs_extent_data_ref *ref;
1235                 while (ret == -EEXIST) {
1236                         ref = btrfs_item_ptr(leaf, path->slots[0],
1237                                              struct btrfs_extent_data_ref);
1238                         if (match_extent_data_ref(leaf, ref, root_objectid,
1239                                                   owner, offset))
1240                                 break;
1241                         btrfs_release_path(path);
1242                         key.offset++;
1243                         ret = btrfs_insert_empty_item(trans, root, path, &key,
1244                                                       size);
1245                         if (ret && ret != -EEXIST)
1246                                 goto fail;
1247
1248                         leaf = path->nodes[0];
1249                 }
1250                 ref = btrfs_item_ptr(leaf, path->slots[0],
1251                                      struct btrfs_extent_data_ref);
1252                 if (ret == 0) {
1253                         btrfs_set_extent_data_ref_root(leaf, ref,
1254                                                        root_objectid);
1255                         btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
1256                         btrfs_set_extent_data_ref_offset(leaf, ref, offset);
1257                         btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
1258                 } else {
1259                         num_refs = btrfs_extent_data_ref_count(leaf, ref);
1260                         num_refs += refs_to_add;
1261                         btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
1262                 }
1263         }
1264         btrfs_mark_buffer_dirty(leaf);
1265         ret = 0;
1266 fail:
1267         btrfs_release_path(path);
1268         return ret;
1269 }
1270
1271 static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
1272                                            struct btrfs_root *root,
1273                                            struct btrfs_path *path,
1274                                            int refs_to_drop)
1275 {
1276         struct btrfs_key key;
1277         struct btrfs_extent_data_ref *ref1 = NULL;
1278         struct btrfs_shared_data_ref *ref2 = NULL;
1279         struct extent_buffer *leaf;
1280         u32 num_refs = 0;
1281         int ret = 0;
1282
1283         leaf = path->nodes[0];
1284         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1285
1286         if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1287                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1288                                       struct btrfs_extent_data_ref);
1289                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1290         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1291                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1292                                       struct btrfs_shared_data_ref);
1293                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1294 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1295         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1296                 struct btrfs_extent_ref_v0 *ref0;
1297                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1298                                       struct btrfs_extent_ref_v0);
1299                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1300 #endif
1301         } else {
1302                 BUG();
1303         }
1304
1305         BUG_ON(num_refs < refs_to_drop);
1306         num_refs -= refs_to_drop;
1307
1308         if (num_refs == 0) {
1309                 ret = btrfs_del_item(trans, root, path);
1310         } else {
1311                 if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
1312                         btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
1313                 else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
1314                         btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
1315 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1316                 else {
1317                         struct btrfs_extent_ref_v0 *ref0;
1318                         ref0 = btrfs_item_ptr(leaf, path->slots[0],
1319                                         struct btrfs_extent_ref_v0);
1320                         btrfs_set_ref_count_v0(leaf, ref0, num_refs);
1321                 }
1322 #endif
1323                 btrfs_mark_buffer_dirty(leaf);
1324         }
1325         return ret;
1326 }
1327
1328 static noinline u32 extent_data_ref_count(struct btrfs_root *root,
1329                                           struct btrfs_path *path,
1330                                           struct btrfs_extent_inline_ref *iref)
1331 {
1332         struct btrfs_key key;
1333         struct extent_buffer *leaf;
1334         struct btrfs_extent_data_ref *ref1;
1335         struct btrfs_shared_data_ref *ref2;
1336         u32 num_refs = 0;
1337
1338         leaf = path->nodes[0];
1339         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1340         if (iref) {
1341                 if (btrfs_extent_inline_ref_type(leaf, iref) ==
1342                     BTRFS_EXTENT_DATA_REF_KEY) {
1343                         ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1344                         num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1345                 } else {
1346                         ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1347                         num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1348                 }
1349         } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1350                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1351                                       struct btrfs_extent_data_ref);
1352                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1353         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1354                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1355                                       struct btrfs_shared_data_ref);
1356                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1357 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1358         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1359                 struct btrfs_extent_ref_v0 *ref0;
1360                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1361                                       struct btrfs_extent_ref_v0);
1362                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1363 #endif
1364         } else {
1365                 WARN_ON(1);
1366         }
1367         return num_refs;
1368 }
1369
1370 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1371                                           struct btrfs_root *root,
1372                                           struct btrfs_path *path,
1373                                           u64 bytenr, u64 parent,
1374                                           u64 root_objectid)
1375 {
1376         struct btrfs_key key;
1377         int ret;
1378
1379         key.objectid = bytenr;
1380         if (parent) {
1381                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1382                 key.offset = parent;
1383         } else {
1384                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1385                 key.offset = root_objectid;
1386         }
1387
1388         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1389         if (ret > 0)
1390                 ret = -ENOENT;
1391 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1392         if (ret == -ENOENT && parent) {
1393                 btrfs_release_path(path);
1394                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1395                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1396                 if (ret > 0)
1397                         ret = -ENOENT;
1398         }
1399 #endif
1400         return ret;
1401 }
1402
1403 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1404                                           struct btrfs_root *root,
1405                                           struct btrfs_path *path,
1406                                           u64 bytenr, u64 parent,
1407                                           u64 root_objectid)
1408 {
1409         struct btrfs_key key;
1410         int ret;
1411
1412         key.objectid = bytenr;
1413         if (parent) {
1414                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1415                 key.offset = parent;
1416         } else {
1417                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1418                 key.offset = root_objectid;
1419         }
1420
1421         ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1422         btrfs_release_path(path);
1423         return ret;
1424 }
1425
1426 static inline int extent_ref_type(u64 parent, u64 owner)
1427 {
1428         int type;
1429         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1430                 if (parent > 0)
1431                         type = BTRFS_SHARED_BLOCK_REF_KEY;
1432                 else
1433                         type = BTRFS_TREE_BLOCK_REF_KEY;
1434         } else {
1435                 if (parent > 0)
1436                         type = BTRFS_SHARED_DATA_REF_KEY;
1437                 else
1438                         type = BTRFS_EXTENT_DATA_REF_KEY;
1439         }
1440         return type;
1441 }
1442
1443 static int find_next_key(struct btrfs_path *path, int level,
1444                          struct btrfs_key *key)
1445
1446 {
1447         for (; level < BTRFS_MAX_LEVEL; level++) {
1448                 if (!path->nodes[level])
1449                         break;
1450                 if (path->slots[level] + 1 >=
1451                     btrfs_header_nritems(path->nodes[level]))
1452                         continue;
1453                 if (level == 0)
1454                         btrfs_item_key_to_cpu(path->nodes[level], key,
1455                                               path->slots[level] + 1);
1456                 else
1457                         btrfs_node_key_to_cpu(path->nodes[level], key,
1458                                               path->slots[level] + 1);
1459                 return 0;
1460         }
1461         return 1;
1462 }
1463
1464 /*
1465  * look for inline back ref. if back ref is found, *ref_ret is set
1466  * to the address of inline back ref, and 0 is returned.
1467  *
1468  * if back ref isn't found, *ref_ret is set to the address where it
1469  * should be inserted, and -ENOENT is returned.
1470  *
1471  * if insert is true and there are too many inline back refs, the path
1472  * points to the extent item, and -EAGAIN is returned.
1473  *
1474  * NOTE: inline back refs are ordered in the same way that back ref
1475  *       items in the tree are ordered.
1476  */
1477 static noinline_for_stack
1478 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1479                                  struct btrfs_root *root,
1480                                  struct btrfs_path *path,
1481                                  struct btrfs_extent_inline_ref **ref_ret,
1482                                  u64 bytenr, u64 num_bytes,
1483                                  u64 parent, u64 root_objectid,
1484                                  u64 owner, u64 offset, int insert)
1485 {
1486         struct btrfs_key key;
1487         struct extent_buffer *leaf;
1488         struct btrfs_extent_item *ei;
1489         struct btrfs_extent_inline_ref *iref;
1490         u64 flags;
1491         u64 item_size;
1492         unsigned long ptr;
1493         unsigned long end;
1494         int extra_size;
1495         int type;
1496         int want;
1497         int ret;
1498         int err = 0;
1499         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
1500                                                  SKINNY_METADATA);
1501
1502         key.objectid = bytenr;
1503         key.type = BTRFS_EXTENT_ITEM_KEY;
1504         key.offset = num_bytes;
1505
1506         want = extent_ref_type(parent, owner);
1507         if (insert) {
1508                 extra_size = btrfs_extent_inline_ref_size(want);
1509                 path->keep_locks = 1;
1510         } else
1511                 extra_size = -1;
1512
1513         /*
1514          * Owner is our parent level, so we can just add one to get the level
1515          * for the block we are interested in.
1516          */
1517         if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
1518                 key.type = BTRFS_METADATA_ITEM_KEY;
1519                 key.offset = owner;
1520         }
1521
1522 again:
1523         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1524         if (ret < 0) {
1525                 err = ret;
1526                 goto out;
1527         }
1528
1529         /*
1530          * We may be a newly converted file system which still has the old fat
1531          * extent entries for metadata, so try and see if we have one of those.
1532          */
1533         if (ret > 0 && skinny_metadata) {
1534                 skinny_metadata = false;
1535                 if (path->slots[0]) {
1536                         path->slots[0]--;
1537                         btrfs_item_key_to_cpu(path->nodes[0], &key,
1538                                               path->slots[0]);
1539                         if (key.objectid == bytenr &&
1540                             key.type == BTRFS_EXTENT_ITEM_KEY &&
1541                             key.offset == num_bytes)
1542                                 ret = 0;
1543                 }
1544                 if (ret) {
1545                         key.type = BTRFS_EXTENT_ITEM_KEY;
1546                         key.offset = num_bytes;
1547                         btrfs_release_path(path);
1548                         goto again;
1549                 }
1550         }
1551
1552         if (ret && !insert) {
1553                 err = -ENOENT;
1554                 goto out;
1555         } else if (WARN_ON(ret)) {
1556                 err = -EIO;
1557                 goto out;
1558         }
1559
1560         leaf = path->nodes[0];
1561         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1562 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1563         if (item_size < sizeof(*ei)) {
1564                 if (!insert) {
1565                         err = -ENOENT;
1566                         goto out;
1567                 }
1568                 ret = convert_extent_item_v0(trans, root, path, owner,
1569                                              extra_size);
1570                 if (ret < 0) {
1571                         err = ret;
1572                         goto out;
1573                 }
1574                 leaf = path->nodes[0];
1575                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1576         }
1577 #endif
1578         BUG_ON(item_size < sizeof(*ei));
1579
1580         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1581         flags = btrfs_extent_flags(leaf, ei);
1582
1583         ptr = (unsigned long)(ei + 1);
1584         end = (unsigned long)ei + item_size;
1585
1586         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
1587                 ptr += sizeof(struct btrfs_tree_block_info);
1588                 BUG_ON(ptr > end);
1589         }
1590
1591         err = -ENOENT;
1592         while (1) {
1593                 if (ptr >= end) {
1594                         WARN_ON(ptr > end);
1595                         break;
1596                 }
1597                 iref = (struct btrfs_extent_inline_ref *)ptr;
1598                 type = btrfs_extent_inline_ref_type(leaf, iref);
1599                 if (want < type)
1600                         break;
1601                 if (want > type) {
1602                         ptr += btrfs_extent_inline_ref_size(type);
1603                         continue;
1604                 }
1605
1606                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1607                         struct btrfs_extent_data_ref *dref;
1608                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1609                         if (match_extent_data_ref(leaf, dref, root_objectid,
1610                                                   owner, offset)) {
1611                                 err = 0;
1612                                 break;
1613                         }
1614                         if (hash_extent_data_ref_item(leaf, dref) <
1615                             hash_extent_data_ref(root_objectid, owner, offset))
1616                                 break;
1617                 } else {
1618                         u64 ref_offset;
1619                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1620                         if (parent > 0) {
1621                                 if (parent == ref_offset) {
1622                                         err = 0;
1623                                         break;
1624                                 }
1625                                 if (ref_offset < parent)
1626                                         break;
1627                         } else {
1628                                 if (root_objectid == ref_offset) {
1629                                         err = 0;
1630                                         break;
1631                                 }
1632                                 if (ref_offset < root_objectid)
1633                                         break;
1634                         }
1635                 }
1636                 ptr += btrfs_extent_inline_ref_size(type);
1637         }
1638         if (err == -ENOENT && insert) {
1639                 if (item_size + extra_size >=
1640                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1641                         err = -EAGAIN;
1642                         goto out;
1643                 }
1644                 /*
1645                  * To add new inline back ref, we have to make sure
1646                  * there is no corresponding back ref item.
1647                  * For simplicity, we just do not add new inline back
1648                  * ref if there is any kind of item for this block
1649                  */
1650                 if (find_next_key(path, 0, &key) == 0 &&
1651                     key.objectid == bytenr &&
1652                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1653                         err = -EAGAIN;
1654                         goto out;
1655                 }
1656         }
1657         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1658 out:
1659         if (insert) {
1660                 path->keep_locks = 0;
1661                 btrfs_unlock_up_safe(path, 1);
1662         }
1663         return err;
1664 }
1665
1666 /*
1667  * helper to add new inline back ref
1668  */
1669 static noinline_for_stack
1670 void setup_inline_extent_backref(struct btrfs_root *root,
1671                                  struct btrfs_path *path,
1672                                  struct btrfs_extent_inline_ref *iref,
1673                                  u64 parent, u64 root_objectid,
1674                                  u64 owner, u64 offset, int refs_to_add,
1675                                  struct btrfs_delayed_extent_op *extent_op)
1676 {
1677         struct extent_buffer *leaf;
1678         struct btrfs_extent_item *ei;
1679         unsigned long ptr;
1680         unsigned long end;
1681         unsigned long item_offset;
1682         u64 refs;
1683         int size;
1684         int type;
1685
1686         leaf = path->nodes[0];
1687         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1688         item_offset = (unsigned long)iref - (unsigned long)ei;
1689
1690         type = extent_ref_type(parent, owner);
1691         size = btrfs_extent_inline_ref_size(type);
1692
1693         btrfs_extend_item(root, path, size);
1694
1695         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1696         refs = btrfs_extent_refs(leaf, ei);
1697         refs += refs_to_add;
1698         btrfs_set_extent_refs(leaf, ei, refs);
1699         if (extent_op)
1700                 __run_delayed_extent_op(extent_op, leaf, ei);
1701
1702         ptr = (unsigned long)ei + item_offset;
1703         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1704         if (ptr < end - size)
1705                 memmove_extent_buffer(leaf, ptr + size, ptr,
1706                                       end - size - ptr);
1707
1708         iref = (struct btrfs_extent_inline_ref *)ptr;
1709         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1710         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1711                 struct btrfs_extent_data_ref *dref;
1712                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1713                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1714                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1715                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1716                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1717         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1718                 struct btrfs_shared_data_ref *sref;
1719                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1720                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1721                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1722         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1723                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1724         } else {
1725                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1726         }
1727         btrfs_mark_buffer_dirty(leaf);
1728 }
1729
1730 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1731                                  struct btrfs_root *root,
1732                                  struct btrfs_path *path,
1733                                  struct btrfs_extent_inline_ref **ref_ret,
1734                                  u64 bytenr, u64 num_bytes, u64 parent,
1735                                  u64 root_objectid, u64 owner, u64 offset)
1736 {
1737         int ret;
1738
1739         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1740                                            bytenr, num_bytes, parent,
1741                                            root_objectid, owner, offset, 0);
1742         if (ret != -ENOENT)
1743                 return ret;
1744
1745         btrfs_release_path(path);
1746         *ref_ret = NULL;
1747
1748         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1749                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1750                                             root_objectid);
1751         } else {
1752                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1753                                              root_objectid, owner, offset);
1754         }
1755         return ret;
1756 }
1757
1758 /*
1759  * helper to update/remove inline back ref
1760  */
1761 static noinline_for_stack
1762 void update_inline_extent_backref(struct btrfs_root *root,
1763                                   struct btrfs_path *path,
1764                                   struct btrfs_extent_inline_ref *iref,
1765                                   int refs_to_mod,
1766                                   struct btrfs_delayed_extent_op *extent_op)
1767 {
1768         struct extent_buffer *leaf;
1769         struct btrfs_extent_item *ei;
1770         struct btrfs_extent_data_ref *dref = NULL;
1771         struct btrfs_shared_data_ref *sref = NULL;
1772         unsigned long ptr;
1773         unsigned long end;
1774         u32 item_size;
1775         int size;
1776         int type;
1777         u64 refs;
1778
1779         leaf = path->nodes[0];
1780         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1781         refs = btrfs_extent_refs(leaf, ei);
1782         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1783         refs += refs_to_mod;
1784         btrfs_set_extent_refs(leaf, ei, refs);
1785         if (extent_op)
1786                 __run_delayed_extent_op(extent_op, leaf, ei);
1787
1788         type = btrfs_extent_inline_ref_type(leaf, iref);
1789
1790         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1791                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1792                 refs = btrfs_extent_data_ref_count(leaf, dref);
1793         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1794                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1795                 refs = btrfs_shared_data_ref_count(leaf, sref);
1796         } else {
1797                 refs = 1;
1798                 BUG_ON(refs_to_mod != -1);
1799         }
1800
1801         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1802         refs += refs_to_mod;
1803
1804         if (refs > 0) {
1805                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1806                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1807                 else
1808                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1809         } else {
1810                 size =  btrfs_extent_inline_ref_size(type);
1811                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1812                 ptr = (unsigned long)iref;
1813                 end = (unsigned long)ei + item_size;
1814                 if (ptr + size < end)
1815                         memmove_extent_buffer(leaf, ptr, ptr + size,
1816                                               end - ptr - size);
1817                 item_size -= size;
1818                 btrfs_truncate_item(root, path, item_size, 1);
1819         }
1820         btrfs_mark_buffer_dirty(leaf);
1821 }
1822
1823 static noinline_for_stack
1824 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1825                                  struct btrfs_root *root,
1826                                  struct btrfs_path *path,
1827                                  u64 bytenr, u64 num_bytes, u64 parent,
1828                                  u64 root_objectid, u64 owner,
1829                                  u64 offset, int refs_to_add,
1830                                  struct btrfs_delayed_extent_op *extent_op)
1831 {
1832         struct btrfs_extent_inline_ref *iref;
1833         int ret;
1834
1835         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1836                                            bytenr, num_bytes, parent,
1837                                            root_objectid, owner, offset, 1);
1838         if (ret == 0) {
1839                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1840                 update_inline_extent_backref(root, path, iref,
1841                                              refs_to_add, extent_op);
1842         } else if (ret == -ENOENT) {
1843                 setup_inline_extent_backref(root, path, iref, parent,
1844                                             root_objectid, owner, offset,
1845                                             refs_to_add, extent_op);
1846                 ret = 0;
1847         }
1848         return ret;
1849 }
1850
1851 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1852                                  struct btrfs_root *root,
1853                                  struct btrfs_path *path,
1854                                  u64 bytenr, u64 parent, u64 root_objectid,
1855                                  u64 owner, u64 offset, int refs_to_add)
1856 {
1857         int ret;
1858         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1859                 BUG_ON(refs_to_add != 1);
1860                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1861                                             parent, root_objectid);
1862         } else {
1863                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1864                                              parent, root_objectid,
1865                                              owner, offset, refs_to_add);
1866         }
1867         return ret;
1868 }
1869
1870 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1871                                  struct btrfs_root *root,
1872                                  struct btrfs_path *path,
1873                                  struct btrfs_extent_inline_ref *iref,
1874                                  int refs_to_drop, int is_data)
1875 {
1876         int ret = 0;
1877
1878         BUG_ON(!is_data && refs_to_drop != 1);
1879         if (iref) {
1880                 update_inline_extent_backref(root, path, iref,
1881                                              -refs_to_drop, NULL);
1882         } else if (is_data) {
1883                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
1884         } else {
1885                 ret = btrfs_del_item(trans, root, path);
1886         }
1887         return ret;
1888 }
1889
1890 static int btrfs_issue_discard(struct block_device *bdev,
1891                                 u64 start, u64 len)
1892 {
1893         return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
1894 }
1895
1896 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1897                                 u64 num_bytes, u64 *actual_bytes)
1898 {
1899         int ret;
1900         u64 discarded_bytes = 0;
1901         struct btrfs_bio *bbio = NULL;
1902
1903
1904         /* Tell the block device(s) that the sectors can be discarded */
1905         ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
1906                               bytenr, &num_bytes, &bbio, 0);
1907         /* Error condition is -ENOMEM */
1908         if (!ret) {
1909                 struct btrfs_bio_stripe *stripe = bbio->stripes;
1910                 int i;
1911
1912
1913                 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
1914                         if (!stripe->dev->can_discard)
1915                                 continue;
1916
1917                         ret = btrfs_issue_discard(stripe->dev->bdev,
1918                                                   stripe->physical,
1919                                                   stripe->length);
1920                         if (!ret)
1921                                 discarded_bytes += stripe->length;
1922                         else if (ret != -EOPNOTSUPP)
1923                                 break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */
1924
1925                         /*
1926                          * Just in case we get back EOPNOTSUPP for some reason,
1927                          * just ignore the return value so we don't screw up
1928                          * people calling discard_extent.
1929                          */
1930                         ret = 0;
1931                 }
1932                 kfree(bbio);
1933         }
1934
1935         if (actual_bytes)
1936                 *actual_bytes = discarded_bytes;
1937
1938
1939         if (ret == -EOPNOTSUPP)
1940                 ret = 0;
1941         return ret;
1942 }
1943
1944 /* Can return -ENOMEM */
1945 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1946                          struct btrfs_root *root,
1947                          u64 bytenr, u64 num_bytes, u64 parent,
1948                          u64 root_objectid, u64 owner, u64 offset, int for_cow)
1949 {
1950         int ret;
1951         struct btrfs_fs_info *fs_info = root->fs_info;
1952
1953         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1954                root_objectid == BTRFS_TREE_LOG_OBJECTID);
1955
1956         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1957                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
1958                                         num_bytes,
1959                                         parent, root_objectid, (int)owner,
1960                                         BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1961         } else {
1962                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
1963                                         num_bytes,
1964                                         parent, root_objectid, owner, offset,
1965                                         BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1966         }
1967         return ret;
1968 }
1969
1970 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1971                                   struct btrfs_root *root,
1972                                   u64 bytenr, u64 num_bytes,
1973                                   u64 parent, u64 root_objectid,
1974                                   u64 owner, u64 offset, int refs_to_add,
1975                                   struct btrfs_delayed_extent_op *extent_op)
1976 {
1977         struct btrfs_path *path;
1978         struct extent_buffer *leaf;
1979         struct btrfs_extent_item *item;
1980         u64 refs;
1981         int ret;
1982
1983         path = btrfs_alloc_path();
1984         if (!path)
1985                 return -ENOMEM;
1986
1987         path->reada = 1;
1988         path->leave_spinning = 1;
1989         /* this will setup the path even if it fails to insert the back ref */
1990         ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
1991                                            path, bytenr, num_bytes, parent,
1992                                            root_objectid, owner, offset,
1993                                            refs_to_add, extent_op);
1994         if (ret != -EAGAIN)
1995                 goto out;
1996
1997         leaf = path->nodes[0];
1998         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1999         refs = btrfs_extent_refs(leaf, item);
2000         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
2001         if (extent_op)
2002                 __run_delayed_extent_op(extent_op, leaf, item);
2003
2004         btrfs_mark_buffer_dirty(leaf);
2005         btrfs_release_path(path);
2006
2007         path->reada = 1;
2008         path->leave_spinning = 1;
2009
2010         /* now insert the actual backref */
2011         ret = insert_extent_backref(trans, root->fs_info->extent_root,
2012                                     path, bytenr, parent, root_objectid,
2013                                     owner, offset, refs_to_add);
2014         if (ret)
2015                 btrfs_abort_transaction(trans, root, ret);
2016 out:
2017         btrfs_free_path(path);
2018         return ret;
2019 }
2020
2021 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
2022                                 struct btrfs_root *root,
2023                                 struct btrfs_delayed_ref_node *node,
2024                                 struct btrfs_delayed_extent_op *extent_op,
2025                                 int insert_reserved)
2026 {
2027         int ret = 0;
2028         struct btrfs_delayed_data_ref *ref;
2029         struct btrfs_key ins;
2030         u64 parent = 0;
2031         u64 ref_root = 0;
2032         u64 flags = 0;
2033
2034         ins.objectid = node->bytenr;
2035         ins.offset = node->num_bytes;
2036         ins.type = BTRFS_EXTENT_ITEM_KEY;
2037
2038         ref = btrfs_delayed_node_to_data_ref(node);
2039         trace_run_delayed_data_ref(node, ref, node->action);
2040
2041         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
2042                 parent = ref->parent;
2043         else
2044                 ref_root = ref->root;
2045
2046         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2047                 if (extent_op)
2048                         flags |= extent_op->flags_to_set;
2049                 ret = alloc_reserved_file_extent(trans, root,
2050                                                  parent, ref_root, flags,
2051                                                  ref->objectid, ref->offset,
2052                                                  &ins, node->ref_mod);
2053         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2054                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2055                                              node->num_bytes, parent,
2056                                              ref_root, ref->objectid,
2057                                              ref->offset, node->ref_mod,
2058                                              extent_op);
2059         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2060                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2061                                           node->num_bytes, parent,
2062                                           ref_root, ref->objectid,
2063                                           ref->offset, node->ref_mod,
2064                                           extent_op);
2065         } else {
2066                 BUG();
2067         }
2068         return ret;
2069 }
2070
2071 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2072                                     struct extent_buffer *leaf,
2073                                     struct btrfs_extent_item *ei)
2074 {
2075         u64 flags = btrfs_extent_flags(leaf, ei);
2076         if (extent_op->update_flags) {
2077                 flags |= extent_op->flags_to_set;
2078                 btrfs_set_extent_flags(leaf, ei, flags);
2079         }
2080
2081         if (extent_op->update_key) {
2082                 struct btrfs_tree_block_info *bi;
2083                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2084                 bi = (struct btrfs_tree_block_info *)(ei + 1);
2085                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2086         }
2087 }
2088
2089 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2090                                  struct btrfs_root *root,
2091                                  struct btrfs_delayed_ref_node *node,
2092                                  struct btrfs_delayed_extent_op *extent_op)
2093 {
2094         struct btrfs_key key;
2095         struct btrfs_path *path;
2096         struct btrfs_extent_item *ei;
2097         struct extent_buffer *leaf;
2098         u32 item_size;
2099         int ret;
2100         int err = 0;
2101         int metadata = !extent_op->is_data;
2102
2103         if (trans->aborted)
2104                 return 0;
2105
2106         if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2107                 metadata = 0;
2108
2109         path = btrfs_alloc_path();
2110         if (!path)
2111                 return -ENOMEM;
2112
2113         key.objectid = node->bytenr;
2114
2115         if (metadata) {
2116                 key.type = BTRFS_METADATA_ITEM_KEY;
2117                 key.offset = extent_op->level;
2118         } else {
2119                 key.type = BTRFS_EXTENT_ITEM_KEY;
2120                 key.offset = node->num_bytes;
2121         }
2122
2123 again:
2124         path->reada = 1;
2125         path->leave_spinning = 1;
2126         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2127                                 path, 0, 1);
2128         if (ret < 0) {
2129                 err = ret;
2130                 goto out;
2131         }
2132         if (ret > 0) {
2133                 if (metadata) {
2134                         if (path->slots[0] > 0) {
2135                                 path->slots[0]--;
2136                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
2137                                                       path->slots[0]);
2138                                 if (key.objectid == node->bytenr &&
2139                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
2140                                     key.offset == node->num_bytes)
2141                                         ret = 0;
2142                         }
2143                         if (ret > 0) {
2144                                 btrfs_release_path(path);
2145                                 metadata = 0;
2146
2147                                 key.objectid = node->bytenr;
2148                                 key.offset = node->num_bytes;
2149                                 key.type = BTRFS_EXTENT_ITEM_KEY;
2150                                 goto again;
2151                         }
2152                 } else {
2153                         err = -EIO;
2154                         goto out;
2155                 }
2156         }
2157
2158         leaf = path->nodes[0];
2159         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2160 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2161         if (item_size < sizeof(*ei)) {
2162                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2163                                              path, (u64)-1, 0);
2164                 if (ret < 0) {
2165                         err = ret;
2166                         goto out;
2167                 }
2168                 leaf = path->nodes[0];
2169                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2170         }
2171 #endif
2172         BUG_ON(item_size < sizeof(*ei));
2173         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2174         __run_delayed_extent_op(extent_op, leaf, ei);
2175
2176         btrfs_mark_buffer_dirty(leaf);
2177 out:
2178         btrfs_free_path(path);
2179         return err;
2180 }
2181
2182 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2183                                 struct btrfs_root *root,
2184                                 struct btrfs_delayed_ref_node *node,
2185                                 struct btrfs_delayed_extent_op *extent_op,
2186                                 int insert_reserved)
2187 {
2188         int ret = 0;
2189         struct btrfs_delayed_tree_ref *ref;
2190         struct btrfs_key ins;
2191         u64 parent = 0;
2192         u64 ref_root = 0;
2193         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
2194                                                  SKINNY_METADATA);
2195
2196         ref = btrfs_delayed_node_to_tree_ref(node);
2197         trace_run_delayed_tree_ref(node, ref, node->action);
2198
2199         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2200                 parent = ref->parent;
2201         else
2202                 ref_root = ref->root;
2203
2204         ins.objectid = node->bytenr;
2205         if (skinny_metadata) {
2206                 ins.offset = ref->level;
2207                 ins.type = BTRFS_METADATA_ITEM_KEY;
2208         } else {
2209                 ins.offset = node->num_bytes;
2210                 ins.type = BTRFS_EXTENT_ITEM_KEY;
2211         }
2212
2213         BUG_ON(node->ref_mod != 1);
2214         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2215                 BUG_ON(!extent_op || !extent_op->update_flags);
2216                 ret = alloc_reserved_tree_block(trans, root,
2217                                                 parent, ref_root,
2218                                                 extent_op->flags_to_set,
2219                                                 &extent_op->key,
2220                                                 ref->level, &ins);
2221         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2222                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2223                                              node->num_bytes, parent, ref_root,
2224                                              ref->level, 0, 1, extent_op);
2225         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2226                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2227                                           node->num_bytes, parent, ref_root,
2228                                           ref->level, 0, 1, extent_op);
2229         } else {
2230                 BUG();
2231         }
2232         return ret;
2233 }
2234
2235 /* helper function to actually process a single delayed ref entry */
2236 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2237                                struct btrfs_root *root,
2238                                struct btrfs_delayed_ref_node *node,
2239                                struct btrfs_delayed_extent_op *extent_op,
2240                                int insert_reserved)
2241 {
2242         int ret = 0;
2243
2244         if (trans->aborted) {
2245                 if (insert_reserved)
2246                         btrfs_pin_extent(root, node->bytenr,
2247                                          node->num_bytes, 1);
2248                 return 0;
2249         }
2250
2251         if (btrfs_delayed_ref_is_head(node)) {
2252                 struct btrfs_delayed_ref_head *head;
2253                 /*
2254                  * we've hit the end of the chain and we were supposed
2255                  * to insert this extent into the tree.  But, it got
2256                  * deleted before we ever needed to insert it, so all
2257                  * we have to do is clean up the accounting
2258                  */
2259                 BUG_ON(extent_op);
2260                 head = btrfs_delayed_node_to_head(node);
2261                 trace_run_delayed_ref_head(node, head, node->action);
2262
2263                 if (insert_reserved) {
2264                         btrfs_pin_extent(root, node->bytenr,
2265                                          node->num_bytes, 1);
2266                         if (head->is_data) {
2267                                 ret = btrfs_del_csums(trans, root,
2268                                                       node->bytenr,
2269                                                       node->num_bytes);
2270                         }
2271                 }
2272                 return ret;
2273         }
2274
2275         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2276             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2277                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2278                                            insert_reserved);
2279         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2280                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2281                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2282                                            insert_reserved);
2283         else
2284                 BUG();
2285         return ret;
2286 }
2287
2288 static noinline struct btrfs_delayed_ref_node *
2289 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2290 {
2291         struct rb_node *node;
2292         struct btrfs_delayed_ref_node *ref, *last = NULL;;
2293
2294         /*
2295          * select delayed ref of type BTRFS_ADD_DELAYED_REF first.
2296          * this prevents ref count from going down to zero when
2297          * there still are pending delayed ref.
2298          */
2299         node = rb_first(&head->ref_root);
2300         while (node) {
2301                 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2302                                 rb_node);
2303                 if (ref->action == BTRFS_ADD_DELAYED_REF)
2304                         return ref;
2305                 else if (last == NULL)
2306                         last = ref;
2307                 node = rb_next(node);
2308         }
2309         return last;
2310 }
2311
2312 /*
2313  * Returns 0 on success or if called with an already aborted transaction.
2314  * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2315  */
2316 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2317                                              struct btrfs_root *root,
2318                                              unsigned long nr)
2319 {
2320         struct btrfs_delayed_ref_root *delayed_refs;
2321         struct btrfs_delayed_ref_node *ref;
2322         struct btrfs_delayed_ref_head *locked_ref = NULL;
2323         struct btrfs_delayed_extent_op *extent_op;
2324         struct btrfs_fs_info *fs_info = root->fs_info;
2325         ktime_t start = ktime_get();
2326         int ret;
2327         unsigned long count = 0;
2328         unsigned long actual_count = 0;
2329         int must_insert_reserved = 0;
2330
2331         delayed_refs = &trans->transaction->delayed_refs;
2332         while (1) {
2333                 if (!locked_ref) {
2334                         if (count >= nr)
2335                                 break;
2336
2337                         spin_lock(&delayed_refs->lock);
2338                         locked_ref = btrfs_select_ref_head(trans);
2339                         if (!locked_ref) {
2340                                 spin_unlock(&delayed_refs->lock);
2341                                 break;
2342                         }
2343
2344                         /* grab the lock that says we are going to process
2345                          * all the refs for this head */
2346                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2347                         spin_unlock(&delayed_refs->lock);
2348                         /*
2349                          * we may have dropped the spin lock to get the head
2350                          * mutex lock, and that might have given someone else
2351                          * time to free the head.  If that's true, it has been
2352                          * removed from our list and we can move on.
2353                          */
2354                         if (ret == -EAGAIN) {
2355                                 locked_ref = NULL;
2356                                 count++;
2357                                 continue;
2358                         }
2359                 }
2360
2361                 /*
2362                  * We need to try and merge add/drops of the same ref since we
2363                  * can run into issues with relocate dropping the implicit ref
2364                  * and then it being added back again before the drop can
2365                  * finish.  If we merged anything we need to re-loop so we can
2366                  * get a good ref.
2367                  */
2368                 spin_lock(&locked_ref->lock);
2369                 btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
2370                                          locked_ref);
2371
2372                 /*
2373                  * locked_ref is the head node, so we have to go one
2374                  * node back for any delayed ref updates
2375                  */
2376                 ref = select_delayed_ref(locked_ref);
2377
2378                 if (ref && ref->seq &&
2379                     btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2380                         spin_unlock(&locked_ref->lock);
2381                         btrfs_delayed_ref_unlock(locked_ref);
2382                         spin_lock(&delayed_refs->lock);
2383                         locked_ref->processing = 0;
2384                         delayed_refs->num_heads_ready++;
2385                         spin_unlock(&delayed_refs->lock);
2386                         locked_ref = NULL;
2387                         cond_resched();
2388                         count++;
2389                         continue;
2390                 }
2391
2392                 /*
2393                  * record the must insert reserved flag before we
2394                  * drop the spin lock.
2395                  */
2396                 must_insert_reserved = locked_ref->must_insert_reserved;
2397                 locked_ref->must_insert_reserved = 0;
2398
2399                 extent_op = locked_ref->extent_op;
2400                 locked_ref->extent_op = NULL;
2401
2402                 if (!ref) {
2403
2404
2405                         /* All delayed refs have been processed, Go ahead
2406                          * and send the head node to run_one_delayed_ref,
2407                          * so that any accounting fixes can happen
2408                          */
2409                         ref = &locked_ref->node;
2410
2411                         if (extent_op && must_insert_reserved) {
2412                                 btrfs_free_delayed_extent_op(extent_op);
2413                                 extent_op = NULL;
2414                         }
2415
2416                         if (extent_op) {
2417                                 spin_unlock(&locked_ref->lock);
2418                                 ret = run_delayed_extent_op(trans, root,
2419                                                             ref, extent_op);
2420                                 btrfs_free_delayed_extent_op(extent_op);
2421
2422                                 if (ret) {
2423                                         /*
2424                                          * Need to reset must_insert_reserved if
2425                                          * there was an error so the abort stuff
2426                                          * can cleanup the reserved space
2427                                          * properly.
2428                                          */
2429                                         if (must_insert_reserved)
2430                                                 locked_ref->must_insert_reserved = 1;
2431                                         locked_ref->processing = 0;
2432                                         btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
2433                                         btrfs_delayed_ref_unlock(locked_ref);
2434                                         return ret;
2435                                 }
2436                                 continue;
2437                         }
2438
2439                         /*
2440                          * Need to drop our head ref lock and re-aqcuire the
2441                          * delayed ref lock and then re-check to make sure
2442                          * nobody got added.
2443                          */
2444                         spin_unlock(&locked_ref->lock);
2445                         spin_lock(&delayed_refs->lock);
2446                         spin_lock(&locked_ref->lock);
2447                         if (rb_first(&locked_ref->ref_root)) {
2448                                 spin_unlock(&locked_ref->lock);
2449                                 spin_unlock(&delayed_refs->lock);
2450                                 continue;
2451                         }
2452                         ref->in_tree = 0;
2453                         delayed_refs->num_heads--;
2454                         rb_erase(&locked_ref->href_node,
2455                                  &delayed_refs->href_root);
2456                         spin_unlock(&delayed_refs->lock);
2457                 } else {
2458                         actual_count++;
2459                         ref->in_tree = 0;
2460                         rb_erase(&ref->rb_node, &locked_ref->ref_root);
2461                 }
2462                 atomic_dec(&delayed_refs->num_entries);
2463
2464                 if (!btrfs_delayed_ref_is_head(ref)) {
2465                         /*
2466                          * when we play the delayed ref, also correct the
2467                          * ref_mod on head
2468                          */
2469                         switch (ref->action) {
2470                         case BTRFS_ADD_DELAYED_REF:
2471                         case BTRFS_ADD_DELAYED_EXTENT:
2472                                 locked_ref->node.ref_mod -= ref->ref_mod;
2473                                 break;
2474                         case BTRFS_DROP_DELAYED_REF:
2475                                 locked_ref->node.ref_mod += ref->ref_mod;
2476                                 break;
2477                         default:
2478                                 WARN_ON(1);
2479                         }
2480                 }
2481                 spin_unlock(&locked_ref->lock);
2482
2483                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2484                                           must_insert_reserved);
2485
2486                 btrfs_free_delayed_extent_op(extent_op);
2487                 if (ret) {
2488                         locked_ref->processing = 0;
2489                         btrfs_delayed_ref_unlock(locked_ref);
2490                         btrfs_put_delayed_ref(ref);
2491                         btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
2492                         return ret;
2493                 }
2494
2495                 /*
2496                  * If this node is a head, that means all the refs in this head
2497                  * have been dealt with, and we will pick the next head to deal
2498                  * with, so we must unlock the head and drop it from the cluster
2499                  * list before we release it.
2500                  */
2501                 if (btrfs_delayed_ref_is_head(ref)) {
2502                         btrfs_delayed_ref_unlock(locked_ref);
2503                         locked_ref = NULL;
2504                 }
2505                 btrfs_put_delayed_ref(ref);
2506                 count++;
2507                 cond_resched();
2508         }
2509
2510         /*
2511          * We don't want to include ref heads since we can have empty ref heads
2512          * and those will drastically skew our runtime down since we just do
2513          * accounting, no actual extent tree updates.
2514          */
2515         if (actual_count > 0) {
2516                 u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
2517                 u64 avg;
2518
2519                 /*
2520                  * We weigh the current average higher than our current runtime
2521                  * to avoid large swings in the average.
2522                  */
2523                 spin_lock(&delayed_refs->lock);
2524                 avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
2525                 avg = div64_u64(avg, 4);
2526                 fs_info->avg_delayed_ref_runtime = avg;
2527                 spin_unlock(&delayed_refs->lock);
2528         }
2529         return 0;
2530 }
2531
2532 #ifdef SCRAMBLE_DELAYED_REFS
2533 /*
2534  * Normally delayed refs get processed in ascending bytenr order. This
2535  * correlates in most cases to the order added. To expose dependencies on this
2536  * order, we start to process the tree in the middle instead of the beginning
2537  */
2538 static u64 find_middle(struct rb_root *root)
2539 {
2540         struct rb_node *n = root->rb_node;
2541         struct btrfs_delayed_ref_node *entry;
2542         int alt = 1;
2543         u64 middle;
2544         u64 first = 0, last = 0;
2545
2546         n = rb_first(root);
2547         if (n) {
2548                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2549                 first = entry->bytenr;
2550         }
2551         n = rb_last(root);
2552         if (n) {
2553                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2554                 last = entry->bytenr;
2555         }
2556         n = root->rb_node;
2557
2558         while (n) {
2559                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2560                 WARN_ON(!entry->in_tree);
2561
2562                 middle = entry->bytenr;
2563
2564                 if (alt)
2565                         n = n->rb_left;
2566                 else
2567                         n = n->rb_right;
2568
2569                 alt = 1 - alt;
2570         }
2571         return middle;
2572 }
2573 #endif
2574
2575 int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
2576                                          struct btrfs_fs_info *fs_info)
2577 {
2578         struct qgroup_update *qgroup_update;
2579         int ret = 0;
2580
2581         if (list_empty(&trans->qgroup_ref_list) !=
2582             !trans->delayed_ref_elem.seq) {
2583                 /* list without seq or seq without list */
2584                 btrfs_err(fs_info,
2585                         "qgroup accounting update error, list is%s empty, seq is %#x.%x",
2586                         list_empty(&trans->qgroup_ref_list) ? "" : " not",
2587                         (u32)(trans->delayed_ref_elem.seq >> 32),
2588                         (u32)trans->delayed_ref_elem.seq);
2589                 BUG();
2590         }
2591
2592         if (!trans->delayed_ref_elem.seq)
2593                 return 0;
2594
2595         while (!list_empty(&trans->qgroup_ref_list)) {
2596                 qgroup_update = list_first_entry(&trans->qgroup_ref_list,
2597                                                  struct qgroup_update, list);
2598                 list_del(&qgroup_update->list);
2599                 if (!ret)
2600                         ret = btrfs_qgroup_account_ref(
2601                                         trans, fs_info, qgroup_update->node,
2602                                         qgroup_update->extent_op);
2603                 kfree(qgroup_update);
2604         }
2605
2606         btrfs_put_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
2607
2608         return ret;
2609 }
2610
2611 static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
2612 {
2613         u64 num_bytes;
2614
2615         num_bytes = heads * (sizeof(struct btrfs_extent_item) +
2616                              sizeof(struct btrfs_extent_inline_ref));
2617         if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2618                 num_bytes += heads * sizeof(struct btrfs_tree_block_info);
2619
2620         /*
2621          * We don't ever fill up leaves all the way so multiply by 2 just to be
2622          * closer to what we're really going to want to ouse.
2623          */
2624         return div64_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
2625 }
2626
2627 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
2628                                        struct btrfs_root *root)
2629 {
2630         struct btrfs_block_rsv *global_rsv;
2631         u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
2632         u64 num_bytes;
2633         int ret = 0;
2634
2635         num_bytes = btrfs_calc_trans_metadata_size(root, 1);
2636         num_heads = heads_to_leaves(root, num_heads);
2637         if (num_heads > 1)
2638                 num_bytes += (num_heads - 1) * root->leafsize;
2639         num_bytes <<= 1;
2640         global_rsv = &root->fs_info->global_block_rsv;
2641
2642         /*
2643          * If we can't allocate any more chunks lets make sure we have _lots_ of
2644          * wiggle room since running delayed refs can create more delayed refs.
2645          */
2646         if (global_rsv->space_info->full)
2647                 num_bytes <<= 1;
2648
2649         spin_lock(&global_rsv->lock);
2650         if (global_rsv->reserved <= num_bytes)
2651                 ret = 1;
2652         spin_unlock(&global_rsv->lock);
2653         return ret;
2654 }
2655
2656 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
2657                                        struct btrfs_root *root)
2658 {
2659         struct btrfs_fs_info *fs_info = root->fs_info;
2660         u64 num_entries =
2661                 atomic_read(&trans->transaction->delayed_refs.num_entries);
2662         u64 avg_runtime;
2663
2664         smp_mb();
2665         avg_runtime = fs_info->avg_delayed_ref_runtime;
2666         if (num_entries * avg_runtime >= NSEC_PER_SEC)
2667                 return 1;
2668
2669         return btrfs_check_space_for_delayed_refs(trans, root);
2670 }
2671
2672 /*
2673  * this starts processing the delayed reference count updates and
2674  * extent insertions we have queued up so far.  count can be
2675  * 0, which means to process everything in the tree at the start
2676  * of the run (but not newly added entries), or it can be some target
2677  * number you'd like to process.
2678  *
2679  * Returns 0 on success or if called with an aborted transaction
2680  * Returns <0 on error and aborts the transaction
2681  */
2682 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2683                            struct btrfs_root *root, unsigned long count)
2684 {
2685         struct rb_node *node;
2686         struct btrfs_delayed_ref_root *delayed_refs;
2687         struct btrfs_delayed_ref_head *head;
2688         int ret;
2689         int run_all = count == (unsigned long)-1;
2690         int run_most = 0;
2691
2692         /* We'll clean this up in btrfs_cleanup_transaction */
2693         if (trans->aborted)
2694                 return 0;
2695
2696         if (root == root->fs_info->extent_root)
2697                 root = root->fs_info->tree_root;
2698
2699         btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
2700
2701         delayed_refs = &trans->transaction->delayed_refs;
2702         if (count == 0) {
2703                 count = atomic_read(&delayed_refs->num_entries) * 2;
2704                 run_most = 1;
2705         }
2706
2707 again:
2708 #ifdef SCRAMBLE_DELAYED_REFS
2709         delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2710 #endif
2711         ret = __btrfs_run_delayed_refs(trans, root, count);
2712         if (ret < 0) {
2713                 btrfs_abort_transaction(trans, root, ret);
2714                 return ret;
2715         }
2716
2717         if (run_all) {
2718                 if (!list_empty(&trans->new_bgs))
2719                         btrfs_create_pending_block_groups(trans, root);
2720
2721                 spin_lock(&delayed_refs->lock);
2722                 node = rb_first(&delayed_refs->href_root);
2723                 if (!node) {
2724                         spin_unlock(&delayed_refs->lock);
2725                         goto out;
2726                 }
2727                 count = (unsigned long)-1;
2728
2729                 while (node) {
2730                         head = rb_entry(node, struct btrfs_delayed_ref_head,
2731                                         href_node);
2732                         if (btrfs_delayed_ref_is_head(&head->node)) {
2733                                 struct btrfs_delayed_ref_node *ref;
2734
2735                                 ref = &head->node;
2736                                 atomic_inc(&ref->refs);
2737
2738                                 spin_unlock(&delayed_refs->lock);
2739                                 /*
2740                                  * Mutex was contended, block until it's
2741                                  * released and try again
2742                                  */
2743                                 mutex_lock(&head->mutex);
2744                                 mutex_unlock(&head->mutex);
2745
2746                                 btrfs_put_delayed_ref(ref);
2747                                 cond_resched();
2748                                 goto again;
2749                         } else {
2750                                 WARN_ON(1);
2751                         }
2752                         node = rb_next(node);
2753                 }
2754                 spin_unlock(&delayed_refs->lock);
2755                 cond_resched();
2756                 goto again;
2757         }
2758 out:
2759         assert_qgroups_uptodate(trans);
2760         return 0;
2761 }
2762
2763 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2764                                 struct btrfs_root *root,
2765                                 u64 bytenr, u64 num_bytes, u64 flags,
2766                                 int level, int is_data)
2767 {
2768         struct btrfs_delayed_extent_op *extent_op;
2769         int ret;
2770
2771         extent_op = btrfs_alloc_delayed_extent_op();
2772         if (!extent_op)
2773                 return -ENOMEM;
2774
2775         extent_op->flags_to_set = flags;
2776         extent_op->update_flags = 1;
2777         extent_op->update_key = 0;
2778         extent_op->is_data = is_data ? 1 : 0;
2779         extent_op->level = level;
2780
2781         ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2782                                           num_bytes, extent_op);
2783         if (ret)
2784                 btrfs_free_delayed_extent_op(extent_op);
2785         return ret;
2786 }
2787
2788 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2789                                       struct btrfs_root *root,
2790                                       struct btrfs_path *path,
2791                                       u64 objectid, u64 offset, u64 bytenr)
2792 {
2793         struct btrfs_delayed_ref_head *head;
2794         struct btrfs_delayed_ref_node *ref;
2795         struct btrfs_delayed_data_ref *data_ref;
2796         struct btrfs_delayed_ref_root *delayed_refs;
2797         struct rb_node *node;
2798         int ret = 0;
2799
2800         delayed_refs = &trans->transaction->delayed_refs;
2801         spin_lock(&delayed_refs->lock);
2802         head = btrfs_find_delayed_ref_head(trans, bytenr);
2803         if (!head) {
2804                 spin_unlock(&delayed_refs->lock);
2805                 return 0;
2806         }
2807
2808         if (!mutex_trylock(&head->mutex)) {
2809                 atomic_inc(&head->node.refs);
2810                 spin_unlock(&delayed_refs->lock);
2811
2812                 btrfs_release_path(path);
2813
2814                 /*
2815                  * Mutex was contended, block until it's released and let
2816                  * caller try again
2817                  */
2818                 mutex_lock(&head->mutex);
2819                 mutex_unlock(&head->mutex);
2820                 btrfs_put_delayed_ref(&head->node);
2821                 return -EAGAIN;
2822         }
2823         spin_unlock(&delayed_refs->lock);
2824
2825         spin_lock(&head->lock);
2826         node = rb_first(&head->ref_root);
2827         while (node) {
2828                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2829                 node = rb_next(node);
2830
2831                 /* If it's a shared ref we know a cross reference exists */
2832                 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
2833                         ret = 1;
2834                         break;
2835                 }
2836
2837                 data_ref = btrfs_delayed_node_to_data_ref(ref);
2838
2839                 /*
2840                  * If our ref doesn't match the one we're currently looking at
2841                  * then we have a cross reference.
2842                  */
2843                 if (data_ref->root != root->root_key.objectid ||
2844                     data_ref->objectid != objectid ||
2845                     data_ref->offset != offset) {
2846                         ret = 1;
2847                         break;
2848                 }
2849         }
2850         spin_unlock(&head->lock);
2851         mutex_unlock(&head->mutex);
2852         return ret;
2853 }
2854
2855 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2856                                         struct btrfs_root *root,
2857                                         struct btrfs_path *path,
2858                                         u64 objectid, u64 offset, u64 bytenr)
2859 {
2860         struct btrfs_root *extent_root = root->fs_info->extent_root;
2861         struct extent_buffer *leaf;
2862         struct btrfs_extent_data_ref *ref;
2863         struct btrfs_extent_inline_ref *iref;
2864         struct btrfs_extent_item *ei;
2865         struct btrfs_key key;
2866         u32 item_size;
2867         int ret;
2868
2869         key.objectid = bytenr;
2870         key.offset = (u64)-1;
2871         key.type = BTRFS_EXTENT_ITEM_KEY;
2872
2873         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2874         if (ret < 0)
2875                 goto out;
2876         BUG_ON(ret == 0); /* Corruption */
2877
2878         ret = -ENOENT;
2879         if (path->slots[0] == 0)
2880                 goto out;
2881
2882         path->slots[0]--;
2883         leaf = path->nodes[0];
2884         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2885
2886         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2887                 goto out;
2888
2889         ret = 1;
2890         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2891 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2892         if (item_size < sizeof(*ei)) {
2893                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2894                 goto out;
2895         }
2896 #endif
2897         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2898
2899         if (item_size != sizeof(*ei) +
2900             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2901                 goto out;
2902
2903         if (btrfs_extent_generation(leaf, ei) <=
2904             btrfs_root_last_snapshot(&root->root_item))
2905                 goto out;
2906
2907         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2908         if (btrfs_extent_inline_ref_type(leaf, iref) !=
2909             BTRFS_EXTENT_DATA_REF_KEY)
2910                 goto out;
2911
2912         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2913         if (btrfs_extent_refs(leaf, ei) !=
2914             btrfs_extent_data_ref_count(leaf, ref) ||
2915             btrfs_extent_data_ref_root(leaf, ref) !=
2916             root->root_key.objectid ||
2917             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2918             btrfs_extent_data_ref_offset(leaf, ref) != offset)
2919                 goto out;
2920
2921         ret = 0;
2922 out:
2923         return ret;
2924 }
2925
2926 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2927                           struct btrfs_root *root,
2928                           u64 objectid, u64 offset, u64 bytenr)
2929 {
2930         struct btrfs_path *path;
2931         int ret;
2932         int ret2;
2933
2934         path = btrfs_alloc_path();
2935         if (!path)
2936                 return -ENOENT;
2937
2938         do {
2939                 ret = check_committed_ref(trans, root, path, objectid,
2940                                           offset, bytenr);
2941                 if (ret && ret != -ENOENT)
2942                         goto out;
2943
2944                 ret2 = check_delayed_ref(trans, root, path, objectid,
2945                                          offset, bytenr);
2946         } while (ret2 == -EAGAIN);
2947
2948         if (ret2 && ret2 != -ENOENT) {
2949                 ret = ret2;
2950                 goto out;
2951         }
2952
2953         if (ret != -ENOENT || ret2 != -ENOENT)
2954                 ret = 0;
2955 out:
2956         btrfs_free_path(path);
2957         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
2958                 WARN_ON(ret > 0);
2959         return ret;
2960 }
2961
2962 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2963                            struct btrfs_root *root,
2964                            struct extent_buffer *buf,
2965                            int full_backref, int inc, int for_cow)
2966 {
2967         u64 bytenr;
2968         u64 num_bytes;
2969         u64 parent;
2970         u64 ref_root;
2971         u32 nritems;
2972         struct btrfs_key key;
2973         struct btrfs_file_extent_item *fi;
2974         int i;
2975         int level;
2976         int ret = 0;
2977         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
2978                             u64, u64, u64, u64, u64, u64, int);
2979
2980         ref_root = btrfs_header_owner(buf);
2981         nritems = btrfs_header_nritems(buf);
2982         level = btrfs_header_level(buf);
2983
2984         if (!root->ref_cows && level == 0)
2985                 return 0;
2986
2987         if (inc)
2988                 process_func = btrfs_inc_extent_ref;
2989         else
2990                 process_func = btrfs_free_extent;
2991
2992         if (full_backref)
2993                 parent = buf->start;
2994         else
2995                 parent = 0;
2996
2997         for (i = 0; i < nritems; i++) {
2998                 if (level == 0) {
2999                         btrfs_item_key_to_cpu(buf, &key, i);
3000                         if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
3001                                 continue;
3002                         fi = btrfs_item_ptr(buf, i,
3003                                             struct btrfs_file_extent_item);
3004                         if (btrfs_file_extent_type(buf, fi) ==
3005                             BTRFS_FILE_EXTENT_INLINE)
3006                                 continue;
3007                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
3008                         if (bytenr == 0)
3009                                 continue;
3010
3011                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
3012                         key.offset -= btrfs_file_extent_offset(buf, fi);
3013                         ret = process_func(trans, root, bytenr, num_bytes,
3014                                            parent, ref_root, key.objectid,
3015                                            key.offset, for_cow);
3016                         if (ret)
3017                                 goto fail;
3018                 } else {
3019                         bytenr = btrfs_node_blockptr(buf, i);
3020                         num_bytes = btrfs_level_size(root, level - 1);
3021                         ret = process_func(trans, root, bytenr, num_bytes,
3022                                            parent, ref_root, level - 1, 0,
3023                                            for_cow);
3024                         if (ret)
3025                                 goto fail;
3026                 }
3027         }
3028         return 0;
3029 fail:
3030         return ret;
3031 }
3032
3033 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3034                   struct extent_buffer *buf, int full_backref, int for_cow)
3035 {
3036         return __btrfs_mod_ref(trans, root, buf, full_backref, 1, for_cow);
3037 }
3038
3039 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3040                   struct extent_buffer *buf, int full_backref, int for_cow)
3041 {
3042         return __btrfs_mod_ref(trans, root, buf, full_backref, 0, for_cow);
3043 }
3044
3045 static int write_one_cache_group(struct btrfs_trans_handle *trans,
3046                                  struct btrfs_root *root,
3047                                  struct btrfs_path *path,
3048                                  struct btrfs_block_group_cache *cache)
3049 {
3050         int ret;
3051         struct btrfs_root *extent_root = root->fs_info->extent_root;
3052         unsigned long bi;
3053         struct extent_buffer *leaf;
3054
3055         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3056         if (ret < 0)
3057                 goto fail;
3058         BUG_ON(ret); /* Corruption */
3059
3060         leaf = path->nodes[0];
3061         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3062         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3063         btrfs_mark_buffer_dirty(leaf);
3064         btrfs_release_path(path);
3065 fail:
3066         if (ret) {
3067                 btrfs_abort_transaction(trans, root, ret);
3068                 return ret;
3069         }
3070         return 0;
3071
3072 }
3073
3074 static struct btrfs_block_group_cache *
3075 next_block_group(struct btrfs_root *root,
3076                  struct btrfs_block_group_cache *cache)
3077 {
3078         struct rb_node *node;
3079         spin_lock(&root->fs_info->block_group_cache_lock);
3080         node = rb_next(&cache->cache_node);
3081         btrfs_put_block_group(cache);
3082         if (node) {
3083                 cache = rb_entry(node, struct btrfs_block_group_cache,
3084                                  cache_node);
3085                 btrfs_get_block_group(cache);
3086         } else
3087                 cache = NULL;
3088         spin_unlock(&root->fs_info->block_group_cache_lock);
3089         return cache;
3090 }
3091
3092 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3093                             struct btrfs_trans_handle *trans,
3094                             struct btrfs_path *path)
3095 {
3096         struct btrfs_root *root = block_group->fs_info->tree_root;
3097         struct inode *inode = NULL;
3098         u64 alloc_hint = 0;
3099         int dcs = BTRFS_DC_ERROR;
3100         int num_pages = 0;
3101         int retries = 0;
3102         int ret = 0;
3103
3104         /*
3105          * If this block group is smaller than 100 megs don't bother caching the
3106          * block group.
3107          */
3108         if (block_group->key.offset < (100 * 1024 * 1024)) {
3109                 spin_lock(&block_group->lock);
3110                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3111                 spin_unlock(&block_group->lock);
3112                 return 0;
3113         }
3114
3115 again:
3116         inode = lookup_free_space_inode(root, block_group, path);
3117         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3118                 ret = PTR_ERR(inode);
3119                 btrfs_release_path(path);
3120                 goto out;
3121         }
3122
3123         if (IS_ERR(inode)) {
3124                 BUG_ON(retries);
3125                 retries++;
3126
3127                 if (block_group->ro)
3128                         goto out_free;
3129
3130                 ret = create_free_space_inode(root, trans, block_group, path);
3131                 if (ret)
3132                         goto out_free;
3133                 goto again;
3134         }
3135
3136         /* We've already setup this transaction, go ahead and exit */
3137         if (block_group->cache_generation == trans->transid &&
3138             i_size_read(inode)) {
3139                 dcs = BTRFS_DC_SETUP;
3140                 goto out_put;
3141         }
3142
3143         /*
3144          * We want to set the generation to 0, that way if anything goes wrong
3145          * from here on out we know not to trust this cache when we load up next
3146          * time.
3147          */
3148         BTRFS_I(inode)->generation = 0;
3149         ret = btrfs_update_inode(trans, root, inode);
3150         WARN_ON(ret);
3151
3152         if (i_size_read(inode) > 0) {
3153                 ret = btrfs_check_trunc_cache_free_space(root,
3154                                         &root->fs_info->global_block_rsv);
3155                 if (ret)
3156                         goto out_put;
3157
3158                 ret = btrfs_truncate_free_space_cache(root, trans, inode);
3159                 if (ret)
3160                         goto out_put;
3161         }
3162
3163         spin_lock(&block_group->lock);
3164         if (block_group->cached != BTRFS_CACHE_FINISHED ||
3165             !btrfs_test_opt(root, SPACE_CACHE)) {
3166                 /*
3167                  * don't bother trying to write stuff out _if_
3168                  * a) we're not cached,
3169                  * b) we're with nospace_cache mount option.
3170                  */
3171                 dcs = BTRFS_DC_WRITTEN;
3172                 spin_unlock(&block_group->lock);
3173                 goto out_put;
3174         }
3175         spin_unlock(&block_group->lock);
3176
3177         /*
3178          * Try to preallocate enough space based on how big the block group is.
3179          * Keep in mind this has to include any pinned space which could end up
3180          * taking up quite a bit since it's not folded into the other space
3181          * cache.
3182          */
3183         num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024);
3184         if (!num_pages)
3185                 num_pages = 1;
3186
3187         num_pages *= 16;
3188         num_pages *= PAGE_CACHE_SIZE;
3189
3190         ret = btrfs_check_data_free_space(inode, num_pages);
3191         if (ret)
3192                 goto out_put;
3193
3194         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3195                                               num_pages, num_pages,
3196                                               &alloc_hint);
3197         if (!ret)
3198                 dcs = BTRFS_DC_SETUP;
3199         btrfs_free_reserved_data_space(inode, num_pages);
3200
3201 out_put:
3202         iput(inode);
3203 out_free:
3204         btrfs_release_path(path);
3205 out:
3206         spin_lock(&block_group->lock);
3207         if (!ret && dcs == BTRFS_DC_SETUP)
3208                 block_group->cache_generation = trans->transid;
3209         block_group->disk_cache_state = dcs;
3210         spin_unlock(&block_group->lock);
3211
3212         return ret;
3213 }
3214
3215 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3216                                    struct btrfs_root *root)
3217 {
3218         struct btrfs_block_group_cache *cache;
3219         int err = 0;
3220         struct btrfs_path *path;
3221         u64 last = 0;
3222
3223         path = btrfs_alloc_path();
3224         if (!path)
3225                 return -ENOMEM;
3226
3227 again:
3228         while (1) {
3229                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3230                 while (cache) {
3231                         if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3232                                 break;
3233                         cache = next_block_group(root, cache);
3234                 }
3235                 if (!cache) {
3236                         if (last == 0)
3237                                 break;
3238                         last = 0;
3239                         continue;
3240                 }
3241                 err = cache_save_setup(cache, trans, path);
3242                 last = cache->key.objectid + cache->key.offset;
3243                 btrfs_put_block_group(cache);
3244         }
3245
3246         while (1) {
3247                 if (last == 0) {
3248                         err = btrfs_run_delayed_refs(trans, root,
3249                                                      (unsigned long)-1);
3250                         if (err) /* File system offline */
3251                                 goto out;
3252                 }
3253
3254                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3255                 while (cache) {
3256                         if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
3257                                 btrfs_put_block_group(cache);
3258                                 goto again;
3259                         }
3260
3261                         if (cache->dirty)
3262                                 break;
3263                         cache = next_block_group(root, cache);
3264                 }
3265                 if (!cache) {
3266                         if (last == 0)
3267                                 break;
3268                         last = 0;
3269                         continue;
3270                 }
3271
3272                 if (cache->disk_cache_state == BTRFS_DC_SETUP)
3273                         cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
3274                 cache->dirty = 0;
3275                 last = cache->key.objectid + cache->key.offset;
3276
3277                 err = write_one_cache_group(trans, root, path, cache);
3278                 btrfs_put_block_group(cache);
3279                 if (err) /* File system offline */
3280                         goto out;
3281         }
3282
3283         while (1) {
3284                 /*
3285                  * I don't think this is needed since we're just marking our
3286                  * preallocated extent as written, but just in case it can't
3287                  * hurt.
3288                  */
3289                 if (last == 0) {
3290                         err = btrfs_run_delayed_refs(trans, root,
3291                                                      (unsigned long)-1);
3292                         if (err) /* File system offline */
3293                                 goto out;
3294                 }
3295
3296                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3297                 while (cache) {
3298                         /*
3299                          * Really this shouldn't happen, but it could if we
3300                          * couldn't write the entire preallocated extent and
3301                          * splitting the extent resulted in a new block.
3302                          */
3303                         if (cache->dirty) {
3304                                 btrfs_put_block_group(cache);
3305                                 goto again;
3306                         }
3307                         if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3308                                 break;
3309                         cache = next_block_group(root, cache);
3310                 }
3311                 if (!cache) {
3312                         if (last == 0)
3313                                 break;
3314                         last = 0;
3315                         continue;
3316                 }
3317
3318                 err = btrfs_write_out_cache(root, trans, cache, path);
3319
3320                 /*
3321                  * If we didn't have an error then the cache state is still
3322                  * NEED_WRITE, so we can set it to WRITTEN.
3323                  */
3324                 if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3325                         cache->disk_cache_state = BTRFS_DC_WRITTEN;
3326                 last = cache->key.objectid + cache->key.offset;
3327                 btrfs_put_block_group(cache);
3328         }
3329 out:
3330
3331         btrfs_free_path(path);
3332         return err;
3333 }
3334
3335 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3336 {
3337         struct btrfs_block_group_cache *block_group;
3338         int readonly = 0;
3339
3340         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3341         if (!block_group || block_group->ro)
3342                 readonly = 1;
3343         if (block_group)
3344                 btrfs_put_block_group(block_group);
3345         return readonly;
3346 }
3347
3348 static const char *alloc_name(u64 flags)
3349 {
3350         switch (flags) {
3351         case BTRFS_BLOCK_GROUP_METADATA|BTRFS_BLOCK_GROUP_DATA:
3352                 return "mixed";
3353         case BTRFS_BLOCK_GROUP_METADATA:
3354                 return "metadata";
3355         case BTRFS_BLOCK_GROUP_DATA:
3356                 return "data";
3357         case BTRFS_BLOCK_GROUP_SYSTEM:
3358                 return "system";
3359         default:
3360                 WARN_ON(1);
3361                 return "invalid-combination";
3362         };
3363 }
3364
3365 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3366                              u64 total_bytes, u64 bytes_used,
3367                              struct btrfs_space_info **space_info)
3368 {
3369         struct btrfs_space_info *found;
3370         int i;
3371         int factor;
3372         int ret;
3373
3374         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3375                      BTRFS_BLOCK_GROUP_RAID10))
3376                 factor = 2;
3377         else
3378                 factor = 1;
3379
3380         found = __find_space_info(info, flags);
3381         if (found) {
3382                 spin_lock(&found->lock);
3383                 found->total_bytes += total_bytes;
3384                 found->disk_total += total_bytes * factor;
3385                 found->bytes_used += bytes_used;
3386                 found->disk_used += bytes_used * factor;
3387                 found->full = 0;
3388                 spin_unlock(&found->lock);
3389                 *space_info = found;
3390                 return 0;
3391         }
3392         found = kzalloc(sizeof(*found), GFP_NOFS);
3393         if (!found)
3394                 return -ENOMEM;
3395
3396         ret = percpu_counter_init(&found->total_bytes_pinned, 0);
3397         if (ret) {
3398                 kfree(found);
3399                 return ret;
3400         }
3401
3402         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
3403                 INIT_LIST_HEAD(&found->block_groups[i]);
3404                 kobject_init(&found->block_group_kobjs[i], &btrfs_raid_ktype);
3405         }
3406         init_rwsem(&found->groups_sem);
3407         spin_lock_init(&found->lock);
3408         found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3409         found->total_bytes = total_bytes;
3410         found->disk_total = total_bytes * factor;
3411         found->bytes_used = bytes_used;
3412         found->disk_used = bytes_used * factor;
3413         found->bytes_pinned = 0;
3414         found->bytes_reserved = 0;
3415         found->bytes_readonly = 0;
3416         found->bytes_may_use = 0;
3417         found->full = 0;
3418         found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3419         found->chunk_alloc = 0;
3420         found->flush = 0;
3421         init_waitqueue_head(&found->wait);
3422
3423         ret = kobject_init_and_add(&found->kobj, &space_info_ktype,
3424                                     info->space_info_kobj, "%s",
3425                                     alloc_name(found->flags));
3426         if (ret) {
3427                 kfree(found);
3428                 return ret;
3429         }
3430
3431         *space_info = found;
3432         list_add_rcu(&found->list, &info->space_info);
3433         if (flags & BTRFS_BLOCK_GROUP_DATA)
3434                 info->data_sinfo = found;
3435
3436         return ret;
3437 }
3438
3439 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3440 {
3441         u64 extra_flags = chunk_to_extended(flags) &
3442                                 BTRFS_EXTENDED_PROFILE_MASK;
3443
3444         write_seqlock(&fs_info->profiles_lock);
3445         if (flags & BTRFS_BLOCK_GROUP_DATA)
3446                 fs_info->avail_data_alloc_bits |= extra_flags;
3447         if (flags & BTRFS_BLOCK_GROUP_METADATA)
3448                 fs_info->avail_metadata_alloc_bits |= extra_flags;
3449         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3450                 fs_info->avail_system_alloc_bits |= extra_flags;
3451         write_sequnlock(&fs_info->profiles_lock);
3452 }
3453
3454 /*
3455  * returns target flags in extended format or 0 if restripe for this
3456  * chunk_type is not in progress
3457  *
3458  * should be called with either volume_mutex or balance_lock held
3459  */
3460 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3461 {
3462         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3463         u64 target = 0;
3464
3465         if (!bctl)
3466                 return 0;
3467
3468         if (flags & BTRFS_BLOCK_GROUP_DATA &&
3469             bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3470                 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3471         } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3472                    bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3473                 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3474         } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3475                    bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3476                 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3477         }
3478
3479         return target;
3480 }
3481
3482 /*
3483  * @flags: available profiles in extended format (see ctree.h)
3484  *
3485  * Returns reduced profile in chunk format.  If profile changing is in
3486  * progress (either running or paused) picks the target profile (if it's
3487  * already available), otherwise falls back to plain reducing.
3488  */
3489 static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3490 {
3491         /*
3492          * we add in the count of missing devices because we want
3493          * to make sure that any RAID levels on a degraded FS
3494          * continue to be honored.
3495          */
3496         u64 num_devices = root->fs_info->fs_devices->rw_devices +
3497                 root->fs_info->fs_devices->missing_devices;
3498         u64 target;
3499         u64 tmp;
3500
3501         /*
3502          * see if restripe for this chunk_type is in progress, if so
3503          * try to reduce to the target profile
3504          */
3505         spin_lock(&root->fs_info->balance_lock);
3506         target = get_restripe_target(root->fs_info, flags);
3507         if (target) {
3508                 /* pick target profile only if it's already available */
3509                 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3510                         spin_unlock(&root->fs_info->balance_lock);
3511                         return extended_to_chunk(target);
3512                 }
3513         }
3514         spin_unlock(&root->fs_info->balance_lock);
3515
3516         /* First, mask out the RAID levels which aren't possible */
3517         if (num_devices == 1)
3518                 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0 |
3519                            BTRFS_BLOCK_GROUP_RAID5);
3520         if (num_devices < 3)
3521                 flags &= ~BTRFS_BLOCK_GROUP_RAID6;
3522         if (num_devices < 4)
3523                 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3524
3525         tmp = flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3526                        BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID5 |
3527                        BTRFS_BLOCK_GROUP_RAID6 | BTRFS_BLOCK_GROUP_RAID10);
3528         flags &= ~tmp;
3529
3530         if (tmp & BTRFS_BLOCK_GROUP_RAID6)
3531                 tmp = BTRFS_BLOCK_GROUP_RAID6;
3532         else if (tmp & BTRFS_BLOCK_GROUP_RAID5)
3533                 tmp = BTRFS_BLOCK_GROUP_RAID5;
3534         else if (tmp & BTRFS_BLOCK_GROUP_RAID10)
3535                 tmp = BTRFS_BLOCK_GROUP_RAID10;
3536         else if (tmp & BTRFS_BLOCK_GROUP_RAID1)
3537                 tmp = BTRFS_BLOCK_GROUP_RAID1;
3538         else if (tmp & BTRFS_BLOCK_GROUP_RAID0)
3539                 tmp = BTRFS_BLOCK_GROUP_RAID0;
3540
3541         return extended_to_chunk(flags | tmp);
3542 }
3543
3544 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
3545 {
3546         unsigned seq;
3547
3548         do {
3549                 seq = read_seqbegin(&root->fs_info->profiles_lock);
3550
3551                 if (flags & BTRFS_BLOCK_GROUP_DATA)
3552                         flags |= root->fs_info->avail_data_alloc_bits;
3553                 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3554                         flags |= root->fs_info->avail_system_alloc_bits;
3555                 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3556                         flags |= root->fs_info->avail_metadata_alloc_bits;
3557         } while (read_seqretry(&root->fs_info->profiles_lock, seq));
3558
3559         return btrfs_reduce_alloc_profile(root, flags);
3560 }
3561
3562 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3563 {
3564         u64 flags;
3565         u64 ret;
3566
3567         if (data)
3568                 flags = BTRFS_BLOCK_GROUP_DATA;
3569         else if (root == root->fs_info->chunk_root)
3570                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3571         else
3572                 flags = BTRFS_BLOCK_GROUP_METADATA;
3573
3574         ret = get_alloc_profile(root, flags);
3575         return ret;
3576 }
3577
3578 /*
3579  * This will check the space that the inode allocates from to make sure we have
3580  * enough space for bytes.
3581  */
3582 int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
3583 {
3584         struct btrfs_space_info *data_sinfo;
3585         struct btrfs_root *root = BTRFS_I(inode)->root;
3586         struct btrfs_fs_info *fs_info = root->fs_info;
3587         u64 used;
3588         int ret = 0, committed = 0, alloc_chunk = 1;
3589
3590         /* make sure bytes are sectorsize aligned */
3591         bytes = ALIGN(bytes, root->sectorsize);
3592
3593         if (btrfs_is_free_space_inode(inode)) {
3594                 committed = 1;
3595                 ASSERT(current->journal_info);
3596         }
3597
3598         data_sinfo = fs_info->data_sinfo;
3599         if (!data_sinfo)
3600                 goto alloc;
3601
3602 again:
3603         /* make sure we have enough space to handle the data first */
3604         spin_lock(&data_sinfo->lock);
3605         used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3606                 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3607                 data_sinfo->bytes_may_use;
3608
3609         if (used + bytes > data_sinfo->total_bytes) {
3610                 struct btrfs_trans_handle *trans;
3611
3612                 /*
3613                  * if we don't have enough free bytes in this space then we need
3614                  * to alloc a new chunk.
3615                  */
3616                 if (!data_sinfo->full && alloc_chunk) {
3617                         u64 alloc_target;
3618
3619                         data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3620                         spin_unlock(&data_sinfo->lock);
3621 alloc:
3622                         alloc_target = btrfs_get_alloc_profile(root, 1);
3623                         /*
3624                          * It is ugly that we don't call nolock join
3625                          * transaction for the free space inode case here.
3626                          * But it is safe because we only do the data space
3627                          * reservation for the free space cache in the
3628                          * transaction context, the common join transaction
3629                          * just increase the counter of the current transaction
3630                          * handler, doesn't try to acquire the trans_lock of
3631                          * the fs.
3632                          */
3633                         trans = btrfs_join_transaction(root);
3634                         if (IS_ERR(trans))
3635                                 return PTR_ERR(trans);
3636
3637                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3638                                              alloc_target,
3639                                              CHUNK_ALLOC_NO_FORCE);
3640                         btrfs_end_transaction(trans, root);
3641                         if (ret < 0) {
3642                                 if (ret != -ENOSPC)
3643                                         return ret;
3644                                 else
3645                                         goto commit_trans;
3646                         }
3647
3648                         if (!data_sinfo)
3649                                 data_sinfo = fs_info->data_sinfo;
3650
3651                         goto again;
3652                 }
3653
3654                 /*
3655                  * If we don't have enough pinned space to deal with this
3656                  * allocation don't bother committing the transaction.
3657                  */
3658                 if (percpu_counter_compare(&data_sinfo->total_bytes_pinned,
3659                                            bytes) < 0)
3660                         committed = 1;
3661                 spin_unlock(&data_sinfo->lock);
3662
3663                 /* commit the current transaction and try again */
3664 commit_trans:
3665                 if (!committed &&
3666                     !atomic_read(&root->fs_info->open_ioctl_trans)) {
3667                         committed = 1;
3668
3669                         trans = btrfs_join_transaction(root);
3670                         if (IS_ERR(trans))
3671                                 return PTR_ERR(trans);
3672                         ret = btrfs_commit_transaction(trans, root);
3673                         if (ret)
3674                                 return ret;
3675                         goto again;
3676                 }
3677
3678                 trace_btrfs_space_reservation(root->fs_info,
3679                                               "space_info:enospc",
3680                                               data_sinfo->flags, bytes, 1);
3681                 return -ENOSPC;
3682         }
3683         data_sinfo->bytes_may_use += bytes;
3684         trace_btrfs_space_reservation(root->fs_info, "space_info",
3685                                       data_sinfo->flags, bytes, 1);
3686         spin_unlock(&data_sinfo->lock);
3687
3688         return 0;
3689 }
3690
3691 /*
3692  * Called if we need to clear a data reservation for this inode.
3693  */
3694 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3695 {
3696         struct btrfs_root *root = BTRFS_I(inode)->root;
3697         struct btrfs_space_info *data_sinfo;
3698
3699         /* make sure bytes are sectorsize aligned */
3700         bytes = ALIGN(bytes, root->sectorsize);
3701
3702         data_sinfo = root->fs_info->data_sinfo;
3703         spin_lock(&data_sinfo->lock);
3704         WARN_ON(data_sinfo->bytes_may_use < bytes);
3705         data_sinfo->bytes_may_use -= bytes;
3706         trace_btrfs_space_reservation(root->fs_info, "space_info",
3707                                       data_sinfo->flags, bytes, 0);
3708         spin_unlock(&data_sinfo->lock);
3709 }
3710
3711 static void force_metadata_allocation(struct btrfs_fs_info *info)
3712 {
3713         struct list_head *head = &info->space_info;
3714         struct btrfs_space_info *found;
3715
3716         rcu_read_lock();
3717         list_for_each_entry_rcu(found, head, list) {
3718                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3719                         found->force_alloc = CHUNK_ALLOC_FORCE;
3720         }
3721         rcu_read_unlock();
3722 }
3723
3724 static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
3725 {
3726         return (global->size << 1);
3727 }
3728
3729 static int should_alloc_chunk(struct btrfs_root *root,
3730                               struct btrfs_space_info *sinfo, int force)
3731 {
3732         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3733         u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3734         u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
3735         u64 thresh;
3736
3737         if (force == CHUNK_ALLOC_FORCE)
3738                 return 1;
3739
3740         /*
3741          * We need to take into account the global rsv because for all intents
3742          * and purposes it's used space.  Don't worry about locking the
3743          * global_rsv, it doesn't change except when the transaction commits.
3744          */
3745         if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
3746                 num_allocated += calc_global_rsv_need_space(global_rsv);
3747
3748         /*
3749          * in limited mode, we want to have some free space up to
3750          * about 1% of the FS size.
3751          */
3752         if (force == CHUNK_ALLOC_LIMITED) {
3753                 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
3754                 thresh = max_t(u64, 64 * 1024 * 1024,
3755                                div_factor_fine(thresh, 1));
3756
3757                 if (num_bytes - num_allocated < thresh)
3758                         return 1;
3759         }
3760
3761         if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
3762                 return 0;
3763         return 1;
3764 }
3765
3766 static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
3767 {
3768         u64 num_dev;
3769
3770         if (type & (BTRFS_BLOCK_GROUP_RAID10 |
3771                     BTRFS_BLOCK_GROUP_RAID0 |
3772                     BTRFS_BLOCK_GROUP_RAID5 |
3773                     BTRFS_BLOCK_GROUP_RAID6))
3774                 num_dev = root->fs_info->fs_devices->rw_devices;
3775         else if (type & BTRFS_BLOCK_GROUP_RAID1)
3776                 num_dev = 2;
3777         else
3778                 num_dev = 1;    /* DUP or single */
3779
3780         /* metadata for updaing devices and chunk tree */
3781         return btrfs_calc_trans_metadata_size(root, num_dev + 1);
3782 }
3783
3784 static void check_system_chunk(struct btrfs_trans_handle *trans,
3785                                struct btrfs_root *root, u64 type)
3786 {
3787         struct btrfs_space_info *info;
3788         u64 left;
3789         u64 thresh;
3790
3791         info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3792         spin_lock(&info->lock);
3793         left = info->total_bytes - info->bytes_used - info->bytes_pinned -
3794                 info->bytes_reserved - info->bytes_readonly;
3795         spin_unlock(&info->lock);
3796
3797         thresh = get_system_chunk_thresh(root, type);
3798         if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
3799                 btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
3800                         left, thresh, type);
3801                 dump_space_info(info, 0, 0);
3802         }
3803
3804         if (left < thresh) {
3805                 u64 flags;
3806
3807                 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
3808                 btrfs_alloc_chunk(trans, root, flags);
3809         }
3810 }
3811
3812 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3813                           struct btrfs_root *extent_root, u64 flags, int force)
3814 {
3815         struct btrfs_space_info *space_info;
3816         struct btrfs_fs_info *fs_info = extent_root->fs_info;
3817         int wait_for_alloc = 0;
3818         int ret = 0;
3819
3820         /* Don't re-enter if we're already allocating a chunk */
3821         if (trans->allocating_chunk)
3822                 return -ENOSPC;
3823
3824         space_info = __find_space_info(extent_root->fs_info, flags);
3825         if (!space_info) {
3826                 ret = update_space_info(extent_root->fs_info, flags,
3827                                         0, 0, &space_info);
3828                 BUG_ON(ret); /* -ENOMEM */
3829         }
3830         BUG_ON(!space_info); /* Logic error */
3831
3832 again:
3833         spin_lock(&space_info->lock);
3834         if (force < space_info->force_alloc)
3835                 force = space_info->force_alloc;
3836         if (space_info->full) {
3837                 if (should_alloc_chunk(extent_root, space_info, force))
3838                         ret = -ENOSPC;
3839                 else
3840                         ret = 0;
3841                 spin_unlock(&space_info->lock);
3842                 return ret;
3843         }
3844
3845         if (!should_alloc_chunk(extent_root, space_info, force)) {
3846                 spin_unlock(&space_info->lock);
3847                 return 0;
3848         } else if (space_info->chunk_alloc) {
3849                 wait_for_alloc = 1;
3850         } else {
3851                 space_info->chunk_alloc = 1;
3852         }
3853
3854         spin_unlock(&space_info->lock);
3855
3856         mutex_lock(&fs_info->chunk_mutex);
3857
3858         /*
3859          * The chunk_mutex is held throughout the entirety of a chunk
3860          * allocation, so once we've acquired the chunk_mutex we know that the
3861          * other guy is done and we need to recheck and see if we should
3862          * allocate.
3863          */
3864         if (wait_for_alloc) {
3865                 mutex_unlock(&fs_info->chunk_mutex);
3866                 wait_for_alloc = 0;
3867                 goto again;
3868         }
3869
3870         trans->allocating_chunk = true;
3871
3872         /*
3873          * If we have mixed data/metadata chunks we want to make sure we keep
3874          * allocating mixed chunks instead of individual chunks.
3875          */
3876         if (btrfs_mixed_space_info(space_info))
3877                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3878
3879         /*
3880          * if we're doing a data chunk, go ahead and make sure that
3881          * we keep a reasonable number of metadata chunks allocated in the
3882          * FS as well.
3883          */
3884         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3885                 fs_info->data_chunk_allocations++;
3886                 if (!(fs_info->data_chunk_allocations %
3887                       fs_info->metadata_ratio))
3888                         force_metadata_allocation(fs_info);
3889         }
3890
3891         /*
3892          * Check if we have enough space in SYSTEM chunk because we may need
3893          * to update devices.
3894          */
3895         check_system_chunk(trans, extent_root, flags);
3896
3897         ret = btrfs_alloc_chunk(trans, extent_root, flags);
3898         trans->allocating_chunk = false;
3899
3900         spin_lock(&space_info->lock);
3901         if (ret < 0 && ret != -ENOSPC)
3902                 goto out;
3903         if (ret)
3904                 space_info->full = 1;
3905         else
3906                 ret = 1;
3907
3908         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
3909 out:
3910         space_info->chunk_alloc = 0;
3911         spin_unlock(&space_info->lock);
3912         mutex_unlock(&fs_info->chunk_mutex);
3913         return ret;
3914 }
3915
3916 static int can_overcommit(struct btrfs_root *root,
3917                           struct btrfs_space_info *space_info, u64 bytes,
3918                           enum btrfs_reserve_flush_enum flush)
3919 {
3920         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3921         u64 profile = btrfs_get_alloc_profile(root, 0);
3922         u64 space_size;
3923         u64 avail;
3924         u64 used;
3925
3926         used = space_info->bytes_used + space_info->bytes_reserved +
3927                 space_info->bytes_pinned + space_info->bytes_readonly;
3928
3929         /*
3930          * We only want to allow over committing if we have lots of actual space
3931          * free, but if we don't have enough space to handle the global reserve
3932          * space then we could end up having a real enospc problem when trying
3933          * to allocate a chunk or some other such important allocation.
3934          */
3935         spin_lock(&global_rsv->lock);
3936         space_size = calc_global_rsv_need_space(global_rsv);
3937         spin_unlock(&global_rsv->lock);
3938         if (used + space_size >= space_info->total_bytes)
3939                 return 0;
3940
3941         used += space_info->bytes_may_use;
3942
3943         spin_lock(&root->fs_info->free_chunk_lock);
3944         avail = root->fs_info->free_chunk_space;
3945         spin_unlock(&root->fs_info->free_chunk_lock);
3946
3947         /*
3948          * If we have dup, raid1 or raid10 then only half of the free
3949          * space is actually useable.  For raid56, the space info used
3950          * doesn't include the parity drive, so we don't have to
3951          * change the math
3952          */
3953         if (profile & (BTRFS_BLOCK_GROUP_DUP |
3954                        BTRFS_BLOCK_GROUP_RAID1 |
3955                        BTRFS_BLOCK_GROUP_RAID10))
3956                 avail >>= 1;
3957
3958         /*
3959          * If we aren't flushing all things, let us overcommit up to
3960          * 1/2th of the space. If we can flush, don't let us overcommit
3961          * too much, let it overcommit up to 1/8 of the space.
3962          */
3963         if (flush == BTRFS_RESERVE_FLUSH_ALL)
3964                 avail >>= 3;
3965         else
3966                 avail >>= 1;
3967
3968         if (used + bytes < space_info->total_bytes + avail)
3969                 return 1;
3970         return 0;
3971 }
3972
3973 static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
3974                                          unsigned long nr_pages, int nr_items)
3975 {
3976         struct super_block *sb = root->fs_info->sb;
3977
3978         if (down_read_trylock(&sb->s_umount)) {
3979                 writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
3980                 up_read(&sb->s_umount);
3981         } else {
3982                 /*
3983                  * We needn't worry the filesystem going from r/w to r/o though
3984                  * we don't acquire ->s_umount mutex, because the filesystem
3985                  * should guarantee the delalloc inodes list be empty after
3986                  * the filesystem is readonly(all dirty pages are written to
3987                  * the disk).
3988                  */
3989                 btrfs_start_delalloc_roots(root->fs_info, 0, nr_items);
3990                 if (!current->journal_info)
3991                         btrfs_wait_ordered_roots(root->fs_info, nr_items);
3992         }
3993 }
3994
3995 static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim)
3996 {
3997         u64 bytes;
3998         int nr;
3999
4000         bytes = btrfs_calc_trans_metadata_size(root, 1);
4001         nr = (int)div64_u64(to_reclaim, bytes);
4002         if (!nr)
4003                 nr = 1;
4004         return nr;
4005 }
4006
4007 #define EXTENT_SIZE_PER_ITEM    (256 * 1024)
4008
4009 /*
4010  * shrink metadata reservation for delalloc
4011  */
4012 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
4013                             bool wait_ordered)
4014 {
4015         struct btrfs_block_rsv *block_rsv;
4016         struct btrfs_space_info *space_info;
4017         struct btrfs_trans_handle *trans;
4018         u64 delalloc_bytes;
4019         u64 max_reclaim;
4020         long time_left;
4021         unsigned long nr_pages;
4022         int loops;
4023         int items;
4024         enum btrfs_reserve_flush_enum flush;
4025
4026         /* Calc the number of the pages we need flush for space reservation */
4027         items = calc_reclaim_items_nr(root, to_reclaim);
4028         to_reclaim = items * EXTENT_SIZE_PER_ITEM;
4029
4030         trans = (struct btrfs_trans_handle *)current->journal_info;
4031         block_rsv = &root->fs_info->delalloc_block_rsv;
4032         space_info = block_rsv->space_info;
4033
4034         delalloc_bytes = percpu_counter_sum_positive(
4035                                                 &root->fs_info->delalloc_bytes);
4036         if (delalloc_bytes == 0) {
4037                 if (trans)
4038                         return;
4039                 if (wait_ordered)
4040                         btrfs_wait_ordered_roots(root->fs_info, items);
4041                 return;
4042         }
4043
4044         loops = 0;
4045         while (delalloc_bytes && loops < 3) {
4046                 max_reclaim = min(delalloc_bytes, to_reclaim);
4047                 nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
4048                 btrfs_writeback_inodes_sb_nr(root, nr_pages, items);
4049                 /*
4050                  * We need to wait for the async pages to actually start before
4051                  * we do anything.
4052                  */
4053                 max_reclaim = atomic_read(&root->fs_info->async_delalloc_pages);
4054                 if (!max_reclaim)
4055                         goto skip_async;
4056
4057                 if (max_reclaim <= nr_pages)
4058                         max_reclaim = 0;
4059                 else
4060                         max_reclaim -= nr_pages;
4061
4062                 wait_event(root->fs_info->async_submit_wait,
4063                            atomic_read(&root->fs_info->async_delalloc_pages) <=
4064                            (int)max_reclaim);
4065 skip_async:
4066                 if (!trans)
4067                         flush = BTRFS_RESERVE_FLUSH_ALL;
4068                 else
4069                         flush = BTRFS_RESERVE_NO_FLUSH;
4070                 spin_lock(&space_info->lock);
4071                 if (can_overcommit(root, space_info, orig, flush)) {
4072                         spin_unlock(&space_info->lock);
4073                         break;
4074                 }
4075                 spin_unlock(&space_info->lock);
4076
4077                 loops++;
4078                 if (wait_ordered && !trans) {
4079                         btrfs_wait_ordered_roots(root->fs_info, items);
4080                 } else {
4081                         time_left = schedule_timeout_killable(1);
4082                         if (time_left)
4083                                 break;
4084                 }
4085                 delalloc_bytes = percpu_counter_sum_positive(
4086                                                 &root->fs_info->delalloc_bytes);
4087         }
4088 }
4089
4090 /**
4091  * maybe_commit_transaction - possibly commit the transaction if its ok to
4092  * @root - the root we're allocating for
4093  * @bytes - the number of bytes we want to reserve
4094  * @force - force the commit
4095  *
4096  * This will check to make sure that committing the transaction will actually
4097  * get us somewhere and then commit the transaction if it does.  Otherwise it
4098  * will return -ENOSPC.
4099  */
4100 static int may_commit_transaction(struct btrfs_root *root,
4101                                   struct btrfs_space_info *space_info,
4102                                   u64 bytes, int force)
4103 {
4104         struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
4105         struct btrfs_trans_handle *trans;
4106
4107         trans = (struct btrfs_trans_handle *)current->journal_info;
4108         if (trans)
4109                 return -EAGAIN;
4110
4111         if (force)
4112                 goto commit;
4113
4114         /* See if there is enough pinned space to make this reservation */
4115         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4116                                    bytes) >= 0)
4117                 goto commit;
4118
4119         /*
4120          * See if there is some space in the delayed insertion reservation for
4121          * this reservation.
4122          */
4123         if (space_info != delayed_rsv->space_info)
4124                 return -ENOSPC;
4125
4126         spin_lock(&delayed_rsv->lock);
4127         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4128                                    bytes - delayed_rsv->size) >= 0) {
4129                 spin_unlock(&delayed_rsv->lock);
4130                 return -ENOSPC;
4131         }
4132         spin_unlock(&delayed_rsv->lock);
4133
4134 commit:
4135         trans = btrfs_join_transaction(root);
4136         if (IS_ERR(trans))
4137                 return -ENOSPC;
4138
4139         return btrfs_commit_transaction(trans, root);
4140 }
4141
4142 enum flush_state {
4143         FLUSH_DELAYED_ITEMS_NR  =       1,
4144         FLUSH_DELAYED_ITEMS     =       2,
4145         FLUSH_DELALLOC          =       3,
4146         FLUSH_DELALLOC_WAIT     =       4,
4147         ALLOC_CHUNK             =       5,
4148         COMMIT_TRANS            =       6,
4149 };
4150
4151 static int flush_space(struct btrfs_root *root,
4152                        struct btrfs_space_info *space_info, u64 num_bytes,
4153                        u64 orig_bytes, int state)
4154 {
4155         struct btrfs_trans_handle *trans;
4156         int nr;
4157         int ret = 0;
4158
4159         switch (state) {
4160         case FLUSH_DELAYED_ITEMS_NR:
4161         case FLUSH_DELAYED_ITEMS:
4162                 if (state == FLUSH_DELAYED_ITEMS_NR)
4163                         nr = calc_reclaim_items_nr(root, num_bytes) * 2;
4164                 else
4165                         nr = -1;
4166
4167                 trans = btrfs_join_transaction(root);
4168                 if (IS_ERR(trans)) {
4169                         ret = PTR_ERR(trans);
4170                         break;
4171                 }
4172                 ret = btrfs_run_delayed_items_nr(trans, root, nr);
4173                 btrfs_end_transaction(trans, root);
4174                 break;
4175         case FLUSH_DELALLOC:
4176         case FLUSH_DELALLOC_WAIT:
4177                 shrink_delalloc(root, num_bytes * 2, orig_bytes,
4178                                 state == FLUSH_DELALLOC_WAIT);
4179                 break;
4180         case ALLOC_CHUNK:
4181                 trans = btrfs_join_transaction(root);
4182                 if (IS_ERR(trans)) {
4183                         ret = PTR_ERR(trans);
4184                         break;
4185                 }
4186                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4187                                      btrfs_get_alloc_profile(root, 0),
4188                                      CHUNK_ALLOC_NO_FORCE);
4189                 btrfs_end_transaction(trans, root);
4190                 if (ret == -ENOSPC)
4191                         ret = 0;
4192                 break;
4193         case COMMIT_TRANS:
4194                 ret = may_commit_transaction(root, space_info, orig_bytes, 0);
4195                 break;
4196         default:
4197                 ret = -ENOSPC;
4198                 break;
4199         }
4200
4201         return ret;
4202 }
4203 /**
4204  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
4205  * @root - the root we're allocating for
4206  * @block_rsv - the block_rsv we're allocating for
4207  * @orig_bytes - the number of bytes we want
4208  * @flush - whether or not we can flush to make our reservation
4209  *
4210  * This will reserve orgi_bytes number of bytes from the space info associated
4211  * with the block_rsv.  If there is not enough space it will make an attempt to
4212  * flush out space to make room.  It will do this by flushing delalloc if
4213  * possible or committing the transaction.  If flush is 0 then no attempts to
4214  * regain reservations will be made and this will fail if there is not enough
4215  * space already.
4216  */
4217 static int reserve_metadata_bytes(struct btrfs_root *root,
4218                                   struct btrfs_block_rsv *block_rsv,
4219                                   u64 orig_bytes,
4220                                   enum btrfs_reserve_flush_enum flush)
4221 {
4222         struct btrfs_space_info *space_info = block_rsv->space_info;
4223         u64 used;
4224         u64 num_bytes = orig_bytes;
4225         int flush_state = FLUSH_DELAYED_ITEMS_NR;
4226         int ret = 0;
4227         bool flushing = false;
4228
4229 again:
4230         ret = 0;
4231         spin_lock(&space_info->lock);
4232         /*
4233          * We only want to wait if somebody other than us is flushing and we
4234          * are actually allowed to flush all things.
4235          */
4236         while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
4237                space_info->flush) {
4238                 spin_unlock(&space_info->lock);
4239                 /*
4240                  * If we have a trans handle we can't wait because the flusher
4241                  * may have to commit the transaction, which would mean we would
4242                  * deadlock since we are waiting for the flusher to finish, but
4243                  * hold the current transaction open.
4244                  */
4245                 if (current->journal_info)
4246                         return -EAGAIN;
4247                 ret = wait_event_killable(space_info->wait, !space_info->flush);
4248                 /* Must have been killed, return */
4249                 if (ret)
4250                         return -EINTR;
4251
4252                 spin_lock(&space_info->lock);
4253         }
4254
4255         ret = -ENOSPC;
4256         used = space_info->bytes_used + space_info->bytes_reserved +
4257                 space_info->bytes_pinned + space_info->bytes_readonly +
4258                 space_info->bytes_may_use;
4259
4260         /*
4261          * The idea here is that we've not already over-reserved the block group
4262          * then we can go ahead and save our reservation first and then start
4263          * flushing if we need to.  Otherwise if we've already overcommitted
4264          * lets start flushing stuff first and then come back and try to make
4265          * our reservation.
4266          */
4267         if (used <= space_info->total_bytes) {
4268                 if (used + orig_bytes <= space_info->total_bytes) {
4269                         space_info->bytes_may_use += orig_bytes;
4270                         trace_btrfs_space_reservation(root->fs_info,
4271                                 "space_info", space_info->flags, orig_bytes, 1);
4272                         ret = 0;
4273                 } else {
4274                         /*
4275                          * Ok set num_bytes to orig_bytes since we aren't
4276                          * overocmmitted, this way we only try and reclaim what
4277                          * we need.
4278                          */
4279                         num_bytes = orig_bytes;
4280                 }
4281         } else {
4282                 /*
4283                  * Ok we're over committed, set num_bytes to the overcommitted
4284                  * amount plus the amount of bytes that we need for this
4285                  * reservation.
4286                  */
4287                 num_bytes = used - space_info->total_bytes +
4288                         (orig_bytes * 2);
4289         }
4290
4291         if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
4292                 space_info->bytes_may_use += orig_bytes;
4293                 trace_btrfs_space_reservation(root->fs_info, "space_info",
4294                                               space_info->flags, orig_bytes,
4295                                               1);
4296                 ret = 0;
4297         }
4298
4299         /*
4300          * Couldn't make our reservation, save our place so while we're trying
4301          * to reclaim space we can actually use it instead of somebody else
4302          * stealing it from us.
4303          *
4304          * We make the other tasks wait for the flush only when we can flush
4305          * all things.
4306          */
4307         if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
4308                 flushing = true;
4309                 space_info->flush = 1;
4310         }
4311
4312         spin_unlock(&space_info->lock);
4313
4314         if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
4315                 goto out;
4316
4317         ret = flush_space(root, space_info, num_bytes, orig_bytes,
4318                           flush_state);
4319         flush_state++;
4320
4321         /*
4322          * If we are FLUSH_LIMIT, we can not flush delalloc, or the deadlock
4323          * would happen. So skip delalloc flush.
4324          */
4325         if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4326             (flush_state == FLUSH_DELALLOC ||
4327              flush_state == FLUSH_DELALLOC_WAIT))
4328                 flush_state = ALLOC_CHUNK;
4329
4330         if (!ret)
4331                 goto again;
4332         else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4333                  flush_state < COMMIT_TRANS)
4334                 goto again;
4335         else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
4336                  flush_state <= COMMIT_TRANS)
4337                 goto again;
4338
4339 out:
4340         if (ret == -ENOSPC &&
4341             unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
4342                 struct btrfs_block_rsv *global_rsv =
4343                         &root->fs_info->global_block_rsv;
4344
4345                 if (block_rsv != global_rsv &&
4346                     !block_rsv_use_bytes(global_rsv, orig_bytes))
4347                         ret = 0;
4348         }
4349         if (ret == -ENOSPC)
4350                 trace_btrfs_space_reservation(root->fs_info,
4351                                               "space_info:enospc",
4352                                               space_info->flags, orig_bytes, 1);
4353         if (flushing) {
4354                 spin_lock(&space_info->lock);
4355                 space_info->flush = 0;
4356                 wake_up_all(&space_info->wait);
4357                 spin_unlock(&space_info->lock);
4358         }
4359         return ret;
4360 }
4361
4362 static struct btrfs_block_rsv *get_block_rsv(
4363                                         const struct btrfs_trans_handle *trans,
4364                                         const struct btrfs_root *root)
4365 {
4366         struct btrfs_block_rsv *block_rsv = NULL;
4367
4368         if (root->ref_cows)
4369                 block_rsv = trans->block_rsv;
4370
4371         if (root == root->fs_info->csum_root && trans->adding_csums)
4372                 block_rsv = trans->block_rsv;
4373
4374         if (root == root->fs_info->uuid_root)
4375                 block_rsv = trans->block_rsv;
4376
4377         if (!block_rsv)
4378                 block_rsv = root->block_rsv;
4379
4380         if (!block_rsv)
4381                 block_rsv = &root->fs_info->empty_block_rsv;
4382
4383         return block_rsv;
4384 }
4385
4386 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
4387                                u64 num_bytes)
4388 {
4389         int ret = -ENOSPC;
4390         spin_lock(&block_rsv->lock);
4391         if (block_rsv->reserved >= num_bytes) {
4392                 block_rsv->reserved -= num_bytes;
4393                 if (block_rsv->reserved < block_rsv->size)
4394                         block_rsv->full = 0;
4395                 ret = 0;
4396         }
4397         spin_unlock(&block_rsv->lock);
4398         return ret;
4399 }
4400
4401 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
4402                                 u64 num_bytes, int update_size)
4403 {
4404         spin_lock(&block_rsv->lock);
4405         block_rsv->reserved += num_bytes;
4406         if (update_size)
4407                 block_rsv->size += num_bytes;
4408         else if (block_rsv->reserved >= block_rsv->size)
4409                 block_rsv->full = 1;
4410         spin_unlock(&block_rsv->lock);
4411 }
4412
4413 int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
4414                              struct btrfs_block_rsv *dest, u64 num_bytes,
4415                              int min_factor)
4416 {
4417         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
4418         u64 min_bytes;
4419
4420         if (global_rsv->space_info != dest->space_info)
4421                 return -ENOSPC;
4422
4423         spin_lock(&global_rsv->lock);
4424         min_bytes = div_factor(global_rsv->size, min_factor);
4425         if (global_rsv->reserved < min_bytes + num_bytes) {
4426                 spin_unlock(&global_rsv->lock);
4427                 return -ENOSPC;
4428         }
4429         global_rsv->reserved -= num_bytes;
4430         if (global_rsv->reserved < global_rsv->size)
4431                 global_rsv->full = 0;
4432         spin_unlock(&global_rsv->lock);
4433
4434         block_rsv_add_bytes(dest, num_bytes, 1);
4435         return 0;
4436 }
4437
4438 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
4439                                     struct btrfs_block_rsv *block_rsv,
4440                                     struct btrfs_block_rsv *dest, u64 num_bytes)
4441 {
4442         struct btrfs_space_info *space_info = block_rsv->space_info;
4443
4444         spin_lock(&block_rsv->lock);
4445         if (num_bytes == (u64)-1)
4446                 num_bytes = block_rsv->size;
4447         block_rsv->size -= num_bytes;
4448         if (block_rsv->reserved >= block_rsv->size) {
4449                 num_bytes = block_rsv->reserved - block_rsv->size;
4450                 block_rsv->reserved = block_rsv->size;
4451                 block_rsv->full = 1;
4452         } else {
4453                 num_bytes = 0;
4454         }
4455         spin_unlock(&block_rsv->lock);
4456
4457         if (num_bytes > 0) {
4458                 if (dest) {
4459                         spin_lock(&dest->lock);
4460                         if (!dest->full) {
4461                                 u64 bytes_to_add;
4462
4463                                 bytes_to_add = dest->size - dest->reserved;
4464                                 bytes_to_add = min(num_bytes, bytes_to_add);
4465                                 dest->reserved += bytes_to_add;
4466                                 if (dest->reserved >= dest->size)
4467                                         dest->full = 1;
4468                                 num_bytes -= bytes_to_add;
4469                         }
4470                         spin_unlock(&dest->lock);
4471                 }
4472                 if (num_bytes) {
4473                         spin_lock(&space_info->lock);
4474                         space_info->bytes_may_use -= num_bytes;
4475                         trace_btrfs_space_reservation(fs_info, "space_info",
4476                                         space_info->flags, num_bytes, 0);
4477                         spin_unlock(&space_info->lock);
4478                 }
4479         }
4480 }
4481
4482 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
4483                                    struct btrfs_block_rsv *dst, u64 num_bytes)
4484 {
4485         int ret;
4486
4487         ret = block_rsv_use_bytes(src, num_bytes);
4488         if (ret)
4489                 return ret;
4490
4491         block_rsv_add_bytes(dst, num_bytes, 1);
4492         return 0;
4493 }
4494
4495 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
4496 {
4497         memset(rsv, 0, sizeof(*rsv));
4498         spin_lock_init(&rsv->lock);
4499         rsv->type = type;
4500 }
4501
4502 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
4503                                               unsigned short type)
4504 {
4505         struct btrfs_block_rsv *block_rsv;
4506         struct btrfs_fs_info *fs_info = root->fs_info;
4507
4508         block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
4509         if (!block_rsv)
4510                 return NULL;
4511
4512         btrfs_init_block_rsv(block_rsv, type);
4513         block_rsv->space_info = __find_space_info(fs_info,
4514                                                   BTRFS_BLOCK_GROUP_METADATA);
4515         return block_rsv;
4516 }
4517
4518 void btrfs_free_block_rsv(struct btrfs_root *root,
4519                           struct btrfs_block_rsv *rsv)
4520 {
4521         if (!rsv)
4522                 return;
4523         btrfs_block_rsv_release(root, rsv, (u64)-1);
4524         kfree(rsv);
4525 }
4526
4527 int btrfs_block_rsv_add(struct btrfs_root *root,
4528                         struct btrfs_block_rsv *block_rsv, u64 num_bytes,
4529                         enum btrfs_reserve_flush_enum flush)
4530 {
4531         int ret;
4532
4533         if (num_bytes == 0)
4534                 return 0;
4535
4536         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4537         if (!ret) {
4538                 block_rsv_add_bytes(block_rsv, num_bytes, 1);
4539                 return 0;
4540         }
4541
4542         return ret;
4543 }
4544
4545 int btrfs_block_rsv_check(struct btrfs_root *root,
4546                           struct btrfs_block_rsv *block_rsv, int min_factor)
4547 {
4548         u64 num_bytes = 0;
4549         int ret = -ENOSPC;
4550
4551         if (!block_rsv)
4552                 return 0;
4553
4554         spin_lock(&block_rsv->lock);
4555         num_bytes = div_factor(block_rsv->size, min_factor);
4556         if (block_rsv->reserved >= num_bytes)
4557                 ret = 0;
4558         spin_unlock(&block_rsv->lock);
4559
4560         return ret;
4561 }
4562
4563 int btrfs_block_rsv_refill(struct btrfs_root *root,
4564                            struct btrfs_block_rsv *block_rsv, u64 min_reserved,
4565                            enum btrfs_reserve_flush_enum flush)
4566 {
4567         u64 num_bytes = 0;
4568         int ret = -ENOSPC;
4569
4570         if (!block_rsv)
4571                 return 0;
4572
4573         spin_lock(&block_rsv->lock);
4574         num_bytes = min_reserved;
4575         if (block_rsv->reserved >= num_bytes)
4576                 ret = 0;
4577         else
4578                 num_bytes -= block_rsv->reserved;
4579         spin_unlock(&block_rsv->lock);
4580
4581         if (!ret)
4582                 return 0;
4583
4584         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4585         if (!ret) {
4586                 block_rsv_add_bytes(block_rsv, num_bytes, 0);
4587                 return 0;
4588         }
4589
4590         return ret;
4591 }
4592
4593 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
4594                             struct btrfs_block_rsv *dst_rsv,
4595                             u64 num_bytes)
4596 {
4597         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4598 }
4599
4600 void btrfs_block_rsv_release(struct btrfs_root *root,
4601                              struct btrfs_block_rsv *block_rsv,
4602                              u64 num_bytes)
4603 {
4604         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4605         if (global_rsv == block_rsv ||
4606             block_rsv->space_info != global_rsv->space_info)
4607                 global_rsv = NULL;
4608         block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
4609                                 num_bytes);
4610 }
4611
4612 /*
4613  * helper to calculate size of global block reservation.
4614  * the desired value is sum of space used by extent tree,
4615  * checksum tree and root tree
4616  */
4617 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
4618 {
4619         struct btrfs_space_info *sinfo;
4620         u64 num_bytes;
4621         u64 meta_used;
4622         u64 data_used;
4623         int csum_size = btrfs_super_csum_size(fs_info->super_copy);
4624
4625         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
4626         spin_lock(&sinfo->lock);
4627         data_used = sinfo->bytes_used;
4628         spin_unlock(&sinfo->lock);
4629
4630         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4631         spin_lock(&sinfo->lock);
4632         if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
4633                 data_used = 0;
4634         meta_used = sinfo->bytes_used;
4635         spin_unlock(&sinfo->lock);
4636
4637         num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
4638                     csum_size * 2;
4639         num_bytes += div64_u64(data_used + meta_used, 50);
4640
4641         if (num_bytes * 3 > meta_used)
4642                 num_bytes = div64_u64(meta_used, 3);
4643
4644         return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
4645 }
4646
4647 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
4648 {
4649         struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
4650         struct btrfs_space_info *sinfo = block_rsv->space_info;
4651         u64 num_bytes;
4652
4653         num_bytes = calc_global_metadata_size(fs_info);
4654
4655         spin_lock(&sinfo->lock);
4656         spin_lock(&block_rsv->lock);
4657
4658         block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);
4659
4660         num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
4661                     sinfo->bytes_reserved + sinfo->bytes_readonly +
4662                     sinfo->bytes_may_use;
4663
4664         if (sinfo->total_bytes > num_bytes) {
4665                 num_bytes = sinfo->total_bytes - num_bytes;
4666                 block_rsv->reserved += num_bytes;
4667                 sinfo->bytes_may_use += num_bytes;
4668                 trace_btrfs_space_reservation(fs_info, "space_info",
4669                                       sinfo->flags, num_bytes, 1);
4670         }
4671
4672         if (block_rsv->reserved >= block_rsv->size) {
4673                 num_bytes = block_rsv->reserved - block_rsv->size;
4674                 sinfo->bytes_may_use -= num_bytes;
4675                 trace_btrfs_space_reservation(fs_info, "space_info",
4676                                       sinfo->flags, num_bytes, 0);
4677                 block_rsv->reserved = block_rsv->size;
4678                 block_rsv->full = 1;
4679         }
4680
4681         spin_unlock(&block_rsv->lock);
4682         spin_unlock(&sinfo->lock);
4683 }
4684
4685 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
4686 {
4687         struct btrfs_space_info *space_info;
4688
4689         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4690         fs_info->chunk_block_rsv.space_info = space_info;
4691
4692         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4693         fs_info->global_block_rsv.space_info = space_info;
4694         fs_info->delalloc_block_rsv.space_info = space_info;
4695         fs_info->trans_block_rsv.space_info = space_info;
4696         fs_info->empty_block_rsv.space_info = space_info;
4697         fs_info->delayed_block_rsv.space_info = space_info;
4698
4699         fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
4700         fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
4701         fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
4702         fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
4703         if (fs_info->quota_root)
4704                 fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
4705         fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
4706
4707         update_global_block_rsv(fs_info);
4708 }
4709
4710 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
4711 {
4712         block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
4713                                 (u64)-1);
4714         WARN_ON(fs_info->delalloc_block_rsv.size > 0);
4715         WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
4716         WARN_ON(fs_info->trans_block_rsv.size > 0);
4717         WARN_ON(fs_info->trans_block_rsv.reserved > 0);
4718         WARN_ON(fs_info->chunk_block_rsv.size > 0);
4719         WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
4720         WARN_ON(fs_info->delayed_block_rsv.size > 0);
4721         WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
4722 }
4723
4724 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
4725                                   struct btrfs_root *root)
4726 {
4727         if (!trans->block_rsv)
4728                 return;
4729
4730         if (!trans->bytes_reserved)
4731                 return;
4732
4733         trace_btrfs_space_reservation(root->fs_info, "transaction",
4734                                       trans->transid, trans->bytes_reserved, 0);
4735         btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
4736         trans->bytes_reserved = 0;
4737 }
4738
4739 /* Can only return 0 or -ENOSPC */
4740 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
4741                                   struct inode *inode)
4742 {
4743         struct btrfs_root *root = BTRFS_I(inode)->root;
4744         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4745         struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
4746
4747         /*
4748          * We need to hold space in order to delete our orphan item once we've
4749          * added it, so this takes the reservation so we can release it later
4750          * when we are truly done with the orphan item.
4751          */
4752         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4753         trace_btrfs_space_reservation(root->fs_info, "orphan",
4754                                       btrfs_ino(inode), num_bytes, 1);
4755         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4756 }
4757
4758 void btrfs_orphan_release_metadata(struct inode *inode)
4759 {
4760         struct btrfs_root *root = BTRFS_I(inode)->root;
4761         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4762         trace_btrfs_space_reservation(root->fs_info, "orphan",
4763                                       btrfs_ino(inode), num_bytes, 0);
4764         btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
4765 }
4766
4767 /*
4768  * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
4769  * root: the root of the parent directory
4770  * rsv: block reservation
4771  * items: the number of items that we need do reservation
4772  * qgroup_reserved: used to return the reserved size in qgroup
4773  *
4774  * This function is used to reserve the space for snapshot/subvolume
4775  * creation and deletion. Those operations are different with the
4776  * common file/directory operations, they change two fs/file trees
4777  * and root tree, the number of items that the qgroup reserves is
4778  * different with the free space reservation. So we can not use
4779  * the space reseravtion mechanism in start_transaction().
4780  */
4781 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
4782                                      struct btrfs_block_rsv *rsv,
4783                                      int items,
4784                                      u64 *qgroup_reserved,
4785                                      bool use_global_rsv)
4786 {
4787         u64 num_bytes;
4788         int ret;
4789         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4790
4791         if (root->fs_info->quota_enabled) {
4792                 /* One for parent inode, two for dir entries */
4793                 num_bytes = 3 * root->leafsize;
4794                 ret = btrfs_qgroup_reserve(root, num_bytes);
4795                 if (ret)
4796                         return ret;
4797         } else {
4798                 num_bytes = 0;
4799         }
4800
4801         *qgroup_reserved = num_bytes;
4802
4803         num_bytes = btrfs_calc_trans_metadata_size(root, items);
4804         rsv->space_info = __find_space_info(root->fs_info,
4805                                             BTRFS_BLOCK_GROUP_METADATA);
4806         ret = btrfs_block_rsv_add(root, rsv, num_bytes,
4807                                   BTRFS_RESERVE_FLUSH_ALL);
4808
4809         if (ret == -ENOSPC && use_global_rsv)
4810                 ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes);
4811
4812         if (ret) {
4813                 if (*qgroup_reserved)
4814                         btrfs_qgroup_free(root, *qgroup_reserved);
4815         }
4816
4817         return ret;
4818 }
4819
4820 void btrfs_subvolume_release_metadata(struct btrfs_root *root,
4821                                       struct btrfs_block_rsv *rsv,
4822                                       u64 qgroup_reserved)
4823 {
4824         btrfs_block_rsv_release(root, rsv, (u64)-1);
4825         if (qgroup_reserved)
4826                 btrfs_qgroup_free(root, qgroup_reserved);
4827 }
4828
4829 /**
4830  * drop_outstanding_extent - drop an outstanding extent
4831  * @inode: the inode we're dropping the extent for
4832  *
4833  * This is called when we are freeing up an outstanding extent, either called
4834  * after an error or after an extent is written.  This will return the number of
4835  * reserved extents that need to be freed.  This must be called with
4836  * BTRFS_I(inode)->lock held.
4837  */
4838 static unsigned drop_outstanding_extent(struct inode *inode)
4839 {
4840         unsigned drop_inode_space = 0;
4841         unsigned dropped_extents = 0;
4842
4843         BUG_ON(!BTRFS_I(inode)->outstanding_extents);
4844         BTRFS_I(inode)->outstanding_extents--;
4845
4846         if (BTRFS_I(inode)->outstanding_extents == 0 &&
4847             test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4848                                &BTRFS_I(inode)->runtime_flags))
4849                 drop_inode_space = 1;
4850
4851         /*
4852          * If we have more or the same amount of outsanding extents than we have
4853          * reserved then we need to leave the reserved extents count alone.
4854          */
4855         if (BTRFS_I(inode)->outstanding_extents >=
4856             BTRFS_I(inode)->reserved_extents)
4857                 return drop_inode_space;
4858
4859         dropped_extents = BTRFS_I(inode)->reserved_extents -
4860                 BTRFS_I(inode)->outstanding_extents;
4861         BTRFS_I(inode)->reserved_extents -= dropped_extents;
4862         return dropped_extents + drop_inode_space;
4863 }
4864
4865 /**
4866  * calc_csum_metadata_size - return the amount of metada space that must be
4867  *      reserved/free'd for the given bytes.
4868  * @inode: the inode we're manipulating
4869  * @num_bytes: the number of bytes in question
4870  * @reserve: 1 if we are reserving space, 0 if we are freeing space
4871  *
4872  * This adjusts the number of csum_bytes in the inode and then returns the
4873  * correct amount of metadata that must either be reserved or freed.  We
4874  * calculate how many checksums we can fit into one leaf and then divide the
4875  * number of bytes that will need to be checksumed by this value to figure out
4876  * how many checksums will be required.  If we are adding bytes then the number
4877  * may go up and we will return the number of additional bytes that must be
4878  * reserved.  If it is going down we will return the number of bytes that must
4879  * be freed.
4880  *
4881  * This must be called with BTRFS_I(inode)->lock held.
4882  */
4883 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
4884                                    int reserve)
4885 {
4886         struct btrfs_root *root = BTRFS_I(inode)->root;
4887         u64 csum_size;
4888         int num_csums_per_leaf;
4889         int num_csums;
4890         int old_csums;
4891
4892         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
4893             BTRFS_I(inode)->csum_bytes == 0)
4894                 return 0;
4895
4896         old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4897         if (reserve)
4898                 BTRFS_I(inode)->csum_bytes += num_bytes;
4899         else
4900                 BTRFS_I(inode)->csum_bytes -= num_bytes;
4901         csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
4902         num_csums_per_leaf = (int)div64_u64(csum_size,
4903                                             sizeof(struct btrfs_csum_item) +
4904                                             sizeof(struct btrfs_disk_key));
4905         num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4906         num_csums = num_csums + num_csums_per_leaf - 1;
4907         num_csums = num_csums / num_csums_per_leaf;
4908
4909         old_csums = old_csums + num_csums_per_leaf - 1;
4910         old_csums = old_csums / num_csums_per_leaf;
4911
4912         /* No change, no need to reserve more */
4913         if (old_csums == num_csums)
4914                 return 0;
4915
4916         if (reserve)
4917                 return btrfs_calc_trans_metadata_size(root,
4918                                                       num_csums - old_csums);
4919
4920         return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
4921 }
4922
4923 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
4924 {
4925         struct btrfs_root *root = BTRFS_I(inode)->root;
4926         struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
4927         u64 to_reserve = 0;
4928         u64 csum_bytes;
4929         unsigned nr_extents = 0;
4930         int extra_reserve = 0;
4931         enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
4932         int ret = 0;
4933         bool delalloc_lock = true;
4934         u64 to_free = 0;
4935         unsigned dropped;
4936
4937         /* If we are a free space inode we need to not flush since we will be in
4938          * the middle of a transaction commit.  We also don't need the delalloc
4939          * mutex since we won't race with anybody.  We need this mostly to make
4940          * lockdep shut its filthy mouth.
4941          */
4942         if (btrfs_is_free_space_inode(inode)) {
4943                 flush = BTRFS_RESERVE_NO_FLUSH;
4944                 delalloc_lock = false;
4945         }
4946
4947         if (flush != BTRFS_RESERVE_NO_FLUSH &&
4948             btrfs_transaction_in_commit(root->fs_info))
4949                 schedule_timeout(1);
4950
4951         if (delalloc_lock)
4952                 mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
4953
4954         num_bytes = ALIGN(num_bytes, root->sectorsize);
4955
4956         spin_lock(&BTRFS_I(inode)->lock);
4957         BTRFS_I(inode)->outstanding_extents++;
4958
4959         if (BTRFS_I(inode)->outstanding_extents >
4960             BTRFS_I(inode)->reserved_extents)
4961                 nr_extents = BTRFS_I(inode)->outstanding_extents -
4962                         BTRFS_I(inode)->reserved_extents;
4963
4964         /*
4965          * Add an item to reserve for updating the inode when we complete the
4966          * delalloc io.
4967          */
4968         if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4969                       &BTRFS_I(inode)->runtime_flags)) {
4970                 nr_extents++;
4971                 extra_reserve = 1;
4972         }
4973
4974         to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
4975         to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
4976         csum_bytes = BTRFS_I(inode)->csum_bytes;
4977         spin_unlock(&BTRFS_I(inode)->lock);
4978
4979         if (root->fs_info->quota_enabled) {
4980                 ret = btrfs_qgroup_reserve(root, num_bytes +
4981                                            nr_extents * root->leafsize);
4982                 if (ret)
4983                         goto out_fail;
4984         }
4985
4986         ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
4987         if (unlikely(ret)) {
4988                 if (root->fs_info->quota_enabled)
4989                         btrfs_qgroup_free(root, num_bytes +
4990                                                 nr_extents * root->leafsize);
4991                 goto out_fail;
4992         }
4993
4994         spin_lock(&BTRFS_I(inode)->lock);
4995         if (extra_reserve) {
4996                 set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4997                         &BTRFS_I(inode)->runtime_flags);
4998                 nr_extents--;
4999         }
5000         BTRFS_I(inode)->reserved_extents += nr_extents;
5001         spin_unlock(&BTRFS_I(inode)->lock);
5002
5003         if (delalloc_lock)
5004                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5005
5006         if (to_reserve)
5007                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5008                                               btrfs_ino(inode), to_reserve, 1);
5009         block_rsv_add_bytes(block_rsv, to_reserve, 1);
5010
5011         return 0;
5012
5013 out_fail:
5014         spin_lock(&BTRFS_I(inode)->lock);
5015         dropped = drop_outstanding_extent(inode);
5016         /*
5017          * If the inodes csum_bytes is the same as the original
5018          * csum_bytes then we know we haven't raced with any free()ers
5019          * so we can just reduce our inodes csum bytes and carry on.
5020          */
5021         if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
5022                 calc_csum_metadata_size(inode, num_bytes, 0);
5023         } else {
5024                 u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
5025                 u64 bytes;
5026
5027                 /*
5028                  * This is tricky, but first we need to figure out how much we
5029                  * free'd from any free-ers that occured during this
5030                  * reservation, so we reset ->csum_bytes to the csum_bytes
5031                  * before we dropped our lock, and then call the free for the
5032                  * number of bytes that were freed while we were trying our
5033                  * reservation.
5034                  */
5035                 bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
5036                 BTRFS_I(inode)->csum_bytes = csum_bytes;
5037                 to_free = calc_csum_metadata_size(inode, bytes, 0);
5038
5039
5040                 /*
5041                  * Now we need to see how much we would have freed had we not
5042                  * been making this reservation and our ->csum_bytes were not
5043                  * artificially inflated.
5044                  */
5045                 BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
5046                 bytes = csum_bytes - orig_csum_bytes;
5047                 bytes = calc_csum_metadata_size(inode, bytes, 0);
5048
5049                 /*
5050                  * Now reset ->csum_bytes to what it should be.  If bytes is
5051                  * more than to_free then we would have free'd more space had we
5052                  * not had an artificially high ->csum_bytes, so we need to free
5053                  * the remainder.  If bytes is the same or less then we don't
5054                  * need to do anything, the other free-ers did the correct
5055                  * thing.
5056                  */
5057                 BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
5058                 if (bytes > to_free)
5059                         to_free = bytes - to_free;
5060                 else
5061                         to_free = 0;
5062         }
5063         spin_unlock(&BTRFS_I(inode)->lock);
5064         if (dropped)
5065                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5066
5067         if (to_free) {
5068                 btrfs_block_rsv_release(root, block_rsv, to_free);
5069                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5070                                               btrfs_ino(inode), to_free, 0);
5071         }
5072         if (delalloc_lock)
5073                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5074         return ret;
5075 }
5076
5077 /**
5078  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
5079  * @inode: the inode to release the reservation for
5080  * @num_bytes: the number of bytes we're releasing
5081  *
5082  * This will release the metadata reservation for an inode.  This can be called
5083  * once we complete IO for a given set of bytes to release their metadata
5084  * reservations.
5085  */
5086 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
5087 {
5088         struct btrfs_root *root = BTRFS_I(inode)->root;
5089         u64 to_free = 0;
5090         unsigned dropped;
5091
5092         num_bytes = ALIGN(num_bytes, root->sectorsize);
5093         spin_lock(&BTRFS_I(inode)->lock);
5094         dropped = drop_outstanding_extent(inode);
5095
5096         if (num_bytes)
5097                 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
5098         spin_unlock(&BTRFS_I(inode)->lock);
5099         if (dropped > 0)
5100                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5101
5102         trace_btrfs_space_reservation(root->fs_info, "delalloc",
5103                                       btrfs_ino(inode), to_free, 0);
5104         if (root->fs_info->quota_enabled) {
5105                 btrfs_qgroup_free(root, num_bytes +
5106                                         dropped * root->leafsize);
5107         }
5108
5109         btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
5110                                 to_free);
5111 }
5112
5113 /**
5114  * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
5115  * @inode: inode we're writing to
5116  * @num_bytes: the number of bytes we want to allocate
5117  *
5118  * This will do the following things
5119  *
5120  * o reserve space in the data space info for num_bytes
5121  * o reserve space in the metadata space info based on number of outstanding
5122  *   extents and how much csums will be needed
5123  * o add to the inodes ->delalloc_bytes
5124  * o add it to the fs_info's delalloc inodes list.
5125  *
5126  * This will return 0 for success and -ENOSPC if there is no space left.
5127  */
5128 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
5129 {
5130         int ret;
5131
5132         ret = btrfs_check_data_free_space(inode, num_bytes);
5133         if (ret)
5134                 return ret;
5135
5136         ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
5137         if (ret) {
5138                 btrfs_free_reserved_data_space(inode, num_bytes);
5139                 return ret;
5140         }
5141
5142         return 0;
5143 }
5144
5145 /**
5146  * btrfs_delalloc_release_space - release data and metadata space for delalloc
5147  * @inode: inode we're releasing space for
5148  * @num_bytes: the number of bytes we want to free up
5149  *
5150  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
5151  * called in the case that we don't need the metadata AND data reservations
5152  * anymore.  So if there is an error or we insert an inline extent.
5153  *
5154  * This function will release the metadata space that was not used and will
5155  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
5156  * list if there are no delalloc bytes left.
5157  */
5158 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
5159 {
5160         btrfs_delalloc_release_metadata(inode, num_bytes);
5161         btrfs_free_reserved_data_space(inode, num_bytes);
5162 }
5163
5164 static int update_block_group(struct btrfs_root *root,
5165                               u64 bytenr, u64 num_bytes, int alloc)
5166 {
5167         struct btrfs_block_group_cache *cache = NULL;
5168         struct btrfs_fs_info *info = root->fs_info;
5169         u64 total = num_bytes;
5170         u64 old_val;
5171         u64 byte_in_group;
5172         int factor;
5173
5174         /* block accounting for super block */
5175         spin_lock(&info->delalloc_root_lock);
5176         old_val = btrfs_super_bytes_used(info->super_copy);
5177         if (alloc)
5178                 old_val += num_bytes;
5179         else
5180                 old_val -= num_bytes;
5181         btrfs_set_super_bytes_used(info->super_copy, old_val);
5182         spin_unlock(&info->delalloc_root_lock);
5183
5184         while (total) {
5185                 cache = btrfs_lookup_block_group(info, bytenr);
5186                 if (!cache)
5187                         return -ENOENT;
5188                 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
5189                                     BTRFS_BLOCK_GROUP_RAID1 |
5190                                     BTRFS_BLOCK_GROUP_RAID10))
5191                         factor = 2;
5192                 else
5193                         factor = 1;
5194                 /*
5195                  * If this block group has free space cache written out, we
5196                  * need to make sure to load it if we are removing space.  This
5197                  * is because we need the unpinning stage to actually add the
5198                  * space back to the block group, otherwise we will leak space.
5199                  */
5200                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
5201                         cache_block_group(cache, 1);
5202
5203                 byte_in_group = bytenr - cache->key.objectid;
5204                 WARN_ON(byte_in_group > cache->key.offset);
5205
5206                 spin_lock(&cache->space_info->lock);
5207                 spin_lock(&cache->lock);
5208
5209                 if (btrfs_test_opt(root, SPACE_CACHE) &&
5210                     cache->disk_cache_state < BTRFS_DC_CLEAR)
5211                         cache->disk_cache_state = BTRFS_DC_CLEAR;
5212
5213                 cache->dirty = 1;
5214                 old_val = btrfs_block_group_used(&cache->item);
5215                 num_bytes = min(total, cache->key.offset - byte_in_group);
5216                 if (alloc) {
5217                         old_val += num_bytes;
5218                         btrfs_set_block_group_used(&cache->item, old_val);
5219                         cache->reserved -= num_bytes;
5220                         cache->space_info->bytes_reserved -= num_bytes;
5221                         cache->space_info->bytes_used += num_bytes;
5222                         cache->space_info->disk_used += num_bytes * factor;
5223                         spin_unlock(&cache->lock);
5224                         spin_unlock(&cache->space_info->lock);
5225                 } else {
5226                         old_val -= num_bytes;
5227                         btrfs_set_block_group_used(&cache->item, old_val);
5228                         cache->pinned += num_bytes;
5229                         cache->space_info->bytes_pinned += num_bytes;
5230                         cache->space_info->bytes_used -= num_bytes;
5231                         cache->space_info->disk_used -= num_bytes * factor;
5232                         spin_unlock(&cache->lock);
5233                         spin_unlock(&cache->space_info->lock);
5234
5235                         set_extent_dirty(info->pinned_extents,
5236                                          bytenr, bytenr + num_bytes - 1,
5237                                          GFP_NOFS | __GFP_NOFAIL);
5238                 }
5239                 btrfs_put_block_group(cache);
5240                 total -= num_bytes;
5241                 bytenr += num_bytes;
5242         }
5243         return 0;
5244 }
5245
5246 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
5247 {
5248         struct btrfs_block_group_cache *cache;
5249         u64 bytenr;
5250
5251         spin_lock(&root->fs_info->block_group_cache_lock);
5252         bytenr = root->fs_info->first_logical_byte;
5253         spin_unlock(&root->fs_info->block_group_cache_lock);
5254
5255         if (bytenr < (u64)-1)
5256                 return bytenr;
5257
5258         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
5259         if (!cache)
5260                 return 0;
5261
5262         bytenr = cache->key.objectid;
5263         btrfs_put_block_group(cache);
5264
5265         return bytenr;
5266 }
5267
5268 static int pin_down_extent(struct btrfs_root *root,
5269                            struct btrfs_block_group_cache *cache,
5270                            u64 bytenr, u64 num_bytes, int reserved)
5271 {
5272         spin_lock(&cache->space_info->lock);
5273         spin_lock(&cache->lock);
5274         cache->pinned += num_bytes;
5275         cache->space_info->bytes_pinned += num_bytes;
5276         if (reserved) {
5277                 cache->reserved -= num_bytes;
5278                 cache->space_info->bytes_reserved -= num_bytes;
5279         }
5280         spin_unlock(&cache->lock);
5281         spin_unlock(&cache->space_info->lock);
5282
5283         set_extent_dirty(root->fs_info->pinned_extents, bytenr,
5284                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
5285         if (reserved)
5286                 trace_btrfs_reserved_extent_free(root, bytenr, num_bytes);
5287         return 0;
5288 }
5289
5290 /*
5291  * this function must be called within transaction
5292  */
5293 int btrfs_pin_extent(struct btrfs_root *root,
5294                      u64 bytenr, u64 num_bytes, int reserved)
5295 {
5296         struct btrfs_block_group_cache *cache;
5297
5298         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5299         BUG_ON(!cache); /* Logic error */
5300
5301         pin_down_extent(root, cache, bytenr, num_bytes, reserved);
5302
5303         btrfs_put_block_group(cache);
5304         return 0;
5305 }
5306
5307 /*
5308  * this function must be called within transaction
5309  */
5310 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
5311                                     u64 bytenr, u64 num_bytes)
5312 {
5313         struct btrfs_block_group_cache *cache;
5314         int ret;
5315
5316         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5317         if (!cache)
5318                 return -EINVAL;
5319
5320         /*
5321          * pull in the free space cache (if any) so that our pin
5322          * removes the free space from the cache.  We have load_only set
5323          * to one because the slow code to read in the free extents does check
5324          * the pinned extents.
5325          */
5326         cache_block_group(cache, 1);
5327
5328         pin_down_extent(root, cache, bytenr, num_bytes, 0);
5329
5330         /* remove us from the free space cache (if we're there at all) */
5331         ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
5332         btrfs_put_block_group(cache);
5333         return ret;
5334 }
5335
5336 static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes)
5337 {
5338         int ret;
5339         struct btrfs_block_group_cache *block_group;
5340         struct btrfs_caching_control *caching_ctl;
5341
5342         block_group = btrfs_lookup_block_group(root->fs_info, start);
5343         if (!block_group)
5344                 return -EINVAL;
5345
5346         cache_block_group(block_group, 0);
5347         caching_ctl = get_caching_control(block_group);
5348
5349         if (!caching_ctl) {
5350                 /* Logic error */
5351                 BUG_ON(!block_group_cache_done(block_group));
5352                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
5353         } else {
5354                 mutex_lock(&caching_ctl->mutex);
5355
5356                 if (start >= caching_ctl->progress) {
5357                         ret = add_excluded_extent(root, start, num_bytes);
5358                 } else if (start + num_bytes <= caching_ctl->progress) {
5359                         ret = btrfs_remove_free_space(block_group,
5360                                                       start, num_bytes);
5361                 } else {
5362                         num_bytes = caching_ctl->progress - start;
5363                         ret = btrfs_remove_free_space(block_group,
5364                                                       start, num_bytes);
5365                         if (ret)
5366                                 goto out_lock;
5367
5368                         num_bytes = (start + num_bytes) -
5369                                 caching_ctl->progress;
5370                         start = caching_ctl->progress;
5371                         ret = add_excluded_extent(root, start, num_bytes);
5372                 }
5373 out_lock:
5374                 mutex_unlock(&caching_ctl->mutex);
5375                 put_caching_control(caching_ctl);
5376         }
5377         btrfs_put_block_group(block_group);
5378         return ret;
5379 }
5380
5381 int btrfs_exclude_logged_extents(struct btrfs_root *log,
5382                                  struct extent_buffer *eb)
5383 {
5384         struct btrfs_file_extent_item *item;
5385         struct btrfs_key key;
5386         int found_type;
5387         int i;
5388
5389         if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS))
5390                 return 0;
5391
5392         for (i = 0; i < btrfs_header_nritems(eb); i++) {
5393                 btrfs_item_key_to_cpu(eb, &key, i);
5394                 if (key.type != BTRFS_EXTENT_DATA_KEY)
5395                         continue;
5396                 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
5397                 found_type = btrfs_file_extent_type(eb, item);
5398                 if (found_type == BTRFS_FILE_EXTENT_INLINE)
5399                         continue;
5400                 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
5401                         continue;
5402                 key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
5403                 key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
5404                 __exclude_logged_extent(log, key.objectid, key.offset);
5405         }
5406
5407         return 0;
5408 }
5409
5410 /**
5411  * btrfs_update_reserved_bytes - update the block_group and space info counters
5412  * @cache:      The cache we are manipulating
5413  * @num_bytes:  The number of bytes in question
5414  * @reserve:    One of the reservation enums
5415  *
5416  * This is called by the allocator when it reserves space, or by somebody who is
5417  * freeing space that was never actually used on disk.  For example if you
5418  * reserve some space for a new leaf in transaction A and before transaction A
5419  * commits you free that leaf, you call this with reserve set to 0 in order to
5420  * clear the reservation.
5421  *
5422  * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
5423  * ENOSPC accounting.  For data we handle the reservation through clearing the
5424  * delalloc bits in the io_tree.  We have to do this since we could end up
5425  * allocating less disk space for the amount of data we have reserved in the
5426  * case of compression.
5427  *
5428  * If this is a reservation and the block group has become read only we cannot
5429  * make the reservation and return -EAGAIN, otherwise this function always
5430  * succeeds.
5431  */
5432 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
5433                                        u64 num_bytes, int reserve)
5434 {
5435         struct btrfs_space_info *space_info = cache->space_info;
5436         int ret = 0;
5437
5438         spin_lock(&space_info->lock);
5439         spin_lock(&cache->lock);
5440         if (reserve != RESERVE_FREE) {
5441                 if (cache->ro) {
5442                         ret = -EAGAIN;
5443                 } else {
5444                         cache->reserved += num_bytes;
5445                         space_info->bytes_reserved += num_bytes;
5446                         if (reserve == RESERVE_ALLOC) {
5447                                 trace_btrfs_space_reservation(cache->fs_info,
5448                                                 "space_info", space_info->flags,
5449                                                 num_bytes, 0);
5450                                 space_info->bytes_may_use -= num_bytes;
5451                         }
5452                 }
5453         } else {
5454                 if (cache->ro)
5455                         space_info->bytes_readonly += num_bytes;
5456                 cache->reserved -= num_bytes;
5457                 space_info->bytes_reserved -= num_bytes;
5458         }
5459         spin_unlock(&cache->lock);
5460         spin_unlock(&space_info->lock);
5461         return ret;
5462 }
5463
5464 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
5465                                 struct btrfs_root *root)
5466 {
5467         struct btrfs_fs_info *fs_info = root->fs_info;
5468         struct btrfs_caching_control *next;
5469         struct btrfs_caching_control *caching_ctl;
5470         struct btrfs_block_group_cache *cache;
5471         struct btrfs_space_info *space_info;
5472
5473         down_write(&fs_info->extent_commit_sem);
5474
5475         list_for_each_entry_safe(caching_ctl, next,
5476                                  &fs_info->caching_block_groups, list) {
5477                 cache = caching_ctl->block_group;
5478                 if (block_group_cache_done(cache)) {
5479                         cache->last_byte_to_unpin = (u64)-1;
5480                         list_del_init(&caching_ctl->list);
5481                         put_caching_control(caching_ctl);
5482                 } else {
5483                         cache->last_byte_to_unpin = caching_ctl->progress;
5484                 }
5485         }
5486
5487         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5488                 fs_info->pinned_extents = &fs_info->freed_extents[1];
5489         else
5490                 fs_info->pinned_extents = &fs_info->freed_extents[0];
5491
5492         up_write(&fs_info->extent_commit_sem);
5493
5494         list_for_each_entry_rcu(space_info, &fs_info->space_info, list)
5495                 percpu_counter_set(&space_info->total_bytes_pinned, 0);
5496
5497         update_global_block_rsv(fs_info);
5498 }
5499
5500 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
5501 {
5502         struct btrfs_fs_info *fs_info = root->fs_info;
5503         struct btrfs_block_group_cache *cache = NULL;
5504         struct btrfs_space_info *space_info;
5505         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5506         u64 len;
5507         bool readonly;
5508
5509         while (start <= end) {
5510                 readonly = false;
5511                 if (!cache ||
5512                     start >= cache->key.objectid + cache->key.offset) {
5513                         if (cache)
5514                                 btrfs_put_block_group(cache);
5515                         cache = btrfs_lookup_block_group(fs_info, start);
5516                         BUG_ON(!cache); /* Logic error */
5517                 }
5518
5519                 len = cache->key.objectid + cache->key.offset - start;
5520                 len = min(len, end + 1 - start);
5521
5522                 if (start < cache->last_byte_to_unpin) {
5523                         len = min(len, cache->last_byte_to_unpin - start);
5524                         btrfs_add_free_space(cache, start, len);
5525                 }
5526
5527                 start += len;
5528                 space_info = cache->space_info;
5529
5530                 spin_lock(&space_info->lock);
5531                 spin_lock(&cache->lock);
5532                 cache->pinned -= len;
5533                 space_info->bytes_pinned -= len;
5534                 if (cache->ro) {
5535                         space_info->bytes_readonly += len;
5536                         readonly = true;
5537                 }
5538                 spin_unlock(&cache->lock);
5539                 if (!readonly && global_rsv->space_info == space_info) {
5540                         spin_lock(&global_rsv->lock);
5541                         if (!global_rsv->full) {
5542                                 len = min(len, global_rsv->size -
5543                                           global_rsv->reserved);
5544                                 global_rsv->reserved += len;
5545                                 space_info->bytes_may_use += len;
5546                                 if (global_rsv->reserved >= global_rsv->size)
5547                                         global_rsv->full = 1;
5548                         }
5549                         spin_unlock(&global_rsv->lock);
5550                 }
5551                 spin_unlock(&space_info->lock);
5552         }
5553
5554         if (cache)
5555                 btrfs_put_block_group(cache);
5556         return 0;
5557 }
5558
5559 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
5560                                struct btrfs_root *root)
5561 {
5562         struct btrfs_fs_info *fs_info = root->fs_info;
5563         struct extent_io_tree *unpin;
5564         u64 start;
5565         u64 end;
5566         int ret;
5567
5568         if (trans->aborted)
5569                 return 0;
5570
5571         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5572                 unpin = &fs_info->freed_extents[1];
5573         else
5574                 unpin = &fs_info->freed_extents[0];
5575
5576         while (1) {
5577                 ret = find_first_extent_bit(unpin, 0, &start, &end,
5578                                             EXTENT_DIRTY, NULL);
5579                 if (ret)
5580                         break;
5581
5582                 if (btrfs_test_opt(root, DISCARD))
5583                         ret = btrfs_discard_extent(root, start,
5584                                                    end + 1 - start, NULL);
5585
5586                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
5587                 unpin_extent_range(root, start, end);
5588                 cond_resched();
5589         }
5590
5591         return 0;
5592 }
5593
5594 static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
5595                              u64 owner, u64 root_objectid)
5596 {
5597         struct btrfs_space_info *space_info;
5598         u64 flags;
5599
5600         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
5601                 if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
5602                         flags = BTRFS_BLOCK_GROUP_SYSTEM;
5603                 else
5604                         flags = BTRFS_BLOCK_GROUP_METADATA;
5605         } else {
5606                 flags = BTRFS_BLOCK_GROUP_DATA;
5607         }
5608
5609         space_info = __find_space_info(fs_info, flags);
5610         BUG_ON(!space_info); /* Logic bug */
5611         percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
5612 }
5613
5614
5615 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
5616                                 struct btrfs_root *root,
5617                                 u64 bytenr, u64 num_bytes, u64 parent,
5618                                 u64 root_objectid, u64 owner_objectid,
5619                                 u64 owner_offset, int refs_to_drop,
5620                                 struct btrfs_delayed_extent_op *extent_op)
5621 {
5622         struct btrfs_key key;
5623         struct btrfs_path *path;
5624         struct btrfs_fs_info *info = root->fs_info;
5625         struct btrfs_root *extent_root = info->extent_root;
5626         struct extent_buffer *leaf;
5627         struct btrfs_extent_item *ei;
5628         struct btrfs_extent_inline_ref *iref;
5629         int ret;
5630         int is_data;
5631         int extent_slot = 0;
5632         int found_extent = 0;
5633         int num_to_del = 1;
5634         u32 item_size;
5635         u64 refs;
5636         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
5637                                                  SKINNY_METADATA);
5638
5639         path = btrfs_alloc_path();
5640         if (!path)
5641                 return -ENOMEM;
5642
5643         path->reada = 1;
5644         path->leave_spinning = 1;
5645
5646         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
5647         BUG_ON(!is_data && refs_to_drop != 1);
5648
5649         if (is_data)
5650                 skinny_metadata = 0;
5651
5652         ret = lookup_extent_backref(trans, extent_root, path, &iref,
5653                                     bytenr, num_bytes, parent,
5654                                     root_objectid, owner_objectid,
5655                                     owner_offset);
5656         if (ret == 0) {
5657                 extent_slot = path->slots[0];
5658                 while (extent_slot >= 0) {
5659                         btrfs_item_key_to_cpu(path->nodes[0], &key,
5660                                               extent_slot);
5661                         if (key.objectid != bytenr)
5662                                 break;
5663                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
5664                             key.offset == num_bytes) {
5665                                 found_extent = 1;
5666                                 break;
5667                         }
5668                         if (key.type == BTRFS_METADATA_ITEM_KEY &&
5669                             key.offset == owner_objectid) {
5670                                 found_extent = 1;
5671                                 break;
5672                         }
5673                         if (path->slots[0] - extent_slot > 5)
5674                                 break;
5675                         extent_slot--;
5676                 }
5677 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5678                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
5679                 if (found_extent && item_size < sizeof(*ei))
5680                         found_extent = 0;
5681 #endif
5682                 if (!found_extent) {
5683                         BUG_ON(iref);
5684                         ret = remove_extent_backref(trans, extent_root, path,
5685                                                     NULL, refs_to_drop,
5686                                                     is_data);
5687                         if (ret) {
5688                                 btrfs_abort_transaction(trans, extent_root, ret);
5689                                 goto out;
5690                         }
5691                         btrfs_release_path(path);
5692                         path->leave_spinning = 1;
5693
5694                         key.objectid = bytenr;
5695                         key.type = BTRFS_EXTENT_ITEM_KEY;
5696                         key.offset = num_bytes;
5697
5698                         if (!is_data && skinny_metadata) {
5699                                 key.type = BTRFS_METADATA_ITEM_KEY;
5700                                 key.offset = owner_objectid;
5701                         }
5702
5703                         ret = btrfs_search_slot(trans, extent_root,
5704                                                 &key, path, -1, 1);
5705                         if (ret > 0 && skinny_metadata && path->slots[0]) {
5706                                 /*
5707                                  * Couldn't find our skinny metadata item,
5708                                  * see if we have ye olde extent item.
5709                                  */
5710                                 path->slots[0]--;
5711                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
5712                                                       path->slots[0]);
5713                                 if (key.objectid == bytenr &&
5714                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
5715                                     key.offset == num_bytes)
5716                                         ret = 0;
5717                         }
5718
5719                         if (ret > 0 && skinny_metadata) {
5720                                 skinny_metadata = false;
5721                                 key.type = BTRFS_EXTENT_ITEM_KEY;
5722                                 key.offset = num_bytes;
5723                                 btrfs_release_path(path);
5724                                 ret = btrfs_search_slot(trans, extent_root,
5725                                                         &key, path, -1, 1);
5726                         }
5727
5728                         if (ret) {
5729                                 btrfs_err(info, "umm, got %d back from search, was looking for %llu",
5730                                         ret, bytenr);
5731                                 if (ret > 0)
5732                                         btrfs_print_leaf(extent_root,
5733                                                          path->nodes[0]);
5734                         }
5735                         if (ret < 0) {
5736                                 btrfs_abort_transaction(trans, extent_root, ret);
5737                                 goto out;
5738                         }
5739                         extent_slot = path->slots[0];
5740                 }
5741         } else if (WARN_ON(ret == -ENOENT)) {
5742                 btrfs_print_leaf(extent_root, path->nodes[0]);
5743                 btrfs_err(info,
5744                         "unable to find ref byte nr %llu parent %llu root %llu  owner %llu offset %llu",
5745                         bytenr, parent, root_objectid, owner_objectid,
5746                         owner_offset);
5747         } else {
5748                 btrfs_abort_transaction(trans, extent_root, ret);
5749                 goto out;
5750         }
5751
5752         leaf = path->nodes[0];
5753         item_size = btrfs_item_size_nr(leaf, extent_slot);
5754 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5755         if (item_size < sizeof(*ei)) {
5756                 BUG_ON(found_extent || extent_slot != path->slots[0]);
5757                 ret = convert_extent_item_v0(trans, extent_root, path,
5758                                              owner_objectid, 0);
5759                 if (ret < 0) {
5760                         btrfs_abort_transaction(trans, extent_root, ret);
5761                         goto out;
5762                 }
5763
5764                 btrfs_release_path(path);
5765                 path->leave_spinning = 1;
5766
5767                 key.objectid = bytenr;
5768                 key.type = BTRFS_EXTENT_ITEM_KEY;
5769                 key.offset = num_bytes;
5770
5771                 ret = btrfs_search_slot(trans, extent_root, &key, path,
5772                                         -1, 1);
5773                 if (ret) {
5774                         btrfs_err(info, "umm, got %d back from search, was looking for %llu",
5775                                 ret, bytenr);
5776                         btrfs_print_leaf(extent_root, path->nodes[0]);
5777                 }
5778                 if (ret < 0) {
5779                         btrfs_abort_transaction(trans, extent_root, ret);
5780                         goto out;
5781                 }
5782
5783                 extent_slot = path->slots[0];
5784                 leaf = path->nodes[0];
5785                 item_size = btrfs_item_size_nr(leaf, extent_slot);
5786         }
5787 #endif
5788         BUG_ON(item_size < sizeof(*ei));
5789         ei = btrfs_item_ptr(leaf, extent_slot,
5790                             struct btrfs_extent_item);
5791         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
5792             key.type == BTRFS_EXTENT_ITEM_KEY) {
5793                 struct btrfs_tree_block_info *bi;
5794                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
5795                 bi = (struct btrfs_tree_block_info *)(ei + 1);
5796                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
5797         }
5798
5799         refs = btrfs_extent_refs(leaf, ei);
5800         if (refs < refs_to_drop) {
5801                 btrfs_err(info, "trying to drop %d refs but we only have %Lu "
5802                           "for bytenr %Lu\n", refs_to_drop, refs, bytenr);
5803                 ret = -EINVAL;
5804                 btrfs_abort_transaction(trans, extent_root, ret);
5805                 goto out;
5806         }
5807         refs -= refs_to_drop;
5808
5809         if (refs > 0) {
5810                 if (extent_op)
5811                         __run_delayed_extent_op(extent_op, leaf, ei);
5812                 /*
5813                  * In the case of inline back ref, reference count will
5814                  * be updated by remove_extent_backref
5815                  */
5816                 if (iref) {
5817                         BUG_ON(!found_extent);
5818                 } else {
5819                         btrfs_set_extent_refs(leaf, ei, refs);
5820                         btrfs_mark_buffer_dirty(leaf);
5821                 }
5822                 if (found_extent) {
5823                         ret = remove_extent_backref(trans, extent_root, path,
5824                                                     iref, refs_to_drop,
5825                                                     is_data);
5826                         if (ret) {
5827                                 btrfs_abort_transaction(trans, extent_root, ret);
5828                                 goto out;
5829                         }
5830                 }
5831                 add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
5832                                  root_objectid);
5833         } else {
5834                 if (found_extent) {
5835                         BUG_ON(is_data && refs_to_drop !=
5836                                extent_data_ref_count(root, path, iref));
5837                         if (iref) {
5838                                 BUG_ON(path->slots[0] != extent_slot);
5839                         } else {
5840                                 BUG_ON(path->slots[0] != extent_slot + 1);
5841                                 path->slots[0] = extent_slot;
5842                                 num_to_del = 2;
5843                         }
5844                 }
5845
5846                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
5847                                       num_to_del);
5848                 if (ret) {
5849                         btrfs_abort_transaction(trans, extent_root, ret);
5850                         goto out;
5851                 }
5852                 btrfs_release_path(path);
5853
5854                 if (is_data) {
5855                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
5856                         if (ret) {
5857                                 btrfs_abort_transaction(trans, extent_root, ret);
5858                                 goto out;
5859                         }
5860                 }
5861
5862                 ret = update_block_group(root, bytenr, num_bytes, 0);
5863                 if (ret) {
5864                         btrfs_abort_transaction(trans, extent_root, ret);
5865                         goto out;
5866                 }
5867         }
5868 out:
5869         btrfs_free_path(path);
5870         return ret;
5871 }
5872
5873 /*
5874  * when we free an block, it is possible (and likely) that we free the last
5875  * delayed ref for that extent as well.  This searches the delayed ref tree for
5876  * a given extent, and if there are no other delayed refs to be processed, it
5877  * removes it from the tree.
5878  */
5879 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
5880                                       struct btrfs_root *root, u64 bytenr)
5881 {
5882         struct btrfs_delayed_ref_head *head;
5883         struct btrfs_delayed_ref_root *delayed_refs;
5884         int ret = 0;
5885
5886         delayed_refs = &trans->transaction->delayed_refs;
5887         spin_lock(&delayed_refs->lock);
5888         head = btrfs_find_delayed_ref_head(trans, bytenr);
5889         if (!head)
5890                 goto out_delayed_unlock;
5891
5892         spin_lock(&head->lock);
5893         if (rb_first(&head->ref_root))
5894                 goto out;
5895
5896         if (head->extent_op) {
5897                 if (!head->must_insert_reserved)
5898                         goto out;
5899                 btrfs_free_delayed_extent_op(head->extent_op);
5900                 head->extent_op = NULL;
5901         }
5902
5903         /*
5904          * waiting for the lock here would deadlock.  If someone else has it
5905          * locked they are already in the process of dropping it anyway
5906          */
5907         if (!mutex_trylock(&head->mutex))
5908                 goto out;
5909
5910         /*
5911          * at this point we have a head with no other entries.  Go
5912          * ahead and process it.
5913          */
5914         head->node.in_tree = 0;
5915         rb_erase(&head->href_node, &delayed_refs->href_root);
5916
5917         atomic_dec(&delayed_refs->num_entries);
5918
5919         /*
5920          * we don't take a ref on the node because we're removing it from the
5921          * tree, so we just steal the ref the tree was holding.
5922          */
5923         delayed_refs->num_heads--;
5924         if (head->processing == 0)
5925                 delayed_refs->num_heads_ready--;
5926         head->processing = 0;
5927         spin_unlock(&head->lock);
5928         spin_unlock(&delayed_refs->lock);
5929
5930         BUG_ON(head->extent_op);
5931         if (head->must_insert_reserved)
5932                 ret = 1;
5933
5934         mutex_unlock(&head->mutex);
5935         btrfs_put_delayed_ref(&head->node);
5936         return ret;
5937 out:
5938         spin_unlock(&head->lock);
5939
5940 out_delayed_unlock:
5941         spin_unlock(&delayed_refs->lock);
5942         return 0;
5943 }
5944
5945 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
5946                            struct btrfs_root *root,
5947                            struct extent_buffer *buf,
5948                            u64 parent, int last_ref)
5949 {
5950         struct btrfs_block_group_cache *cache = NULL;
5951         int pin = 1;
5952         int ret;
5953
5954         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5955                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
5956                                         buf->start, buf->len,
5957                                         parent, root->root_key.objectid,
5958                                         btrfs_header_level(buf),
5959                                         BTRFS_DROP_DELAYED_REF, NULL, 0);
5960                 BUG_ON(ret); /* -ENOMEM */
5961         }
5962
5963         if (!last_ref)
5964                 return;
5965
5966         cache = btrfs_lookup_block_group(root->fs_info, buf->start);
5967
5968         if (btrfs_header_generation(buf) == trans->transid) {
5969                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5970                         ret = check_ref_cleanup(trans, root, buf->start);
5971                         if (!ret)
5972                                 goto out;
5973                 }
5974
5975                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
5976                         pin_down_extent(root, cache, buf->start, buf->len, 1);
5977                         goto out;
5978                 }
5979
5980                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
5981
5982                 btrfs_add_free_space(cache, buf->start, buf->len);
5983                 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE);
5984                 trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
5985                 pin = 0;
5986         }
5987 out:
5988         if (pin)
5989                 add_pinned_bytes(root->fs_info, buf->len,
5990                                  btrfs_header_level(buf),
5991                                  root->root_key.objectid);
5992
5993         /*
5994          * Deleting the buffer, clear the corrupt flag since it doesn't matter
5995          * anymore.
5996          */
5997         clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
5998         btrfs_put_block_group(cache);
5999 }
6000
6001 /* Can return -ENOMEM */
6002 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
6003                       u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
6004                       u64 owner, u64 offset, int for_cow)
6005 {
6006         int ret;
6007         struct btrfs_fs_info *fs_info = root->fs_info;
6008
6009         add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);
6010
6011         /*
6012          * tree log blocks never actually go into the extent allocation
6013          * tree, just update pinning info and exit early.
6014          */
6015         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
6016                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
6017                 /* unlocks the pinned mutex */
6018                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
6019                 ret = 0;
6020         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6021                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
6022                                         num_bytes,
6023                                         parent, root_objectid, (int)owner,
6024                                         BTRFS_DROP_DELAYED_REF, NULL, for_cow);
6025         } else {
6026                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
6027                                                 num_bytes,
6028                                                 parent, root_objectid, owner,
6029                                                 offset, BTRFS_DROP_DELAYED_REF,
6030                                                 NULL, for_cow);
6031         }
6032         return ret;
6033 }
6034
6035 static u64 stripe_align(struct btrfs_root *root,
6036                         struct btrfs_block_group_cache *cache,
6037                         u64 val, u64 num_bytes)
6038 {
6039         u64 ret = ALIGN(val, root->stripesize);
6040         return ret;
6041 }
6042
6043 /*
6044  * when we wait for progress in the block group caching, its because
6045  * our allocation attempt failed at least once.  So, we must sleep
6046  * and let some progress happen before we try again.
6047  *
6048  * This function will sleep at least once waiting for new free space to
6049  * show up, and then it will check the block group free space numbers
6050  * for our min num_bytes.  Another option is to have it go ahead
6051  * and look in the rbtree for a free extent of a given size, but this
6052  * is a good start.
6053  *
6054  * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
6055  * any of the information in this block group.
6056  */
6057 static noinline void
6058 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
6059                                 u64 num_bytes)
6060 {
6061         struct btrfs_caching_control *caching_ctl;
6062
6063         caching_ctl = get_caching_control(cache);
6064         if (!caching_ctl)
6065                 return;
6066
6067         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
6068                    (cache->free_space_ctl->free_space >= num_bytes));
6069
6070         put_caching_control(caching_ctl);
6071 }
6072
6073 static noinline int
6074 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
6075 {
6076         struct btrfs_caching_control *caching_ctl;
6077         int ret = 0;
6078
6079         caching_ctl = get_caching_control(cache);
6080         if (!caching_ctl)
6081                 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
6082
6083         wait_event(caching_ctl->wait, block_group_cache_done(cache));
6084         if (cache->cached == BTRFS_CACHE_ERROR)
6085                 ret = -EIO;
6086         put_caching_control(caching_ctl);
6087         return ret;
6088 }
6089
6090 int __get_raid_index(u64 flags)
6091 {
6092         if (flags & BTRFS_BLOCK_GROUP_RAID10)
6093                 return BTRFS_RAID_RAID10;
6094         else if (flags & BTRFS_BLOCK_GROUP_RAID1)
6095                 return BTRFS_RAID_RAID1;
6096         else if (flags & BTRFS_BLOCK_GROUP_DUP)
6097                 return BTRFS_RAID_DUP;
6098         else if (flags & BTRFS_BLOCK_GROUP_RAID0)
6099                 return BTRFS_RAID_RAID0;
6100         else if (flags & BTRFS_BLOCK_GROUP_RAID5)
6101                 return BTRFS_RAID_RAID5;
6102         else if (flags & BTRFS_BLOCK_GROUP_RAID6)
6103                 return BTRFS_RAID_RAID6;
6104
6105         return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
6106 }
6107
6108 int get_block_group_index(struct btrfs_block_group_cache *cache)
6109 {
6110         return __get_raid_index(cache->flags);
6111 }
6112
6113 static const char *btrfs_raid_type_names[BTRFS_NR_RAID_TYPES] = {
6114         [BTRFS_RAID_RAID10]     = "raid10",
6115         [BTRFS_RAID_RAID1]      = "raid1",
6116         [BTRFS_RAID_DUP]        = "dup",
6117         [BTRFS_RAID_RAID0]      = "raid0",
6118         [BTRFS_RAID_SINGLE]     = "single",
6119         [BTRFS_RAID_RAID5]      = "raid5",
6120         [BTRFS_RAID_RAID6]      = "raid6",
6121 };
6122
6123 static const char *get_raid_name(enum btrfs_raid_types type)
6124 {
6125         if (type >= BTRFS_NR_RAID_TYPES)
6126                 return NULL;
6127
6128         return btrfs_raid_type_names[type];
6129 }
6130
6131 enum btrfs_loop_type {
6132         LOOP_CACHING_NOWAIT = 0,
6133         LOOP_CACHING_WAIT = 1,
6134         LOOP_ALLOC_CHUNK = 2,
6135         LOOP_NO_EMPTY_SIZE = 3,
6136 };
6137
6138 /*
6139  * walks the btree of allocated extents and find a hole of a given size.
6140  * The key ins is changed to record the hole:
6141  * ins->objectid == start position
6142  * ins->flags = BTRFS_EXTENT_ITEM_KEY
6143  * ins->offset == the size of the hole.
6144  * Any available blocks before search_start are skipped.
6145  *
6146  * If there is no suitable free space, we will record the max size of
6147  * the free space extent currently.
6148  */
6149 static noinline int find_free_extent(struct btrfs_root *orig_root,
6150                                      u64 num_bytes, u64 empty_size,
6151                                      u64 hint_byte, struct btrfs_key *ins,
6152                                      u64 flags)
6153 {
6154         int ret = 0;
6155         struct btrfs_root *root = orig_root->fs_info->extent_root;
6156         struct btrfs_free_cluster *last_ptr = NULL;
6157         struct btrfs_block_group_cache *block_group = NULL;
6158         u64 search_start = 0;
6159         u64 max_extent_size = 0;
6160         int empty_cluster = 2 * 1024 * 1024;
6161         struct btrfs_space_info *space_info;
6162         int loop = 0;
6163         int index = __get_raid_index(flags);
6164         int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
6165                 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
6166         bool failed_cluster_refill = false;
6167         bool failed_alloc = false;
6168         bool use_cluster = true;
6169         bool have_caching_bg = false;
6170
6171         WARN_ON(num_bytes < root->sectorsize);
6172         btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
6173         ins->objectid = 0;
6174         ins->offset = 0;
6175
6176         trace_find_free_extent(orig_root, num_bytes, empty_size, flags);
6177
6178         space_info = __find_space_info(root->fs_info, flags);
6179         if (!space_info) {
6180                 btrfs_err(root->fs_info, "No space info for %llu", flags);
6181                 return -ENOSPC;
6182         }
6183
6184         /*
6185          * If the space info is for both data and metadata it means we have a
6186          * small filesystem and we can't use the clustering stuff.
6187          */
6188         if (btrfs_mixed_space_info(space_info))
6189                 use_cluster = false;
6190
6191         if (flags & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
6192                 last_ptr = &root->fs_info->meta_alloc_cluster;
6193                 if (!btrfs_test_opt(root, SSD))
6194                         empty_cluster = 64 * 1024;
6195         }
6196
6197         if ((flags & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
6198             btrfs_test_opt(root, SSD)) {
6199                 last_ptr = &root->fs_info->data_alloc_cluster;
6200         }
6201
6202         if (last_ptr) {
6203                 spin_lock(&last_ptr->lock);
6204                 if (last_ptr->block_group)
6205                         hint_byte = last_ptr->window_start;
6206                 spin_unlock(&last_ptr->lock);
6207         }
6208
6209         search_start = max(search_start, first_logical_byte(root, 0));
6210         search_start = max(search_start, hint_byte);
6211
6212         if (!last_ptr)
6213                 empty_cluster = 0;
6214
6215         if (search_start == hint_byte) {
6216                 block_group = btrfs_lookup_block_group(root->fs_info,
6217                                                        search_start);
6218                 /*
6219                  * we don't want to use the block group if it doesn't match our
6220                  * allocation bits, or if its not cached.
6221                  *
6222                  * However if we are re-searching with an ideal block group
6223                  * picked out then we don't care that the block group is cached.
6224                  */
6225                 if (block_group && block_group_bits(block_group, flags) &&
6226                     block_group->cached != BTRFS_CACHE_NO) {
6227                         down_read(&space_info->groups_sem);
6228                         if (list_empty(&block_group->list) ||
6229                             block_group->ro) {
6230                                 /*
6231                                  * someone is removing this block group,
6232                                  * we can't jump into the have_block_group
6233                                  * target because our list pointers are not
6234                                  * valid
6235                                  */
6236                                 btrfs_put_block_group(block_group);
6237                                 up_read(&space_info->groups_sem);
6238                         } else {
6239                                 index = get_block_group_index(block_group);
6240                                 goto have_block_group;
6241                         }
6242                 } else if (block_group) {
6243                         btrfs_put_block_group(block_group);
6244                 }
6245         }
6246 search:
6247         have_caching_bg = false;
6248         down_read(&space_info->groups_sem);
6249         list_for_each_entry(block_group, &space_info->block_groups[index],
6250                             list) {
6251                 u64 offset;
6252                 int cached;
6253
6254                 btrfs_get_block_group(block_group);
6255                 search_start = block_group->key.objectid;
6256
6257                 /*
6258                  * this can happen if we end up cycling through all the
6259                  * raid types, but we want to make sure we only allocate
6260                  * for the proper type.
6261                  */
6262                 if (!block_group_bits(block_group, flags)) {
6263                     u64 extra = BTRFS_BLOCK_GROUP_DUP |
6264                                 BTRFS_BLOCK_GROUP_RAID1 |
6265                                 BTRFS_BLOCK_GROUP_RAID5 |
6266                                 BTRFS_BLOCK_GROUP_RAID6 |
6267                                 BTRFS_BLOCK_GROUP_RAID10;
6268
6269                         /*
6270                          * if they asked for extra copies and this block group
6271                          * doesn't provide them, bail.  This does allow us to
6272                          * fill raid0 from raid1.
6273                          */
6274                         if ((flags & extra) && !(block_group->flags & extra))
6275                                 goto loop;
6276                 }
6277
6278 have_block_group:
6279                 cached = block_group_cache_done(block_group);
6280                 if (unlikely(!cached)) {
6281                         ret = cache_block_group(block_group, 0);
6282                         BUG_ON(ret < 0);
6283                         ret = 0;
6284                 }
6285
6286                 if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
6287                         goto loop;
6288                 if (unlikely(block_group->ro))
6289                         goto loop;
6290
6291                 /*
6292                  * Ok we want to try and use the cluster allocator, so
6293                  * lets look there
6294                  */
6295                 if (last_ptr) {
6296                         struct btrfs_block_group_cache *used_block_group;
6297                         unsigned long aligned_cluster;
6298                         /*
6299                          * the refill lock keeps out other
6300                          * people trying to start a new cluster
6301                          */
6302                         spin_lock(&last_ptr->refill_lock);
6303                         used_block_group = last_ptr->block_group;
6304                         if (used_block_group != block_group &&
6305                             (!used_block_group ||
6306                              used_block_group->ro ||
6307                              !block_group_bits(used_block_group, flags)))
6308                                 goto refill_cluster;
6309
6310                         if (used_block_group != block_group)
6311                                 btrfs_get_block_group(used_block_group);
6312
6313                         offset = btrfs_alloc_from_cluster(used_block_group,
6314                                                 last_ptr,
6315                                                 num_bytes,
6316                                                 used_block_group->key.objectid,
6317                                                 &max_extent_size);
6318                         if (offset) {
6319                                 /* we have a block, we're done */
6320                                 spin_unlock(&last_ptr->refill_lock);
6321                                 trace_btrfs_reserve_extent_cluster(root,
6322                                                 used_block_group,
6323                                                 search_start, num_bytes);
6324                                 if (used_block_group != block_group) {
6325                                         btrfs_put_block_group(block_group);
6326                                         block_group = used_block_group;
6327                                 }
6328                                 goto checks;
6329                         }
6330
6331                         WARN_ON(last_ptr->block_group != used_block_group);
6332                         if (used_block_group != block_group)
6333                                 btrfs_put_block_group(used_block_group);
6334 refill_cluster:
6335                         /* If we are on LOOP_NO_EMPTY_SIZE, we can't
6336                          * set up a new clusters, so lets just skip it
6337                          * and let the allocator find whatever block
6338                          * it can find.  If we reach this point, we
6339                          * will have tried the cluster allocator
6340                          * plenty of times and not have found
6341                          * anything, so we are likely way too
6342                          * fragmented for the clustering stuff to find
6343                          * anything.
6344                          *
6345                          * However, if the cluster is taken from the
6346                          * current block group, release the cluster
6347                          * first, so that we stand a better chance of
6348                          * succeeding in the unclustered
6349                          * allocation.  */
6350                         if (loop >= LOOP_NO_EMPTY_SIZE &&
6351                             last_ptr->block_group != block_group) {
6352                                 spin_unlock(&last_ptr->refill_lock);
6353                                 goto unclustered_alloc;
6354                         }
6355
6356                         /*
6357                          * this cluster didn't work out, free it and
6358                          * start over
6359                          */
6360                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
6361
6362                         if (loop >= LOOP_NO_EMPTY_SIZE) {
6363                                 spin_unlock(&last_ptr->refill_lock);
6364                                 goto unclustered_alloc;
6365                         }
6366
6367                         aligned_cluster = max_t(unsigned long,
6368                                                 empty_cluster + empty_size,
6369                                               block_group->full_stripe_len);
6370
6371                         /* allocate a cluster in this block group */
6372                         ret = btrfs_find_space_cluster(root, block_group,
6373                                                        last_ptr, search_start,
6374                                                        num_bytes,
6375                                                        aligned_cluster);
6376                         if (ret == 0) {
6377                                 /*
6378                                  * now pull our allocation out of this
6379                                  * cluster
6380                                  */
6381                                 offset = btrfs_alloc_from_cluster(block_group,
6382                                                         last_ptr,
6383                                                         num_bytes,
6384                                                         search_start,
6385                                                         &max_extent_size);
6386                                 if (offset) {
6387                                         /* we found one, proceed */
6388                                         spin_unlock(&last_ptr->refill_lock);
6389                                         trace_btrfs_reserve_extent_cluster(root,
6390                                                 block_group, search_start,
6391                                                 num_bytes);
6392                                         goto checks;
6393                                 }
6394                         } else if (!cached && loop > LOOP_CACHING_NOWAIT
6395                                    && !failed_cluster_refill) {
6396                                 spin_unlock(&last_ptr->refill_lock);
6397
6398                                 failed_cluster_refill = true;
6399                                 wait_block_group_cache_progress(block_group,
6400                                        num_bytes + empty_cluster + empty_size);
6401                                 goto have_block_group;
6402                         }
6403
6404                         /*
6405                          * at this point we either didn't find a cluster
6406                          * or we weren't able to allocate a block from our
6407                          * cluster.  Free the cluster we've been trying
6408                          * to use, and go to the next block group
6409                          */
6410                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
6411                         spin_unlock(&last_ptr->refill_lock);
6412                         goto loop;
6413                 }
6414
6415 unclustered_alloc:
6416                 spin_lock(&block_group->free_space_ctl->tree_lock);
6417                 if (cached &&
6418                     block_group->free_space_ctl->free_space <
6419                     num_bytes + empty_cluster + empty_size) {
6420                         if (block_group->free_space_ctl->free_space >
6421                             max_extent_size)
6422                                 max_extent_size =
6423                                         block_group->free_space_ctl->free_space;
6424                         spin_unlock(&block_group->free_space_ctl->tree_lock);
6425                         goto loop;
6426                 }
6427                 spin_unlock(&block_group->free_space_ctl->tree_lock);
6428
6429                 offset = btrfs_find_space_for_alloc(block_group, search_start,
6430                                                     num_bytes, empty_size,
6431                                                     &max_extent_size);
6432                 /*
6433                  * If we didn't find a chunk, and we haven't failed on this
6434                  * block group before, and this block group is in the middle of
6435                  * caching and we are ok with waiting, then go ahead and wait
6436                  * for progress to be made, and set failed_alloc to true.
6437                  *
6438                  * If failed_alloc is true then we've already waited on this
6439                  * block group once and should move on to the next block group.
6440                  */
6441                 if (!offset && !failed_alloc && !cached &&
6442                     loop > LOOP_CACHING_NOWAIT) {
6443                         wait_block_group_cache_progress(block_group,
6444                                                 num_bytes + empty_size);
6445                         failed_alloc = true;
6446                         goto have_block_group;
6447                 } else if (!offset) {
6448                         if (!cached)
6449                                 have_caching_bg = true;
6450                         goto loop;
6451                 }
6452 checks:
6453                 search_start = stripe_align(root, block_group,
6454                                             offset, num_bytes);
6455
6456                 /* move on to the next group */
6457                 if (search_start + num_bytes >
6458                     block_group->key.objectid + block_group->key.offset) {
6459                         btrfs_add_free_space(block_group, offset, num_bytes);
6460                         goto loop;
6461                 }
6462
6463                 if (offset < search_start)
6464                         btrfs_add_free_space(block_group, offset,
6465                                              search_start - offset);
6466                 BUG_ON(offset > search_start);
6467
6468                 ret = btrfs_update_reserved_bytes(block_group, num_bytes,
6469                                                   alloc_type);
6470                 if (ret == -EAGAIN) {
6471                         btrfs_add_free_space(block_group, offset, num_bytes);
6472                         goto loop;
6473                 }
6474
6475                 /* we are all good, lets return */
6476                 ins->objectid = search_start;
6477                 ins->offset = num_bytes;
6478
6479                 trace_btrfs_reserve_extent(orig_root, block_group,
6480                                            search_start, num_bytes);
6481                 btrfs_put_block_group(block_group);
6482                 break;
6483 loop:
6484                 failed_cluster_refill = false;
6485                 failed_alloc = false;
6486                 BUG_ON(index != get_block_group_index(block_group));
6487                 btrfs_put_block_group(block_group);
6488         }
6489         up_read(&space_info->groups_sem);
6490
6491         if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
6492                 goto search;
6493
6494         if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
6495                 goto search;
6496
6497         /*
6498          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
6499          *                      caching kthreads as we move along
6500          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
6501          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
6502          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
6503          *                      again
6504          */
6505         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
6506                 index = 0;
6507                 loop++;
6508                 if (loop == LOOP_ALLOC_CHUNK) {
6509                         struct btrfs_trans_handle *trans;
6510
6511                         trans = btrfs_join_transaction(root);
6512                         if (IS_ERR(trans)) {
6513                                 ret = PTR_ERR(trans);
6514                                 goto out;
6515                         }
6516
6517                         ret = do_chunk_alloc(trans, root, flags,
6518                                              CHUNK_ALLOC_FORCE);
6519                         /*
6520                          * Do not bail out on ENOSPC since we
6521                          * can do more things.
6522                          */
6523                         if (ret < 0 && ret != -ENOSPC)
6524                                 btrfs_abort_transaction(trans,
6525                                                         root, ret);
6526                         else
6527                                 ret = 0;
6528                         btrfs_end_transaction(trans, root);
6529                         if (ret)
6530                                 goto out;
6531                 }
6532
6533                 if (loop == LOOP_NO_EMPTY_SIZE) {
6534                         empty_size = 0;
6535                         empty_cluster = 0;
6536                 }
6537
6538                 goto search;
6539         } else if (!ins->objectid) {
6540                 ret = -ENOSPC;
6541         } else if (ins->objectid) {
6542                 ret = 0;
6543         }
6544 out:
6545         if (ret == -ENOSPC)
6546                 ins->offset = max_extent_size;
6547         return ret;
6548 }
6549
6550 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
6551                             int dump_block_groups)
6552 {
6553         struct btrfs_block_group_cache *cache;
6554         int index = 0;
6555
6556         spin_lock(&info->lock);
6557         printk(KERN_INFO "BTRFS: space_info %llu has %llu free, is %sfull\n",
6558                info->flags,
6559                info->total_bytes - info->bytes_used - info->bytes_pinned -
6560                info->bytes_reserved - info->bytes_readonly,
6561                (info->full) ? "" : "not ");
6562         printk(KERN_INFO "BTRFS: space_info total=%llu, used=%llu, pinned=%llu, "
6563                "reserved=%llu, may_use=%llu, readonly=%llu\n",
6564                info->total_bytes, info->bytes_used, info->bytes_pinned,
6565                info->bytes_reserved, info->bytes_may_use,
6566                info->bytes_readonly);
6567         spin_unlock(&info->lock);
6568
6569         if (!dump_block_groups)
6570                 return;
6571
6572         down_read(&info->groups_sem);
6573 again:
6574         list_for_each_entry(cache, &info->block_groups[index], list) {
6575                 spin_lock(&cache->lock);
6576                 printk(KERN_INFO "BTRFS: "
6577                            "block group %llu has %llu bytes, "
6578                            "%llu used %llu pinned %llu reserved %s\n",
6579                        cache->key.objectid, cache->key.offset,
6580                        btrfs_block_group_used(&cache->item), cache->pinned,
6581                        cache->reserved, cache->ro ? "[readonly]" : "");
6582                 btrfs_dump_free_space(cache, bytes);
6583                 spin_unlock(&cache->lock);
6584         }
6585         if (++index < BTRFS_NR_RAID_TYPES)
6586                 goto again;
6587         up_read(&info->groups_sem);
6588 }
6589
6590 int btrfs_reserve_extent(struct btrfs_root *root,
6591                          u64 num_bytes, u64 min_alloc_size,
6592                          u64 empty_size, u64 hint_byte,
6593                          struct btrfs_key *ins, int is_data)
6594 {
6595         bool final_tried = false;
6596         u64 flags;
6597         int ret;
6598
6599         flags = btrfs_get_alloc_profile(root, is_data);
6600 again:
6601         WARN_ON(num_bytes < root->sectorsize);
6602         ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins,
6603                                flags);
6604
6605         if (ret == -ENOSPC) {
6606                 if (!final_tried && ins->offset) {
6607                         num_bytes = min(num_bytes >> 1, ins->offset);
6608                         num_bytes = round_down(num_bytes, root->sectorsize);
6609                         num_bytes = max(num_bytes, min_alloc_size);
6610                         if (num_bytes == min_alloc_size)
6611                                 final_tried = true;
6612                         goto again;
6613                 } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
6614                         struct btrfs_space_info *sinfo;
6615
6616                         sinfo = __find_space_info(root->fs_info, flags);
6617                         btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
6618                                 flags, num_bytes);
6619                         if (sinfo)
6620                                 dump_space_info(sinfo, num_bytes, 1);
6621                 }
6622         }
6623
6624         return ret;
6625 }
6626
6627 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
6628                                         u64 start, u64 len, int pin)
6629 {
6630         struct btrfs_block_group_cache *cache;
6631         int ret = 0;
6632
6633         cache = btrfs_lookup_block_group(root->fs_info, start);
6634         if (!cache) {
6635                 btrfs_err(root->fs_info, "Unable to find block group for %llu",
6636                         start);
6637                 return -ENOSPC;
6638         }
6639
6640         if (btrfs_test_opt(root, DISCARD))
6641                 ret = btrfs_discard_extent(root, start, len, NULL);
6642
6643         if (pin)
6644                 pin_down_extent(root, cache, start, len, 1);
6645         else {
6646                 btrfs_add_free_space(cache, start, len);
6647                 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
6648         }
6649         btrfs_put_block_group(cache);
6650
6651         trace_btrfs_reserved_extent_free(root, start, len);
6652
6653         return ret;
6654 }
6655
6656 int btrfs_free_reserved_extent(struct btrfs_root *root,
6657                                         u64 start, u64 len)
6658 {
6659         return __btrfs_free_reserved_extent(root, start, len, 0);
6660 }
6661
6662 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
6663                                        u64 start, u64 len)
6664 {
6665         return __btrfs_free_reserved_extent(root, start, len, 1);
6666 }
6667
6668 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6669                                       struct btrfs_root *root,
6670                                       u64 parent, u64 root_objectid,
6671                                       u64 flags, u64 owner, u64 offset,
6672                                       struct btrfs_key *ins, int ref_mod)
6673 {
6674         int ret;
6675         struct btrfs_fs_info *fs_info = root->fs_info;
6676         struct btrfs_extent_item *extent_item;
6677         struct btrfs_extent_inline_ref *iref;
6678         struct btrfs_path *path;
6679         struct extent_buffer *leaf;
6680         int type;
6681         u32 size;
6682
6683         if (parent > 0)
6684                 type = BTRFS_SHARED_DATA_REF_KEY;
6685         else
6686                 type = BTRFS_EXTENT_DATA_REF_KEY;
6687
6688         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
6689
6690         path = btrfs_alloc_path();
6691         if (!path)
6692                 return -ENOMEM;
6693
6694         path->leave_spinning = 1;
6695         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6696                                       ins, size);
6697         if (ret) {
6698                 btrfs_free_path(path);
6699                 return ret;
6700         }
6701
6702         leaf = path->nodes[0];
6703         extent_item = btrfs_item_ptr(leaf, path->slots[0],
6704                                      struct btrfs_extent_item);
6705         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
6706         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6707         btrfs_set_extent_flags(leaf, extent_item,
6708                                flags | BTRFS_EXTENT_FLAG_DATA);
6709
6710         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
6711         btrfs_set_extent_inline_ref_type(leaf, iref, type);
6712         if (parent > 0) {
6713                 struct btrfs_shared_data_ref *ref;
6714                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
6715                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6716                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
6717         } else {
6718                 struct btrfs_extent_data_ref *ref;
6719                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
6720                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
6721                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
6722                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
6723                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
6724         }
6725
6726         btrfs_mark_buffer_dirty(path->nodes[0]);
6727         btrfs_free_path(path);
6728
6729         ret = update_block_group(root, ins->objectid, ins->offset, 1);
6730         if (ret) { /* -ENOENT, logic error */
6731                 btrfs_err(fs_info, "update block group failed for %llu %llu",
6732                         ins->objectid, ins->offset);
6733                 BUG();
6734         }
6735         trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
6736         return ret;
6737 }
6738
6739 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
6740                                      struct btrfs_root *root,
6741                                      u64 parent, u64 root_objectid,
6742                                      u64 flags, struct btrfs_disk_key *key,
6743                                      int level, struct btrfs_key *ins)
6744 {
6745         int ret;
6746         struct btrfs_fs_info *fs_info = root->fs_info;
6747         struct btrfs_extent_item *extent_item;
6748         struct btrfs_tree_block_info *block_info;
6749         struct btrfs_extent_inline_ref *iref;
6750         struct btrfs_path *path;
6751         struct extent_buffer *leaf;
6752         u32 size = sizeof(*extent_item) + sizeof(*iref);
6753         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
6754                                                  SKINNY_METADATA);
6755
6756         if (!skinny_metadata)
6757                 size += sizeof(*block_info);
6758
6759         path = btrfs_alloc_path();
6760         if (!path) {
6761                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
6762                                                    root->leafsize);
6763                 return -ENOMEM;
6764         }
6765
6766         path->leave_spinning = 1;
6767         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6768                                       ins, size);
6769         if (ret) {
6770                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
6771                                                    root->leafsize);
6772                 btrfs_free_path(path);
6773                 return ret;
6774         }
6775
6776         leaf = path->nodes[0];
6777         extent_item = btrfs_item_ptr(leaf, path->slots[0],
6778                                      struct btrfs_extent_item);
6779         btrfs_set_extent_refs(leaf, extent_item, 1);
6780         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6781         btrfs_set_extent_flags(leaf, extent_item,
6782                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
6783
6784         if (skinny_metadata) {
6785                 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
6786         } else {
6787                 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
6788                 btrfs_set_tree_block_key(leaf, block_info, key);
6789                 btrfs_set_tree_block_level(leaf, block_info, level);
6790                 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
6791         }
6792
6793         if (parent > 0) {
6794                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
6795                 btrfs_set_extent_inline_ref_type(leaf, iref,
6796                                                  BTRFS_SHARED_BLOCK_REF_KEY);
6797                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6798         } else {
6799                 btrfs_set_extent_inline_ref_type(leaf, iref,
6800                                                  BTRFS_TREE_BLOCK_REF_KEY);
6801                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
6802         }
6803
6804         btrfs_mark_buffer_dirty(leaf);
6805         btrfs_free_path(path);
6806
6807         ret = update_block_group(root, ins->objectid, root->leafsize, 1);
6808         if (ret) { /* -ENOENT, logic error */
6809                 btrfs_err(fs_info, "update block group failed for %llu %llu",
6810                         ins->objectid, ins->offset);
6811                 BUG();
6812         }
6813
6814         trace_btrfs_reserved_extent_alloc(root, ins->objectid, root->leafsize);
6815         return ret;
6816 }
6817
6818 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6819                                      struct btrfs_root *root,
6820                                      u64 root_objectid, u64 owner,
6821                                      u64 offset, struct btrfs_key *ins)
6822 {
6823         int ret;
6824
6825         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
6826
6827         ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
6828                                          ins->offset, 0,
6829                                          root_objectid, owner, offset,
6830                                          BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
6831         return ret;
6832 }
6833
6834 /*
6835  * this is used by the tree logging recovery code.  It records that
6836  * an extent has been allocated and makes sure to clear the free
6837  * space cache bits as well
6838  */
6839 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
6840                                    struct btrfs_root *root,
6841                                    u64 root_objectid, u64 owner, u64 offset,
6842                                    struct btrfs_key *ins)
6843 {
6844         int ret;
6845         struct btrfs_block_group_cache *block_group;
6846
6847         /*
6848          * Mixed block groups will exclude before processing the log so we only
6849          * need to do the exlude dance if this fs isn't mixed.
6850          */
6851         if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
6852                 ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
6853                 if (ret)
6854                         return ret;
6855         }
6856
6857         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
6858         if (!block_group)
6859                 return -EINVAL;
6860
6861         ret = btrfs_update_reserved_bytes(block_group, ins->offset,
6862                                           RESERVE_ALLOC_NO_ACCOUNT);
6863         BUG_ON(ret); /* logic error */
6864         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
6865                                          0, owner, offset, ins, 1);
6866         btrfs_put_block_group(block_group);
6867         return ret;
6868 }
6869
6870 static struct extent_buffer *
6871 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
6872                       u64 bytenr, u32 blocksize, int level)
6873 {
6874         struct extent_buffer *buf;
6875
6876         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
6877         if (!buf)
6878                 return ERR_PTR(-ENOMEM);
6879         btrfs_set_header_generation(buf, trans->transid);
6880         btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
6881         btrfs_tree_lock(buf);
6882         clean_tree_block(trans, root, buf);
6883         clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
6884
6885         btrfs_set_lock_blocking(buf);
6886         btrfs_set_buffer_uptodate(buf);
6887
6888         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
6889                 /*
6890                  * we allow two log transactions at a time, use different
6891                  * EXENT bit to differentiate dirty pages.
6892                  */
6893                 if (root->log_transid % 2 == 0)
6894                         set_extent_dirty(&root->dirty_log_pages, buf->start,
6895                                         buf->start + buf->len - 1, GFP_NOFS);
6896                 else
6897                         set_extent_new(&root->dirty_log_pages, buf->start,
6898                                         buf->start + buf->len - 1, GFP_NOFS);
6899         } else {
6900                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
6901                          buf->start + buf->len - 1, GFP_NOFS);
6902         }
6903         trans->blocks_used++;
6904         /* this returns a buffer locked for blocking */
6905         return buf;
6906 }
6907
6908 static struct btrfs_block_rsv *
6909 use_block_rsv(struct btrfs_trans_handle *trans,
6910               struct btrfs_root *root, u32 blocksize)
6911 {
6912         struct btrfs_block_rsv *block_rsv;
6913         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
6914         int ret;
6915         bool global_updated = false;
6916
6917         block_rsv = get_block_rsv(trans, root);
6918
6919         if (unlikely(block_rsv->size == 0))
6920                 goto try_reserve;
6921 again:
6922         ret = block_rsv_use_bytes(block_rsv, blocksize);
6923         if (!ret)
6924                 return block_rsv;
6925
6926         if (block_rsv->failfast)
6927                 return ERR_PTR(ret);
6928
6929         if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
6930                 global_updated = true;
6931                 update_global_block_rsv(root->fs_info);
6932                 goto again;
6933         }
6934
6935         if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
6936                 static DEFINE_RATELIMIT_STATE(_rs,
6937                                 DEFAULT_RATELIMIT_INTERVAL * 10,
6938                                 /*DEFAULT_RATELIMIT_BURST*/ 1);
6939                 if (__ratelimit(&_rs))
6940                         WARN(1, KERN_DEBUG
6941                                 "BTRFS: block rsv returned %d\n", ret);
6942         }
6943 try_reserve:
6944         ret = reserve_metadata_bytes(root, block_rsv, blocksize,
6945                                      BTRFS_RESERVE_NO_FLUSH);
6946         if (!ret)
6947                 return block_rsv;
6948         /*
6949          * If we couldn't reserve metadata bytes try and use some from
6950          * the global reserve if its space type is the same as the global
6951          * reservation.
6952          */
6953         if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
6954             block_rsv->space_info == global_rsv->space_info) {
6955                 ret = block_rsv_use_bytes(global_rsv, blocksize);
6956                 if (!ret)
6957                         return global_rsv;
6958         }
6959         return ERR_PTR(ret);
6960 }
6961
6962 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
6963                             struct btrfs_block_rsv *block_rsv, u32 blocksize)
6964 {
6965         block_rsv_add_bytes(block_rsv, blocksize, 0);
6966         block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
6967 }
6968
6969 /*
6970  * finds a free extent and does all the dirty work required for allocation
6971  * returns the key for the extent through ins, and a tree buffer for
6972  * the first block of the extent through buf.
6973  *
6974  * returns the tree buffer or NULL.
6975  */
6976 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
6977                                         struct btrfs_root *root, u32 blocksize,
6978                                         u64 parent, u64 root_objectid,
6979                                         struct btrfs_disk_key *key, int level,
6980                                         u64 hint, u64 empty_size)
6981 {
6982         struct btrfs_key ins;
6983         struct btrfs_block_rsv *block_rsv;
6984         struct extent_buffer *buf;
6985         u64 flags = 0;
6986         int ret;
6987         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
6988                                                  SKINNY_METADATA);
6989
6990         block_rsv = use_block_rsv(trans, root, blocksize);
6991         if (IS_ERR(block_rsv))
6992                 return ERR_CAST(block_rsv);
6993
6994         ret = btrfs_reserve_extent(root, blocksize, blocksize,
6995                                    empty_size, hint, &ins, 0);
6996         if (ret) {
6997                 unuse_block_rsv(root->fs_info, block_rsv, blocksize);
6998                 return ERR_PTR(ret);
6999         }
7000
7001         buf = btrfs_init_new_buffer(trans, root, ins.objectid,
7002                                     blocksize, level);
7003         BUG_ON(IS_ERR(buf)); /* -ENOMEM */
7004
7005         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
7006                 if (parent == 0)
7007                         parent = ins.objectid;
7008                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
7009         } else
7010                 BUG_ON(parent > 0);
7011
7012         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
7013                 struct btrfs_delayed_extent_op *extent_op;
7014                 extent_op = btrfs_alloc_delayed_extent_op();
7015                 BUG_ON(!extent_op); /* -ENOMEM */
7016                 if (key)
7017                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
7018                 else
7019                         memset(&extent_op->key, 0, sizeof(extent_op->key));
7020                 extent_op->flags_to_set = flags;
7021                 if (skinny_metadata)
7022                         extent_op->update_key = 0;
7023                 else
7024                         extent_op->update_key = 1;
7025                 extent_op->update_flags = 1;
7026                 extent_op->is_data = 0;
7027                 extent_op->level = level;
7028
7029                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
7030                                         ins.objectid,
7031                                         ins.offset, parent, root_objectid,
7032                                         level, BTRFS_ADD_DELAYED_EXTENT,
7033                                         extent_op, 0);
7034                 BUG_ON(ret); /* -ENOMEM */
7035         }
7036         return buf;
7037 }
7038
7039 struct walk_control {
7040         u64 refs[BTRFS_MAX_LEVEL];
7041         u64 flags[BTRFS_MAX_LEVEL];
7042         struct btrfs_key update_progress;
7043         int stage;
7044         int level;
7045         int shared_level;
7046         int update_ref;
7047         int keep_locks;
7048         int reada_slot;
7049         int reada_count;
7050         int for_reloc;
7051 };
7052
7053 #define DROP_REFERENCE  1
7054 #define UPDATE_BACKREF  2
7055
7056 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
7057                                      struct btrfs_root *root,
7058                                      struct walk_control *wc,
7059                                      struct btrfs_path *path)
7060 {
7061         u64 bytenr;
7062         u64 generation;
7063         u64 refs;
7064         u64 flags;
7065         u32 nritems;
7066         u32 blocksize;
7067         struct btrfs_key key;
7068         struct extent_buffer *eb;
7069         int ret;
7070         int slot;
7071         int nread = 0;
7072
7073         if (path->slots[wc->level] < wc->reada_slot) {
7074                 wc->reada_count = wc->reada_count * 2 / 3;
7075                 wc->reada_count = max(wc->reada_count, 2);
7076         } else {
7077                 wc->reada_count = wc->reada_count * 3 / 2;
7078                 wc->reada_count = min_t(int, wc->reada_count,
7079                                         BTRFS_NODEPTRS_PER_BLOCK(root));
7080         }
7081
7082         eb = path->nodes[wc->level];
7083         nritems = btrfs_header_nritems(eb);
7084         blocksize = btrfs_level_size(root, wc->level - 1);
7085
7086         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
7087                 if (nread >= wc->reada_count)
7088                         break;
7089
7090                 cond_resched();
7091                 bytenr = btrfs_node_blockptr(eb, slot);
7092                 generation = btrfs_node_ptr_generation(eb, slot);
7093
7094                 if (slot == path->slots[wc->level])
7095                         goto reada;
7096
7097                 if (wc->stage == UPDATE_BACKREF &&
7098                     generation <= root->root_key.offset)
7099                         continue;
7100
7101                 /* We don't lock the tree block, it's OK to be racy here */
7102                 ret = btrfs_lookup_extent_info(trans, root, bytenr,
7103                                                wc->level - 1, 1, &refs,
7104                                                &flags);
7105                 /* We don't care about errors in readahead. */
7106                 if (ret < 0)
7107                         continue;
7108                 BUG_ON(refs == 0);
7109
7110                 if (wc->stage == DROP_REFERENCE) {
7111                         if (refs == 1)
7112                                 goto reada;
7113
7114                         if (wc->level == 1 &&
7115                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7116                                 continue;
7117                         if (!wc->update_ref ||
7118                             generation <= root->root_key.offset)
7119                                 continue;
7120                         btrfs_node_key_to_cpu(eb, &key, slot);
7121                         ret = btrfs_comp_cpu_keys(&key,
7122                                                   &wc->update_progress);
7123                         if (ret < 0)
7124                                 continue;
7125                 } else {
7126                         if (wc->level == 1 &&
7127                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7128                                 continue;
7129                 }
7130 reada:
7131                 ret = readahead_tree_block(root, bytenr, blocksize,
7132                                            generation);
7133                 if (ret)
7134                         break;
7135                 nread++;
7136         }
7137         wc->reada_slot = slot;
7138 }
7139
7140 /*
7141  * helper to process tree block while walking down the tree.
7142  *
7143  * when wc->stage == UPDATE_BACKREF, this function updates
7144  * back refs for pointers in the block.
7145  *
7146  * NOTE: return value 1 means we should stop walking down.
7147  */
7148 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
7149                                    struct btrfs_root *root,
7150                                    struct btrfs_path *path,
7151                                    struct walk_control *wc, int lookup_info)
7152 {
7153         int level = wc->level;
7154         struct extent_buffer *eb = path->nodes[level];
7155         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
7156         int ret;
7157
7158         if (wc->stage == UPDATE_BACKREF &&
7159             btrfs_header_owner(eb) != root->root_key.objectid)
7160                 return 1;
7161
7162         /*
7163          * when reference count of tree block is 1, it won't increase
7164          * again. once full backref flag is set, we never clear it.
7165          */
7166         if (lookup_info &&
7167             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
7168              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
7169                 BUG_ON(!path->locks[level]);
7170                 ret = btrfs_lookup_extent_info(trans, root,
7171                                                eb->start, level, 1,
7172                                                &wc->refs[level],
7173                                                &wc->flags[level]);
7174                 BUG_ON(ret == -ENOMEM);
7175                 if (ret)
7176                         return ret;
7177                 BUG_ON(wc->refs[level] == 0);
7178         }
7179
7180         if (wc->stage == DROP_REFERENCE) {
7181                 if (wc->refs[level] > 1)
7182                         return 1;
7183
7184                 if (path->locks[level] && !wc->keep_locks) {
7185                         btrfs_tree_unlock_rw(eb, path->locks[level]);
7186                         path->locks[level] = 0;
7187                 }
7188                 return 0;
7189         }
7190
7191         /* wc->stage == UPDATE_BACKREF */
7192         if (!(wc->flags[level] & flag)) {
7193                 BUG_ON(!path->locks[level]);
7194                 ret = btrfs_inc_ref(trans, root, eb, 1, wc->for_reloc);
7195                 BUG_ON(ret); /* -ENOMEM */
7196                 ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc);
7197                 BUG_ON(ret); /* -ENOMEM */
7198                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
7199                                                   eb->len, flag,
7200                                                   btrfs_header_level(eb), 0);
7201                 BUG_ON(ret); /* -ENOMEM */
7202                 wc->flags[level] |= flag;
7203         }
7204
7205         /*
7206          * the block is shared by multiple trees, so it's not good to
7207          * keep the tree lock
7208          */
7209         if (path->locks[level] && level > 0) {
7210                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7211                 path->locks[level] = 0;
7212         }
7213         return 0;
7214 }
7215
7216 /*
7217  * helper to process tree block pointer.
7218  *
7219  * when wc->stage == DROP_REFERENCE, this function checks
7220  * reference count of the block pointed to. if the block
7221  * is shared and we need update back refs for the subtree
7222  * rooted at the block, this function changes wc->stage to
7223  * UPDATE_BACKREF. if the block is shared and there is no
7224  * need to update back, this function drops the reference
7225  * to the block.
7226  *
7227  * NOTE: return value 1 means we should stop walking down.
7228  */
7229 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
7230                                  struct btrfs_root *root,
7231                                  struct btrfs_path *path,
7232                                  struct walk_control *wc, int *lookup_info)
7233 {
7234         u64 bytenr;
7235         u64 generation;
7236         u64 parent;
7237         u32 blocksize;
7238         struct btrfs_key key;
7239         struct extent_buffer *next;
7240         int level = wc->level;
7241         int reada = 0;
7242         int ret = 0;
7243
7244         generation = btrfs_node_ptr_generation(path->nodes[level],
7245                                                path->slots[level]);
7246         /*
7247          * if the lower level block was created before the snapshot
7248          * was created, we know there is no need to update back refs
7249          * for the subtree
7250          */
7251         if (wc->stage == UPDATE_BACKREF &&
7252             generation <= root->root_key.offset) {
7253                 *lookup_info = 1;
7254                 return 1;
7255         }
7256
7257         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
7258         blocksize = btrfs_level_size(root, level - 1);
7259
7260         next = btrfs_find_tree_block(root, bytenr, blocksize);
7261         if (!next) {
7262                 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
7263                 if (!next)
7264                         return -ENOMEM;
7265                 btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
7266                                                level - 1);
7267                 reada = 1;
7268         }
7269         btrfs_tree_lock(next);
7270         btrfs_set_lock_blocking(next);
7271
7272         ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
7273                                        &wc->refs[level - 1],
7274                                        &wc->flags[level - 1]);
7275         if (ret < 0) {
7276                 btrfs_tree_unlock(next);
7277                 return ret;
7278         }
7279
7280         if (unlikely(wc->refs[level - 1] == 0)) {
7281                 btrfs_err(root->fs_info, "Missing references.");
7282                 BUG();
7283         }
7284         *lookup_info = 0;
7285
7286         if (wc->stage == DROP_REFERENCE) {
7287                 if (wc->refs[level - 1] > 1) {
7288                         if (level == 1 &&
7289                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7290                                 goto skip;
7291
7292                         if (!wc->update_ref ||
7293                             generation <= root->root_key.offset)
7294                                 goto skip;
7295
7296                         btrfs_node_key_to_cpu(path->nodes[level], &key,
7297                                               path->slots[level]);
7298                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
7299                         if (ret < 0)
7300                                 goto skip;
7301
7302                         wc->stage = UPDATE_BACKREF;
7303                         wc->shared_level = level - 1;
7304                 }
7305         } else {
7306                 if (level == 1 &&
7307                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7308                         goto skip;
7309         }
7310
7311         if (!btrfs_buffer_uptodate(next, generation, 0)) {
7312                 btrfs_tree_unlock(next);
7313                 free_extent_buffer(next);
7314                 next = NULL;
7315                 *lookup_info = 1;
7316         }
7317
7318         if (!next) {
7319                 if (reada && level == 1)
7320                         reada_walk_down(trans, root, wc, path);
7321                 next = read_tree_block(root, bytenr, blocksize, generation);
7322                 if (!next || !extent_buffer_uptodate(next)) {
7323                         free_extent_buffer(next);
7324                         return -EIO;
7325                 }
7326                 btrfs_tree_lock(next);
7327                 btrfs_set_lock_blocking(next);
7328         }
7329
7330         level--;
7331         BUG_ON(level != btrfs_header_level(next));
7332         path->nodes[level] = next;
7333         path->slots[level] = 0;
7334         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7335         wc->level = level;
7336         if (wc->level == 1)
7337                 wc->reada_slot = 0;
7338         return 0;
7339 skip:
7340         wc->refs[level - 1] = 0;
7341         wc->flags[level - 1] = 0;
7342         if (wc->stage == DROP_REFERENCE) {
7343                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
7344                         parent = path->nodes[level]->start;
7345                 } else {
7346                         BUG_ON(root->root_key.objectid !=
7347                                btrfs_header_owner(path->nodes[level]));
7348                         parent = 0;
7349                 }
7350
7351                 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
7352                                 root->root_key.objectid, level - 1, 0, 0);
7353                 BUG_ON(ret); /* -ENOMEM */
7354         }
7355         btrfs_tree_unlock(next);
7356         free_extent_buffer(next);
7357         *lookup_info = 1;
7358         return 1;
7359 }
7360
7361 /*
7362  * helper to process tree block while walking up the tree.
7363  *
7364  * when wc->stage == DROP_REFERENCE, this function drops
7365  * reference count on the block.
7366  *
7367  * when wc->stage == UPDATE_BACKREF, this function changes
7368  * wc->stage back to DROP_REFERENCE if we changed wc->stage
7369  * to UPDATE_BACKREF previously while processing the block.
7370  *
7371  * NOTE: return value 1 means we should stop walking up.
7372  */
7373 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
7374                                  struct btrfs_root *root,
7375                                  struct btrfs_path *path,
7376                                  struct walk_control *wc)
7377 {
7378         int ret;
7379         int level = wc->level;
7380         struct extent_buffer *eb = path->nodes[level];
7381         u64 parent = 0;
7382
7383         if (wc->stage == UPDATE_BACKREF) {
7384                 BUG_ON(wc->shared_level < level);
7385                 if (level < wc->shared_level)
7386                         goto out;
7387
7388                 ret = find_next_key(path, level + 1, &wc->update_progress);
7389                 if (ret > 0)
7390                         wc->update_ref = 0;
7391
7392                 wc->stage = DROP_REFERENCE;
7393                 wc->shared_level = -1;
7394                 path->slots[level] = 0;
7395
7396                 /*
7397                  * check reference count again if the block isn't locked.
7398                  * we should start walking down the tree again if reference
7399                  * count is one.
7400                  */
7401                 if (!path->locks[level]) {
7402                         BUG_ON(level == 0);
7403                         btrfs_tree_lock(eb);
7404                         btrfs_set_lock_blocking(eb);
7405                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7406
7407                         ret = btrfs_lookup_extent_info(trans, root,
7408                                                        eb->start, level, 1,
7409                                                        &wc->refs[level],
7410                                                        &wc->flags[level]);
7411                         if (ret < 0) {
7412                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7413                                 path->locks[level] = 0;
7414                                 return ret;
7415                         }
7416                         BUG_ON(wc->refs[level] == 0);
7417                         if (wc->refs[level] == 1) {
7418                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7419                                 path->locks[level] = 0;
7420                                 return 1;
7421                         }
7422                 }
7423         }
7424
7425         /* wc->stage == DROP_REFERENCE */
7426         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
7427
7428         if (wc->refs[level] == 1) {
7429                 if (level == 0) {
7430                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7431                                 ret = btrfs_dec_ref(trans, root, eb, 1,
7432                                                     wc->for_reloc);
7433                         else
7434                                 ret = btrfs_dec_ref(trans, root, eb, 0,
7435                                                     wc->for_reloc);
7436                         BUG_ON(ret); /* -ENOMEM */
7437                 }
7438                 /* make block locked assertion in clean_tree_block happy */
7439                 if (!path->locks[level] &&
7440                     btrfs_header_generation(eb) == trans->transid) {
7441                         btrfs_tree_lock(eb);
7442                         btrfs_set_lock_blocking(eb);
7443                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7444                 }
7445                 clean_tree_block(trans, root, eb);
7446         }
7447
7448         if (eb == root->node) {
7449                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7450                         parent = eb->start;
7451                 else
7452                         BUG_ON(root->root_key.objectid !=
7453                                btrfs_header_owner(eb));
7454         } else {
7455                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7456                         parent = path->nodes[level + 1]->start;
7457                 else
7458                         BUG_ON(root->root_key.objectid !=
7459                                btrfs_header_owner(path->nodes[level + 1]));
7460         }
7461
7462         btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
7463 out:
7464         wc->refs[level] = 0;
7465         wc->flags[level] = 0;
7466         return 0;
7467 }
7468
7469 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
7470                                    struct btrfs_root *root,
7471                                    struct btrfs_path *path,
7472                                    struct walk_control *wc)
7473 {
7474         int level = wc->level;
7475         int lookup_info = 1;
7476         int ret;
7477
7478         while (level >= 0) {
7479                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
7480                 if (ret > 0)
7481                         break;
7482
7483                 if (level == 0)
7484                         break;
7485
7486                 if (path->slots[level] >=
7487                     btrfs_header_nritems(path->nodes[level]))
7488                         break;
7489
7490                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
7491                 if (ret > 0) {
7492                         path->slots[level]++;
7493                         continue;
7494                 } else if (ret < 0)
7495                         return ret;
7496                 level = wc->level;
7497         }
7498         return 0;
7499 }
7500
7501 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
7502                                  struct btrfs_root *root,
7503                                  struct btrfs_path *path,
7504                                  struct walk_control *wc, int max_level)
7505 {
7506         int level = wc->level;
7507         int ret;
7508
7509         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
7510         while (level < max_level && path->nodes[level]) {
7511                 wc->level = level;
7512                 if (path->slots[level] + 1 <
7513                     btrfs_header_nritems(path->nodes[level])) {
7514                         path->slots[level]++;
7515                         return 0;
7516                 } else {
7517                         ret = walk_up_proc(trans, root, path, wc);
7518                         if (ret > 0)
7519                                 return 0;
7520
7521                         if (path->locks[level]) {
7522                                 btrfs_tree_unlock_rw(path->nodes[level],
7523                                                      path->locks[level]);
7524                                 path->locks[level] = 0;
7525                         }
7526                         free_extent_buffer(path->nodes[level]);
7527                         path->nodes[level] = NULL;
7528                         level++;
7529                 }
7530         }
7531         return 1;
7532 }
7533
7534 /*
7535  * drop a subvolume tree.
7536  *
7537  * this function traverses the tree freeing any blocks that only
7538  * referenced by the tree.
7539  *
7540  * when a shared tree block is found. this function decreases its
7541  * reference count by one. if update_ref is true, this function
7542  * also make sure backrefs for the shared block and all lower level
7543  * blocks are properly updated.
7544  *
7545  * If called with for_reloc == 0, may exit early with -EAGAIN
7546  */
7547 int btrfs_drop_snapshot(struct btrfs_root *root,
7548                          struct btrfs_block_rsv *block_rsv, int update_ref,
7549                          int for_reloc)
7550 {
7551         struct btrfs_path *path;
7552         struct btrfs_trans_handle *trans;
7553         struct btrfs_root *tree_root = root->fs_info->tree_root;
7554         struct btrfs_root_item *root_item = &root->root_item;
7555         struct walk_control *wc;
7556         struct btrfs_key key;
7557         int err = 0;
7558         int ret;
7559         int level;
7560         bool root_dropped = false;
7561
7562         path = btrfs_alloc_path();
7563         if (!path) {
7564                 err = -ENOMEM;
7565                 goto out;
7566         }
7567
7568         wc = kzalloc(sizeof(*wc), GFP_NOFS);
7569         if (!wc) {
7570                 btrfs_free_path(path);
7571                 err = -ENOMEM;
7572                 goto out;
7573         }
7574
7575         trans = btrfs_start_transaction(tree_root, 0);
7576         if (IS_ERR(trans)) {
7577                 err = PTR_ERR(trans);
7578                 goto out_free;
7579         }
7580
7581         if (block_rsv)
7582                 trans->block_rsv = block_rsv;
7583
7584         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
7585                 level = btrfs_header_level(root->node);
7586                 path->nodes[level] = btrfs_lock_root_node(root);
7587                 btrfs_set_lock_blocking(path->nodes[level]);
7588                 path->slots[level] = 0;
7589                 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7590                 memset(&wc->update_progress, 0,
7591                        sizeof(wc->update_progress));
7592         } else {
7593                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
7594                 memcpy(&wc->update_progress, &key,
7595                        sizeof(wc->update_progress));
7596
7597                 level = root_item->drop_level;
7598                 BUG_ON(level == 0);
7599                 path->lowest_level = level;
7600                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
7601                 path->lowest_level = 0;
7602                 if (ret < 0) {
7603                         err = ret;
7604                         goto out_end_trans;
7605                 }
7606                 WARN_ON(ret > 0);
7607
7608                 /*
7609                  * unlock our path, this is safe because only this
7610                  * function is allowed to delete this snapshot
7611                  */
7612                 btrfs_unlock_up_safe(path, 0);
7613
7614                 level = btrfs_header_level(root->node);
7615                 while (1) {
7616                         btrfs_tree_lock(path->nodes[level]);
7617                         btrfs_set_lock_blocking(path->nodes[level]);
7618                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7619
7620                         ret = btrfs_lookup_extent_info(trans, root,
7621                                                 path->nodes[level]->start,
7622                                                 level, 1, &wc->refs[level],
7623                                                 &wc->flags[level]);
7624                         if (ret < 0) {
7625                                 err = ret;
7626                                 goto out_end_trans;
7627                         }
7628                         BUG_ON(wc->refs[level] == 0);
7629
7630                         if (level == root_item->drop_level)
7631                                 break;
7632
7633                         btrfs_tree_unlock(path->nodes[level]);
7634                         path->locks[level] = 0;
7635                         WARN_ON(wc->refs[level] != 1);
7636                         level--;
7637                 }
7638         }
7639
7640         wc->level = level;
7641         wc->shared_level = -1;
7642         wc->stage = DROP_REFERENCE;
7643         wc->update_ref = update_ref;
7644         wc->keep_locks = 0;
7645         wc->for_reloc = for_reloc;
7646         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
7647
7648         while (1) {
7649
7650                 ret = walk_down_tree(trans, root, path, wc);
7651                 if (ret < 0) {
7652                         err = ret;
7653                         break;
7654                 }
7655
7656                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
7657                 if (ret < 0) {
7658                         err = ret;
7659                         break;
7660                 }
7661
7662                 if (ret > 0) {
7663                         BUG_ON(wc->stage != DROP_REFERENCE);
7664                         break;
7665                 }
7666
7667                 if (wc->stage == DROP_REFERENCE) {
7668                         level = wc->level;
7669                         btrfs_node_key(path->nodes[level],
7670                                        &root_item->drop_progress,
7671                                        path->slots[level]);
7672                         root_item->drop_level = level;
7673                 }
7674
7675                 BUG_ON(wc->level == 0);
7676                 if (btrfs_should_end_transaction(trans, tree_root) ||
7677                     (!for_reloc && btrfs_need_cleaner_sleep(root))) {
7678                         ret = btrfs_update_root(trans, tree_root,
7679                                                 &root->root_key,
7680                                                 root_item);
7681                         if (ret) {
7682                                 btrfs_abort_transaction(trans, tree_root, ret);
7683                                 err = ret;
7684                                 goto out_end_trans;
7685                         }
7686
7687                         btrfs_end_transaction_throttle(trans, tree_root);
7688                         if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
7689                                 pr_debug("BTRFS: drop snapshot early exit\n");
7690                                 err = -EAGAIN;
7691                                 goto out_free;
7692                         }
7693
7694                         trans = btrfs_start_transaction(tree_root, 0);
7695                         if (IS_ERR(trans)) {
7696                                 err = PTR_ERR(trans);
7697                                 goto out_free;
7698                         }
7699                         if (block_rsv)
7700                                 trans->block_rsv = block_rsv;
7701                 }
7702         }
7703         btrfs_release_path(path);
7704         if (err)
7705                 goto out_end_trans;
7706
7707         ret = btrfs_del_root(trans, tree_root, &root->root_key);
7708         if (ret) {
7709                 btrfs_abort_transaction(trans, tree_root, ret);
7710                 goto out_end_trans;
7711         }
7712
7713         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
7714                 ret = btrfs_find_root(tree_root, &root->root_key, path,
7715                                       NULL, NULL);
7716                 if (ret < 0) {
7717                         btrfs_abort_transaction(trans, tree_root, ret);
7718                         err = ret;
7719                         goto out_end_trans;
7720                 } else if (ret > 0) {
7721                         /* if we fail to delete the orphan item this time
7722                          * around, it'll get picked up the next time.
7723                          *
7724                          * The most common failure here is just -ENOENT.
7725                          */
7726                         btrfs_del_orphan_item(trans, tree_root,
7727                                               root->root_key.objectid);
7728                 }
7729         }
7730
7731         if (root->in_radix) {
7732                 btrfs_drop_and_free_fs_root(tree_root->fs_info, root);
7733         } else {
7734                 free_extent_buffer(root->node);
7735                 free_extent_buffer(root->commit_root);
7736                 btrfs_put_fs_root(root);
7737         }
7738         root_dropped = true;
7739 out_end_trans:
7740         btrfs_end_transaction_throttle(trans, tree_root);
7741 out_free:
7742         kfree(wc);
7743         btrfs_free_path(path);
7744 out:
7745         /*
7746          * So if we need to stop dropping the snapshot for whatever reason we
7747          * need to make sure to add it back to the dead root list so that we
7748          * keep trying to do the work later.  This also cleans up roots if we
7749          * don't have it in the radix (like when we recover after a power fail
7750          * or unmount) so we don't leak memory.
7751          */
7752         if (!for_reloc && root_dropped == false)
7753                 btrfs_add_dead_root(root);
7754         if (err && err != -EAGAIN)
7755                 btrfs_std_error(root->fs_info, err);
7756         return err;
7757 }
7758
7759 /*
7760  * drop subtree rooted at tree block 'node'.
7761  *
7762  * NOTE: this function will unlock and release tree block 'node'
7763  * only used by relocation code
7764  */
7765 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
7766                         struct btrfs_root *root,
7767                         struct extent_buffer *node,
7768                         struct extent_buffer *parent)
7769 {
7770         struct btrfs_path *path;
7771         struct walk_control *wc;
7772         int level;
7773         int parent_level;
7774         int ret = 0;
7775         int wret;
7776
7777         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
7778
7779         path = btrfs_alloc_path();
7780         if (!path)
7781                 return -ENOMEM;
7782
7783         wc = kzalloc(sizeof(*wc), GFP_NOFS);
7784         if (!wc) {
7785                 btrfs_free_path(path);
7786                 return -ENOMEM;
7787         }
7788
7789         btrfs_assert_tree_locked(parent);
7790         parent_level = btrfs_header_level(parent);
7791         extent_buffer_get(parent);
7792         path->nodes[parent_level] = parent;
7793         path->slots[parent_level] = btrfs_header_nritems(parent);
7794
7795         btrfs_assert_tree_locked(node);
7796         level = btrfs_header_level(node);
7797         path->nodes[level] = node;
7798         path->slots[level] = 0;
7799         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7800
7801         wc->refs[parent_level] = 1;
7802         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
7803         wc->level = level;
7804         wc->shared_level = -1;
7805         wc->stage = DROP_REFERENCE;
7806         wc->update_ref = 0;
7807         wc->keep_locks = 1;
7808         wc->for_reloc = 1;
7809         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
7810
7811         while (1) {
7812                 wret = walk_down_tree(trans, root, path, wc);
7813                 if (wret < 0) {
7814                         ret = wret;
7815                         break;
7816                 }
7817
7818                 wret = walk_up_tree(trans, root, path, wc, parent_level);
7819                 if (wret < 0)
7820                         ret = wret;
7821                 if (wret != 0)
7822                         break;
7823         }
7824
7825         kfree(wc);
7826         btrfs_free_path(path);
7827         return ret;
7828 }
7829
7830 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
7831 {
7832         u64 num_devices;
7833         u64 stripped;
7834
7835         /*
7836          * if restripe for this chunk_type is on pick target profile and
7837          * return, otherwise do the usual balance
7838          */
7839         stripped = get_restripe_target(root->fs_info, flags);
7840         if (stripped)
7841                 return extended_to_chunk(stripped);
7842
7843         /*
7844          * we add in the count of missing devices because we want
7845          * to make sure that any RAID levels on a degraded FS
7846          * continue to be honored.
7847          */
7848         num_devices = root->fs_info->fs_devices->rw_devices +
7849                 root->fs_info->fs_devices->missing_devices;
7850
7851         stripped = BTRFS_BLOCK_GROUP_RAID0 |
7852                 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
7853                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
7854
7855         if (num_devices == 1) {
7856                 stripped |= BTRFS_BLOCK_GROUP_DUP;
7857                 stripped = flags & ~stripped;
7858
7859                 /* turn raid0 into single device chunks */
7860                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
7861                         return stripped;
7862
7863                 /* turn mirroring into duplication */
7864                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
7865                              BTRFS_BLOCK_GROUP_RAID10))
7866                         return stripped | BTRFS_BLOCK_GROUP_DUP;
7867         } else {
7868                 /* they already had raid on here, just return */
7869                 if (flags & stripped)
7870                         return flags;
7871
7872                 stripped |= BTRFS_BLOCK_GROUP_DUP;
7873                 stripped = flags & ~stripped;
7874
7875                 /* switch duplicated blocks with raid1 */
7876                 if (flags & BTRFS_BLOCK_GROUP_DUP)
7877                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
7878
7879                 /* this is drive concat, leave it alone */
7880         }
7881
7882         return flags;
7883 }
7884
7885 static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
7886 {
7887         struct btrfs_space_info *sinfo = cache->space_info;
7888         u64 num_bytes;
7889         u64 min_allocable_bytes;
7890         int ret = -ENOSPC;
7891
7892
7893         /*
7894          * We need some metadata space and system metadata space for
7895          * allocating chunks in some corner cases until we force to set
7896          * it to be readonly.
7897          */
7898         if ((sinfo->flags &
7899              (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
7900             !force)
7901                 min_allocable_bytes = 1 * 1024 * 1024;
7902         else
7903                 min_allocable_bytes = 0;
7904
7905         spin_lock(&sinfo->lock);
7906         spin_lock(&cache->lock);
7907
7908         if (cache->ro) {
7909                 ret = 0;
7910                 goto out;
7911         }
7912
7913         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7914                     cache->bytes_super - btrfs_block_group_used(&cache->item);
7915
7916         if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
7917             sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
7918             min_allocable_bytes <= sinfo->total_bytes) {
7919                 sinfo->bytes_readonly += num_bytes;
7920                 cache->ro = 1;
7921                 ret = 0;
7922         }
7923 out:
7924         spin_unlock(&cache->lock);
7925         spin_unlock(&sinfo->lock);
7926         return ret;
7927 }
7928
7929 int btrfs_set_block_group_ro(struct btrfs_root *root,
7930                              struct btrfs_block_group_cache *cache)
7931
7932 {
7933         struct btrfs_trans_handle *trans;
7934         u64 alloc_flags;
7935         int ret;
7936
7937         BUG_ON(cache->ro);
7938
7939         trans = btrfs_join_transaction(root);
7940         if (IS_ERR(trans))
7941                 return PTR_ERR(trans);
7942
7943         alloc_flags = update_block_group_flags(root, cache->flags);
7944         if (alloc_flags != cache->flags) {
7945                 ret = do_chunk_alloc(trans, root, alloc_flags,
7946                                      CHUNK_ALLOC_FORCE);
7947                 if (ret < 0)
7948                         goto out;
7949         }
7950
7951         ret = set_block_group_ro(cache, 0);
7952         if (!ret)
7953                 goto out;
7954         alloc_flags = get_alloc_profile(root, cache->space_info->flags);
7955         ret = do_chunk_alloc(trans, root, alloc_flags,
7956                              CHUNK_ALLOC_FORCE);
7957         if (ret < 0)
7958                 goto out;
7959         ret = set_block_group_ro(cache, 0);
7960 out:
7961         btrfs_end_transaction(trans, root);
7962         return ret;
7963 }
7964
7965 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
7966                             struct btrfs_root *root, u64 type)
7967 {
7968         u64 alloc_flags = get_alloc_profile(root, type);
7969         return do_chunk_alloc(trans, root, alloc_flags,
7970                               CHUNK_ALLOC_FORCE);
7971 }
7972
7973 /*
7974  * helper to account the unused space of all the readonly block group in the
7975  * list. takes mirrors into account.
7976  */
7977 static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
7978 {
7979         struct btrfs_block_group_cache *block_group;
7980         u64 free_bytes = 0;
7981         int factor;
7982
7983         list_for_each_entry(block_group, groups_list, list) {
7984                 spin_lock(&block_group->lock);
7985
7986                 if (!block_group->ro) {
7987                         spin_unlock(&block_group->lock);
7988                         continue;
7989                 }
7990
7991                 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
7992                                           BTRFS_BLOCK_GROUP_RAID10 |
7993                                           BTRFS_BLOCK_GROUP_DUP))
7994                         factor = 2;
7995                 else
7996                         factor = 1;
7997
7998                 free_bytes += (block_group->key.offset -
7999                                btrfs_block_group_used(&block_group->item)) *
8000                                factor;
8001
8002                 spin_unlock(&block_group->lock);
8003         }
8004
8005         return free_bytes;
8006 }
8007
8008 /*
8009  * helper to account the unused space of all the readonly block group in the
8010  * space_info. takes mirrors into account.
8011  */
8012 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
8013 {
8014         int i;
8015         u64 free_bytes = 0;
8016
8017         spin_lock(&sinfo->lock);
8018
8019         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
8020                 if (!list_empty(&sinfo->block_groups[i]))
8021                         free_bytes += __btrfs_get_ro_block_group_free_space(
8022                                                 &sinfo->block_groups[i]);
8023
8024         spin_unlock(&sinfo->lock);
8025
8026         return free_bytes;
8027 }
8028
8029 void btrfs_set_block_group_rw(struct btrfs_root *root,
8030                               struct btrfs_block_group_cache *cache)
8031 {
8032         struct btrfs_space_info *sinfo = cache->space_info;
8033         u64 num_bytes;
8034
8035         BUG_ON(!cache->ro);
8036
8037         spin_lock(&sinfo->lock);
8038         spin_lock(&cache->lock);
8039         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
8040                     cache->bytes_super - btrfs_block_group_used(&cache->item);
8041         sinfo->bytes_readonly -= num_bytes;
8042         cache->ro = 0;
8043         spin_unlock(&cache->lock);
8044         spin_unlock(&sinfo->lock);
8045 }
8046
8047 /*
8048  * checks to see if its even possible to relocate this block group.
8049  *
8050  * @return - -1 if it's not a good idea to relocate this block group, 0 if its
8051  * ok to go ahead and try.
8052  */
8053 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
8054 {
8055         struct btrfs_block_group_cache *block_group;
8056         struct btrfs_space_info *space_info;
8057         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
8058         struct btrfs_device *device;
8059         struct btrfs_trans_handle *trans;
8060         u64 min_free;
8061         u64 dev_min = 1;
8062         u64 dev_nr = 0;
8063         u64 target;
8064         int index;
8065         int full = 0;
8066         int ret = 0;
8067
8068         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
8069
8070         /* odd, couldn't find the block group, leave it alone */
8071         if (!block_group)
8072                 return -1;
8073
8074         min_free = btrfs_block_group_used(&block_group->item);
8075
8076         /* no bytes used, we're good */
8077         if (!min_free)
8078                 goto out;
8079
8080         space_info = block_group->space_info;
8081         spin_lock(&space_info->lock);
8082
8083         full = space_info->full;
8084
8085         /*
8086          * if this is the last block group we have in this space, we can't
8087          * relocate it unless we're able to allocate a new chunk below.
8088          *
8089          * Otherwise, we need to make sure we have room in the space to handle
8090          * all of the extents from this block group.  If we can, we're good
8091          */
8092         if ((space_info->total_bytes != block_group->key.offset) &&
8093             (space_info->bytes_used + space_info->bytes_reserved +
8094              space_info->bytes_pinned + space_info->bytes_readonly +
8095              min_free < space_info->total_bytes)) {
8096                 spin_unlock(&space_info->lock);
8097                 goto out;
8098         }
8099         spin_unlock(&space_info->lock);
8100
8101         /*
8102          * ok we don't have enough space, but maybe we have free space on our
8103          * devices to allocate new chunks for relocation, so loop through our
8104          * alloc devices and guess if we have enough space.  if this block
8105          * group is going to be restriped, run checks against the target
8106          * profile instead of the current one.
8107          */
8108         ret = -1;
8109
8110         /*
8111          * index:
8112          *      0: raid10
8113          *      1: raid1
8114          *      2: dup
8115          *      3: raid0
8116          *      4: single
8117          */
8118         target = get_restripe_target(root->fs_info, block_group->flags);
8119         if (target) {
8120                 index = __get_raid_index(extended_to_chunk(target));
8121         } else {
8122                 /*
8123                  * this is just a balance, so if we were marked as full
8124                  * we know there is no space for a new chunk
8125                  */
8126                 if (full)
8127                         goto out;
8128
8129                 index = get_block_group_index(block_group);
8130         }
8131
8132         if (index == BTRFS_RAID_RAID10) {
8133                 dev_min = 4;
8134                 /* Divide by 2 */
8135                 min_free >>= 1;
8136         } else if (index == BTRFS_RAID_RAID1) {
8137                 dev_min = 2;
8138         } else if (index == BTRFS_RAID_DUP) {
8139                 /* Multiply by 2 */
8140                 min_free <<= 1;
8141         } else if (index == BTRFS_RAID_RAID0) {
8142                 dev_min = fs_devices->rw_devices;
8143                 do_div(min_free, dev_min);
8144         }
8145
8146         /* We need to do this so that we can look at pending chunks */
8147         trans = btrfs_join_transaction(root);
8148         if (IS_ERR(trans)) {
8149                 ret = PTR_ERR(trans);
8150                 goto out;
8151         }
8152
8153         mutex_lock(&root->fs_info->chunk_mutex);
8154         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
8155                 u64 dev_offset;
8156
8157                 /*
8158                  * check to make sure we can actually find a chunk with enough
8159                  * space to fit our block group in.
8160                  */
8161                 if (device->total_bytes > device->bytes_used + min_free &&
8162                     !device->is_tgtdev_for_dev_replace) {
8163                         ret = find_free_dev_extent(trans, device, min_free,
8164                                                    &dev_offset, NULL);
8165                         if (!ret)
8166                                 dev_nr++;
8167
8168                         if (dev_nr >= dev_min)
8169                                 break;
8170
8171                         ret = -1;
8172                 }
8173         }
8174         mutex_unlock(&root->fs_info->chunk_mutex);
8175         btrfs_end_transaction(trans, root);
8176 out:
8177         btrfs_put_block_group(block_group);
8178         return ret;
8179 }
8180
8181 static int find_first_block_group(struct btrfs_root *root,
8182                 struct btrfs_path *path, struct btrfs_key *key)
8183 {
8184         int ret = 0;
8185         struct btrfs_key found_key;
8186         struct extent_buffer *leaf;
8187         int slot;
8188
8189         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
8190         if (ret < 0)
8191                 goto out;
8192
8193         while (1) {
8194                 slot = path->slots[0];
8195                 leaf = path->nodes[0];
8196                 if (slot >= btrfs_header_nritems(leaf)) {
8197                         ret = btrfs_next_leaf(root, path);
8198                         if (ret == 0)
8199                                 continue;
8200                         if (ret < 0)
8201                                 goto out;
8202                         break;
8203                 }
8204                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
8205
8206                 if (found_key.objectid >= key->objectid &&
8207                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
8208                         ret = 0;
8209                         goto out;
8210                 }
8211                 path->slots[0]++;
8212         }
8213 out:
8214         return ret;
8215 }
8216
8217 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
8218 {
8219         struct btrfs_block_group_cache *block_group;
8220         u64 last = 0;
8221
8222         while (1) {
8223                 struct inode *inode;
8224
8225                 block_group = btrfs_lookup_first_block_group(info, last);
8226                 while (block_group) {
8227                         spin_lock(&block_group->lock);
8228                         if (block_group->iref)
8229                                 break;
8230                         spin_unlock(&block_group->lock);
8231                         block_group = next_block_group(info->tree_root,
8232                                                        block_group);
8233                 }
8234                 if (!block_group) {
8235                         if (last == 0)
8236                                 break;
8237                         last = 0;
8238                         continue;
8239                 }
8240
8241                 inode = block_group->inode;
8242                 block_group->iref = 0;
8243                 block_group->inode = NULL;
8244                 spin_unlock(&block_group->lock);
8245                 iput(inode);
8246                 last = block_group->key.objectid + block_group->key.offset;
8247                 btrfs_put_block_group(block_group);
8248         }
8249 }
8250
8251 int btrfs_free_block_groups(struct btrfs_fs_info *info)
8252 {
8253         struct btrfs_block_group_cache *block_group;
8254         struct btrfs_space_info *space_info;
8255         struct btrfs_caching_control *caching_ctl;
8256         struct rb_node *n;
8257
8258         down_write(&info->extent_commit_sem);
8259         while (!list_empty(&info->caching_block_groups)) {
8260                 caching_ctl = list_entry(info->caching_block_groups.next,
8261                                          struct btrfs_caching_control, list);
8262                 list_del(&caching_ctl->list);
8263                 put_caching_control(caching_ctl);
8264         }
8265         up_write(&info->extent_commit_sem);
8266
8267         spin_lock(&info->block_group_cache_lock);
8268         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
8269                 block_group = rb_entry(n, struct btrfs_block_group_cache,
8270                                        cache_node);
8271                 rb_erase(&block_group->cache_node,
8272                          &info->block_group_cache_tree);
8273                 spin_unlock(&info->block_group_cache_lock);
8274
8275                 down_write(&block_group->space_info->groups_sem);
8276                 list_del(&block_group->list);
8277                 up_write(&block_group->space_info->groups_sem);
8278
8279                 if (block_group->cached == BTRFS_CACHE_STARTED)
8280                         wait_block_group_cache_done(block_group);
8281
8282                 /*
8283                  * We haven't cached this block group, which means we could
8284                  * possibly have excluded extents on this block group.
8285                  */
8286                 if (block_group->cached == BTRFS_CACHE_NO ||
8287                     block_group->cached == BTRFS_CACHE_ERROR)
8288                         free_excluded_extents(info->extent_root, block_group);
8289
8290                 btrfs_remove_free_space_cache(block_group);
8291                 btrfs_put_block_group(block_group);
8292
8293                 spin_lock(&info->block_group_cache_lock);
8294         }
8295         spin_unlock(&info->block_group_cache_lock);
8296
8297         /* now that all the block groups are freed, go through and
8298          * free all the space_info structs.  This is only called during
8299          * the final stages of unmount, and so we know nobody is
8300          * using them.  We call synchronize_rcu() once before we start,
8301          * just to be on the safe side.
8302          */
8303         synchronize_rcu();
8304
8305         release_global_block_rsv(info);
8306
8307         while (!list_empty(&info->space_info)) {
8308                 int i;
8309
8310                 space_info = list_entry(info->space_info.next,
8311                                         struct btrfs_space_info,
8312                                         list);
8313                 if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
8314                         if (WARN_ON(space_info->bytes_pinned > 0 ||
8315                             space_info->bytes_reserved > 0 ||
8316                             space_info->bytes_may_use > 0)) {
8317                                 dump_space_info(space_info, 0, 0);
8318                         }
8319                 }
8320                 list_del(&space_info->list);
8321                 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
8322                         struct kobject *kobj;
8323                         kobj = &space_info->block_group_kobjs[i];
8324                         if (kobj->parent) {
8325                                 kobject_del(kobj);
8326                                 kobject_put(kobj);
8327                         }
8328                 }
8329                 kobject_del(&space_info->kobj);
8330                 kobject_put(&space_info->kobj);
8331         }
8332         return 0;
8333 }
8334
8335 static void __link_block_group(struct btrfs_space_info *space_info,
8336                                struct btrfs_block_group_cache *cache)
8337 {
8338         int index = get_block_group_index(cache);
8339
8340         down_write(&space_info->groups_sem);
8341         if (list_empty(&space_info->block_groups[index])) {
8342                 struct kobject *kobj = &space_info->block_group_kobjs[index];
8343                 int ret;
8344
8345                 kobject_get(&space_info->kobj); /* put in release */
8346                 ret = kobject_add(kobj, &space_info->kobj, "%s",
8347                                   get_raid_name(index));
8348                 if (ret) {
8349                         pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
8350                         kobject_put(&space_info->kobj);
8351                 }
8352         }
8353         list_add_tail(&cache->list, &space_info->block_groups[index]);
8354         up_write(&space_info->groups_sem);
8355 }
8356
8357 static struct btrfs_block_group_cache *
8358 btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
8359 {
8360         struct btrfs_block_group_cache *cache;
8361
8362         cache = kzalloc(sizeof(*cache), GFP_NOFS);
8363         if (!cache)
8364                 return NULL;
8365
8366         cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
8367                                         GFP_NOFS);
8368         if (!cache->free_space_ctl) {
8369                 kfree(cache);
8370                 return NULL;
8371         }
8372
8373         cache->key.objectid = start;
8374         cache->key.offset = size;
8375         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
8376
8377         cache->sectorsize = root->sectorsize;
8378         cache->fs_info = root->fs_info;
8379         cache->full_stripe_len = btrfs_full_stripe_len(root,
8380                                                &root->fs_info->mapping_tree,
8381                                                start);
8382         atomic_set(&cache->count, 1);
8383         spin_lock_init(&cache->lock);
8384         INIT_LIST_HEAD(&cache->list);
8385         INIT_LIST_HEAD(&cache->cluster_list);
8386         INIT_LIST_HEAD(&cache->new_bg_list);
8387         btrfs_init_free_space_ctl(cache);
8388
8389         return cache;
8390 }
8391
8392 int btrfs_read_block_groups(struct btrfs_root *root)
8393 {
8394         struct btrfs_path *path;
8395         int ret;
8396         struct btrfs_block_group_cache *cache;
8397         struct btrfs_fs_info *info = root->fs_info;
8398         struct btrfs_space_info *space_info;
8399         struct btrfs_key key;
8400         struct btrfs_key found_key;
8401         struct extent_buffer *leaf;
8402         int need_clear = 0;
8403         u64 cache_gen;
8404
8405         root = info->extent_root;
8406         key.objectid = 0;
8407         key.offset = 0;
8408         btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
8409         path = btrfs_alloc_path();
8410         if (!path)
8411                 return -ENOMEM;
8412         path->reada = 1;
8413
8414         cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
8415         if (btrfs_test_opt(root, SPACE_CACHE) &&
8416             btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
8417                 need_clear = 1;
8418         if (btrfs_test_opt(root, CLEAR_CACHE))
8419                 need_clear = 1;
8420
8421         while (1) {
8422                 ret = find_first_block_group(root, path, &key);
8423                 if (ret > 0)
8424                         break;
8425                 if (ret != 0)
8426                         goto error;
8427
8428                 leaf = path->nodes[0];
8429                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
8430
8431                 cache = btrfs_create_block_group_cache(root, found_key.objectid,
8432                                                        found_key.offset);
8433                 if (!cache) {
8434                         ret = -ENOMEM;
8435                         goto error;
8436                 }
8437
8438                 if (need_clear) {
8439                         /*
8440                          * When we mount with old space cache, we need to
8441                          * set BTRFS_DC_CLEAR and set dirty flag.
8442                          *
8443                          * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
8444                          *    truncate the old free space cache inode and
8445                          *    setup a new one.
8446                          * b) Setting 'dirty flag' makes sure that we flush
8447                          *    the new space cache info onto disk.
8448                          */
8449                         cache->disk_cache_state = BTRFS_DC_CLEAR;
8450                         if (btrfs_test_opt(root, SPACE_CACHE))
8451                                 cache->dirty = 1;
8452                 }
8453
8454                 read_extent_buffer(leaf, &cache->item,
8455                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
8456                                    sizeof(cache->item));
8457                 cache->flags = btrfs_block_group_flags(&cache->item);
8458
8459                 key.objectid = found_key.objectid + found_key.offset;
8460                 btrfs_release_path(path);
8461
8462                 /*
8463                  * We need to exclude the super stripes now so that the space
8464                  * info has super bytes accounted for, otherwise we'll think
8465                  * we have more space than we actually do.
8466                  */
8467                 ret = exclude_super_stripes(root, cache);
8468                 if (ret) {
8469                         /*
8470                          * We may have excluded something, so call this just in
8471                          * case.
8472                          */
8473                         free_excluded_extents(root, cache);
8474                         btrfs_put_block_group(cache);
8475                         goto error;
8476                 }
8477
8478                 /*
8479                  * check for two cases, either we are full, and therefore
8480                  * don't need to bother with the caching work since we won't
8481                  * find any space, or we are empty, and we can just add all
8482                  * the space in and be done with it.  This saves us _alot_ of
8483                  * time, particularly in the full case.
8484                  */
8485                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
8486                         cache->last_byte_to_unpin = (u64)-1;
8487                         cache->cached = BTRFS_CACHE_FINISHED;
8488                         free_excluded_extents(root, cache);
8489                 } else if (btrfs_block_group_used(&cache->item) == 0) {
8490                         cache->last_byte_to_unpin = (u64)-1;
8491                         cache->cached = BTRFS_CACHE_FINISHED;
8492                         add_new_free_space(cache, root->fs_info,
8493                                            found_key.objectid,
8494                                            found_key.objectid +
8495                                            found_key.offset);
8496                         free_excluded_extents(root, cache);
8497                 }
8498
8499                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
8500                 if (ret) {
8501                         btrfs_remove_free_space_cache(cache);
8502                         btrfs_put_block_group(cache);
8503                         goto error;
8504                 }
8505
8506                 ret = update_space_info(info, cache->flags, found_key.offset,
8507                                         btrfs_block_group_used(&cache->item),
8508                                         &space_info);
8509                 if (ret) {
8510                         btrfs_remove_free_space_cache(cache);
8511                         spin_lock(&info->block_group_cache_lock);
8512                         rb_erase(&cache->cache_node,
8513                                  &info->block_group_cache_tree);
8514                         spin_unlock(&info->block_group_cache_lock);
8515                         btrfs_put_block_group(cache);
8516                         goto error;
8517                 }
8518
8519                 cache->space_info = space_info;
8520                 spin_lock(&cache->space_info->lock);
8521                 cache->space_info->bytes_readonly += cache->bytes_super;
8522                 spin_unlock(&cache->space_info->lock);
8523
8524                 __link_block_group(space_info, cache);
8525
8526                 set_avail_alloc_bits(root->fs_info, cache->flags);
8527                 if (btrfs_chunk_readonly(root, cache->key.objectid))
8528                         set_block_group_ro(cache, 1);
8529         }
8530
8531         list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
8532                 if (!(get_alloc_profile(root, space_info->flags) &
8533                       (BTRFS_BLOCK_GROUP_RAID10 |
8534                        BTRFS_BLOCK_GROUP_RAID1 |
8535                        BTRFS_BLOCK_GROUP_RAID5 |
8536                        BTRFS_BLOCK_GROUP_RAID6 |
8537                        BTRFS_BLOCK_GROUP_DUP)))
8538                         continue;
8539                 /*
8540                  * avoid allocating from un-mirrored block group if there are
8541                  * mirrored block groups.
8542                  */
8543                 list_for_each_entry(cache,
8544                                 &space_info->block_groups[BTRFS_RAID_RAID0],
8545                                 list)
8546                         set_block_group_ro(cache, 1);
8547                 list_for_each_entry(cache,
8548                                 &space_info->block_groups[BTRFS_RAID_SINGLE],
8549                                 list)
8550                         set_block_group_ro(cache, 1);
8551         }
8552
8553         init_global_block_rsv(info);
8554         ret = 0;
8555 error:
8556         btrfs_free_path(path);
8557         return ret;
8558 }
8559
8560 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
8561                                        struct btrfs_root *root)
8562 {
8563         struct btrfs_block_group_cache *block_group, *tmp;
8564         struct btrfs_root *extent_root = root->fs_info->extent_root;
8565         struct btrfs_block_group_item item;
8566         struct btrfs_key key;
8567         int ret = 0;
8568
8569         list_for_each_entry_safe(block_group, tmp, &trans->new_bgs,
8570                                  new_bg_list) {
8571                 list_del_init(&block_group->new_bg_list);
8572
8573                 if (ret)
8574                         continue;
8575
8576                 spin_lock(&block_group->lock);
8577                 memcpy(&item, &block_group->item, sizeof(item));
8578                 memcpy(&key, &block_group->key, sizeof(key));
8579                 spin_unlock(&block_group->lock);
8580
8581                 ret = btrfs_insert_item(trans, extent_root, &key, &item,
8582                                         sizeof(item));
8583                 if (ret)
8584                         btrfs_abort_transaction(trans, extent_root, ret);
8585                 ret = btrfs_finish_chunk_alloc(trans, extent_root,
8586                                                key.objectid, key.offset);
8587                 if (ret)
8588                         btrfs_abort_transaction(trans, extent_root, ret);
8589         }
8590 }
8591
8592 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
8593                            struct btrfs_root *root, u64 bytes_used,
8594                            u64 type, u64 chunk_objectid, u64 chunk_offset,
8595                            u64 size)
8596 {
8597         int ret;
8598         struct btrfs_root *extent_root;
8599         struct btrfs_block_group_cache *cache;
8600
8601         extent_root = root->fs_info->extent_root;
8602
8603         root->fs_info->last_trans_log_full_commit = trans->transid;
8604
8605         cache = btrfs_create_block_group_cache(root, chunk_offset, size);
8606         if (!cache)
8607                 return -ENOMEM;
8608
8609         btrfs_set_block_group_used(&cache->item, bytes_used);
8610         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
8611         btrfs_set_block_group_flags(&cache->item, type);
8612
8613         cache->flags = type;
8614         cache->last_byte_to_unpin = (u64)-1;
8615         cache->cached = BTRFS_CACHE_FINISHED;
8616         ret = exclude_super_stripes(root, cache);
8617         if (ret) {
8618                 /*
8619                  * We may have excluded something, so call this just in
8620                  * case.
8621                  */
8622                 free_excluded_extents(root, cache);
8623                 btrfs_put_block_group(cache);
8624                 return ret;
8625         }
8626
8627         add_new_free_space(cache, root->fs_info, chunk_offset,
8628                            chunk_offset + size);
8629
8630         free_excluded_extents(root, cache);
8631
8632         ret = btrfs_add_block_group_cache(root->fs_info, cache);
8633         if (ret) {
8634                 btrfs_remove_free_space_cache(cache);
8635                 btrfs_put_block_group(cache);
8636                 return ret;
8637         }
8638
8639         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
8640                                 &cache->space_info);
8641         if (ret) {
8642                 btrfs_remove_free_space_cache(cache);
8643                 spin_lock(&root->fs_info->block_group_cache_lock);
8644                 rb_erase(&cache->cache_node,
8645                          &root->fs_info->block_group_cache_tree);
8646                 spin_unlock(&root->fs_info->block_group_cache_lock);
8647                 btrfs_put_block_group(cache);
8648                 return ret;
8649         }
8650         update_global_block_rsv(root->fs_info);
8651
8652         spin_lock(&cache->space_info->lock);
8653         cache->space_info->bytes_readonly += cache->bytes_super;
8654         spin_unlock(&cache->space_info->lock);
8655
8656         __link_block_group(cache->space_info, cache);
8657
8658         list_add_tail(&cache->new_bg_list, &trans->new_bgs);
8659
8660         set_avail_alloc_bits(extent_root->fs_info, type);
8661
8662         return 0;
8663 }
8664
8665 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
8666 {
8667         u64 extra_flags = chunk_to_extended(flags) &
8668                                 BTRFS_EXTENDED_PROFILE_MASK;
8669
8670         write_seqlock(&fs_info->profiles_lock);
8671         if (flags & BTRFS_BLOCK_GROUP_DATA)
8672                 fs_info->avail_data_alloc_bits &= ~extra_flags;
8673         if (flags & BTRFS_BLOCK_GROUP_METADATA)
8674                 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
8675         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
8676                 fs_info->avail_system_alloc_bits &= ~extra_flags;
8677         write_sequnlock(&fs_info->profiles_lock);
8678 }
8679
8680 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
8681                              struct btrfs_root *root, u64 group_start)
8682 {
8683         struct btrfs_path *path;
8684         struct btrfs_block_group_cache *block_group;
8685         struct btrfs_free_cluster *cluster;
8686         struct btrfs_root *tree_root = root->fs_info->tree_root;
8687         struct btrfs_key key;
8688         struct inode *inode;
8689         int ret;
8690         int index;
8691         int factor;
8692
8693         root = root->fs_info->extent_root;
8694
8695         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
8696         BUG_ON(!block_group);
8697         BUG_ON(!block_group->ro);
8698
8699         /*
8700          * Free the reserved super bytes from this block group before
8701          * remove it.
8702          */
8703         free_excluded_extents(root, block_group);
8704
8705         memcpy(&key, &block_group->key, sizeof(key));
8706         index = get_block_group_index(block_group);
8707         if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
8708                                   BTRFS_BLOCK_GROUP_RAID1 |
8709                                   BTRFS_BLOCK_GROUP_RAID10))
8710                 factor = 2;
8711         else
8712                 factor = 1;
8713
8714         /* make sure this block group isn't part of an allocation cluster */
8715         cluster = &root->fs_info->data_alloc_cluster;
8716         spin_lock(&cluster->refill_lock);
8717         btrfs_return_cluster_to_free_space(block_group, cluster);
8718         spin_unlock(&cluster->refill_lock);
8719
8720         /*
8721          * make sure this block group isn't part of a metadata
8722          * allocation cluster
8723          */
8724         cluster = &root->fs_info->meta_alloc_cluster;
8725         spin_lock(&cluster->refill_lock);
8726         btrfs_return_cluster_to_free_space(block_group, cluster);
8727         spin_unlock(&cluster->refill_lock);
8728
8729         path = btrfs_alloc_path();
8730         if (!path) {
8731                 ret = -ENOMEM;
8732                 goto out;
8733         }
8734
8735         inode = lookup_free_space_inode(tree_root, block_group, path);
8736         if (!IS_ERR(inode)) {
8737                 ret = btrfs_orphan_add(trans, inode);
8738                 if (ret) {
8739                         btrfs_add_delayed_iput(inode);
8740                         goto out;
8741                 }
8742                 clear_nlink(inode);
8743                 /* One for the block groups ref */
8744                 spin_lock(&block_group->lock);
8745                 if (block_group->iref) {
8746                         block_group->iref = 0;
8747                         block_group->inode = NULL;
8748                         spin_unlock(&block_group->lock);
8749                         iput(inode);
8750                 } else {
8751                         spin_unlock(&block_group->lock);
8752                 }
8753                 /* One for our lookup ref */
8754                 btrfs_add_delayed_iput(inode);
8755         }
8756
8757         key.objectid = BTRFS_FREE_SPACE_OBJECTID;
8758         key.offset = block_group->key.objectid;
8759         key.type = 0;
8760
8761         ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
8762         if (ret < 0)
8763                 goto out;
8764         if (ret > 0)
8765                 btrfs_release_path(path);
8766         if (ret == 0) {
8767                 ret = btrfs_del_item(trans, tree_root, path);
8768                 if (ret)
8769                         goto out;
8770                 btrfs_release_path(path);
8771         }
8772
8773         spin_lock(&root->fs_info->block_group_cache_lock);
8774         rb_erase(&block_group->cache_node,
8775                  &root->fs_info->block_group_cache_tree);
8776
8777         if (root->fs_info->first_logical_byte == block_group->key.objectid)
8778                 root->fs_info->first_logical_byte = (u64)-1;
8779         spin_unlock(&root->fs_info->block_group_cache_lock);
8780
8781         down_write(&block_group->space_info->groups_sem);
8782         /*
8783          * we must use list_del_init so people can check to see if they
8784          * are still on the list after taking the semaphore
8785          */
8786         list_del_init(&block_group->list);
8787         if (list_empty(&block_group->space_info->block_groups[index])) {
8788                 kobject_del(&block_group->space_info->block_group_kobjs[index]);
8789                 kobject_put(&block_group->space_info->block_group_kobjs[index]);
8790                 clear_avail_alloc_bits(root->fs_info, block_group->flags);
8791         }
8792         up_write(&block_group->space_info->groups_sem);
8793
8794         if (block_group->cached == BTRFS_CACHE_STARTED)
8795                 wait_block_group_cache_done(block_group);
8796
8797         btrfs_remove_free_space_cache(block_group);
8798
8799         spin_lock(&block_group->space_info->lock);
8800         block_group->space_info->total_bytes -= block_group->key.offset;
8801         block_group->space_info->bytes_readonly -= block_group->key.offset;
8802         block_group->space_info->disk_total -= block_group->key.offset * factor;
8803         spin_unlock(&block_group->space_info->lock);
8804
8805         memcpy(&key, &block_group->key, sizeof(key));
8806
8807         btrfs_clear_space_info_full(root->fs_info);
8808
8809         btrfs_put_block_group(block_group);
8810         btrfs_put_block_group(block_group);
8811
8812         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
8813         if (ret > 0)
8814                 ret = -EIO;
8815         if (ret < 0)
8816                 goto out;
8817
8818         ret = btrfs_del_item(trans, root, path);
8819 out:
8820         btrfs_free_path(path);
8821         return ret;
8822 }
8823
8824 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
8825 {
8826         struct btrfs_space_info *space_info;
8827         struct btrfs_super_block *disk_super;
8828         u64 features;
8829         u64 flags;
8830         int mixed = 0;
8831         int ret;
8832
8833         disk_super = fs_info->super_copy;
8834         if (!btrfs_super_root(disk_super))
8835                 return 1;
8836
8837         features = btrfs_super_incompat_flags(disk_super);
8838         if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
8839                 mixed = 1;
8840
8841         flags = BTRFS_BLOCK_GROUP_SYSTEM;
8842         ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8843         if (ret)
8844                 goto out;
8845
8846         if (mixed) {
8847                 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
8848                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8849         } else {
8850                 flags = BTRFS_BLOCK_GROUP_METADATA;
8851                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8852                 if (ret)
8853                         goto out;
8854
8855                 flags = BTRFS_BLOCK_GROUP_DATA;
8856                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8857         }
8858 out:
8859         return ret;
8860 }
8861
8862 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
8863 {
8864         return unpin_extent_range(root, start, end);
8865 }
8866
8867 int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
8868                                u64 num_bytes, u64 *actual_bytes)
8869 {
8870         return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
8871 }
8872
8873 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
8874 {
8875         struct btrfs_fs_info *fs_info = root->fs_info;
8876         struct btrfs_block_group_cache *cache = NULL;
8877         u64 group_trimmed;
8878         u64 start;
8879         u64 end;
8880         u64 trimmed = 0;
8881         u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
8882         int ret = 0;
8883
8884         /*
8885          * try to trim all FS space, our block group may start from non-zero.
8886          */
8887         if (range->len == total_bytes)
8888                 cache = btrfs_lookup_first_block_group(fs_info, range->start);
8889         else
8890                 cache = btrfs_lookup_block_group(fs_info, range->start);
8891
8892         while (cache) {
8893                 if (cache->key.objectid >= (range->start + range->len)) {
8894                         btrfs_put_block_group(cache);
8895                         break;
8896                 }
8897
8898                 start = max(range->start, cache->key.objectid);
8899                 end = min(range->start + range->len,
8900                                 cache->key.objectid + cache->key.offset);
8901
8902                 if (end - start >= range->minlen) {
8903                         if (!block_group_cache_done(cache)) {
8904                                 ret = cache_block_group(cache, 0);
8905                                 if (ret) {
8906                                         btrfs_put_block_group(cache);
8907                                         break;
8908                                 }
8909                                 ret = wait_block_group_cache_done(cache);
8910                                 if (ret) {
8911                                         btrfs_put_block_group(cache);
8912                                         break;
8913                                 }
8914                         }
8915                         ret = btrfs_trim_block_group(cache,
8916                                                      &group_trimmed,
8917                                                      start,
8918                                                      end,
8919                                                      range->minlen);
8920
8921                         trimmed += group_trimmed;
8922                         if (ret) {
8923                                 btrfs_put_block_group(cache);
8924                                 break;
8925                         }
8926                 }
8927
8928                 cache = next_block_group(fs_info->tree_root, cache);
8929         }
8930
8931         range->len = trimmed;
8932         return ret;
8933 }
8934
8935 /*
8936  * btrfs_{start,end}_write() is similar to mnt_{want, drop}_write(),
8937  * they are used to prevent the some tasks writing data into the page cache
8938  * by nocow before the subvolume is snapshoted, but flush the data into
8939  * the disk after the snapshot creation.
8940  */
8941 void btrfs_end_nocow_write(struct btrfs_root *root)
8942 {
8943         percpu_counter_dec(&root->subv_writers->counter);
8944         /*
8945          * Make sure counter is updated before we wake up
8946          * waiters.
8947          */
8948         smp_mb();
8949         if (waitqueue_active(&root->subv_writers->wait))
8950                 wake_up(&root->subv_writers->wait);
8951 }
8952
8953 int btrfs_start_nocow_write(struct btrfs_root *root)
8954 {
8955         if (unlikely(atomic_read(&root->will_be_snapshoted)))
8956                 return 0;
8957
8958         percpu_counter_inc(&root->subv_writers->counter);
8959         /*
8960          * Make sure counter is updated before we check for snapshot creation.
8961          */
8962         smp_mb();
8963         if (unlikely(atomic_read(&root->will_be_snapshoted))) {
8964                 btrfs_end_nocow_write(root);
8965                 return 0;
8966         }
8967         return 1;
8968 }