/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include "hash.h"
#include "tree-log.h"
#include "disk-io.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "math.h"
#include "sysfs.h"
#include "qgroup.h"

#undef SCRAMBLE_DELAYED_REFS

/*
 * Control flags for do_chunk_alloc's force field.
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 */
enum {
        CHUNK_ALLOC_NO_FORCE = 0,
        CHUNK_ALLOC_LIMITED = 1,
        CHUNK_ALLOC_FORCE = 2,
};

/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
        RESERVE_FREE = 0,
        RESERVE_ALLOC = 1,
        RESERVE_ALLOC_NO_ACCOUNT = 2,
};

static int update_block_group(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root, u64 bytenr,
                              u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                struct btrfs_delayed_ref_node *node, u64 parent,
                                u64 root_objectid, u64 owner_objectid,
                                u64 owner_offset, int refs_to_drop,
                                struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 parent, u64 root_objectid,
                                     u64 flags, struct btrfs_disk_key *key,
                                     int level, struct btrfs_key *ins,
                                     int no_quota);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 flags,
                          int force);
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
                                       u64 num_bytes, int reserve,
                                       int delalloc);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
                               u64 num_bytes);
int btrfs_pin_extent(struct btrfs_root *root,
                     u64 bytenr, u64 num_bytes, int reserved);

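/*
 * Has the block group's free space been fully cached, or did caching
 * fail?  Either way there is no point waiting on it any longer.
 */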
static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
        smp_mb();
        return cache->cached == BTRFS_CACHE_FINISHED ||
                cache->cached == BTRFS_CACHE_ERROR;
}

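/*
 * Check that the block group's allocation flags contain all of the given
 * bits, e.g. block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA).
 */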
static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}

static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
        atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count)) {
                WARN_ON(cache->pinned > 0);
                WARN_ON(cache->reserved > 0);
                kfree(cache->free_space_ctl);
                kfree(cache);
        }
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
                                struct btrfs_block_group_cache *block_group)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct btrfs_block_group_cache *cache;

        spin_lock(&info->block_group_cache_lock);
        p = &info->block_group_cache_tree.rb_node;

        while (*p) {
                parent = *p;
                cache = rb_entry(parent, struct btrfs_block_group_cache,
                                 cache_node);
                if (block_group->key.objectid < cache->key.objectid) {
                        p = &(*p)->rb_left;
                } else if (block_group->key.objectid > cache->key.objectid) {
                        p = &(*p)->rb_right;
                } else {
                        spin_unlock(&info->block_group_cache_lock);
                        return -EEXIST;
                }
        }

        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);

        if (info->first_logical_byte > block_group->key.objectid)
                info->first_logical_byte = block_group->key.objectid;

        spin_unlock(&info->block_group_cache_lock);

        return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                              int contains)
{
        struct btrfs_block_group_cache *cache, *ret = NULL;
        struct rb_node *n;
        u64 end, start;

        spin_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_node;

        while (n) {
                cache = rb_entry(n, struct btrfs_block_group_cache,
                                 cache_node);
                end = cache->key.objectid + cache->key.offset - 1;
                start = cache->key.objectid;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->key.objectid))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {
                                ret = cache;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        ret = cache;
                        break;
                }
        }
        if (ret) {
                btrfs_get_block_group(ret);
                if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
                        info->first_logical_byte = ret->key.objectid;
        }
        spin_unlock(&info->block_group_cache_lock);

        return ret;
}

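/*
 * Mark [start, start + num_bytes) as excluded from free space accounting
 * in both freed_extents trees; used for ranges, such as superblock
 * mirrors, that must never be handed out by the allocator.
 */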
static int add_excluded_extent(struct btrfs_root *root,
                               u64 start, u64 num_bytes)
{
        u64 end = start + num_bytes - 1;
        set_extent_bits(&root->fs_info->freed_extents[0],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        set_extent_bits(&root->fs_info->freed_extents[1],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
                                  struct btrfs_block_group_cache *cache)
{
        u64 start, end;

        start = cache->key.objectid;
        end = start + cache->key.offset - 1;

        clear_extent_bits(&root->fs_info->freed_extents[0],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
        clear_extent_bits(&root->fs_info->freed_extents[1],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
}

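/*
 * Account for the superblock mirrors (and anything below
 * BTRFS_SUPER_INFO_OFFSET) that fall inside this block group, excluding
 * those ranges from its free space and adding them to cache->bytes_super.
 */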
static int exclude_super_stripes(struct btrfs_root *root,
                                 struct btrfs_block_group_cache *cache)
{
        u64 bytenr;
        u64 *logical;
        int stripe_len;
        int i, nr, ret;

        if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
                stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
                cache->bytes_super += stripe_len;
                ret = add_excluded_extent(root, cache->key.objectid,
                                          stripe_len);
                if (ret)
                        return ret;
        }

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
                                       cache->key.objectid, bytenr,
                                       0, &logical, &nr, &stripe_len);
                if (ret)
                        return ret;

                while (nr--) {
                        u64 start, len;

                        if (logical[nr] > cache->key.objectid +
                            cache->key.offset)
                                continue;

                        if (logical[nr] + stripe_len <= cache->key.objectid)
                                continue;

                        start = logical[nr];
                        if (start < cache->key.objectid) {
                                start = cache->key.objectid;
                                len = (logical[nr] + stripe_len) - start;
                        } else {
                                len = min_t(u64, stripe_len,
                                            cache->key.objectid +
                                            cache->key.offset - start);
                        }

                        cache->bytes_super += len;
                        ret = add_excluded_extent(root, start, len);
                        if (ret) {
                                kfree(logical);
                                return ret;
                        }
                }

                kfree(logical);
        }
        return 0;
}

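/*
 * Take a reference on the block group's caching control if caching is in
 * progress, or return NULL otherwise.  Callers pair this with
 * put_caching_control().
 */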
static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
        struct btrfs_caching_control *ctl;

        spin_lock(&cache->lock);
        if (!cache->caching_ctl) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        ctl = cache->caching_ctl;
        atomic_inc(&ctl->count);
        spin_unlock(&cache->lock);
        return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
        if (atomic_dec_and_test(&ctl->count))
                kfree(ctl);
}

/*
 * This is only called by cache_block_group.  Since we could have freed
 * extents, we need to check pinned_extents for any extents that can't be
 * used yet, because their free space will be released as soon as the
 * transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_fs_info *info, u64 start, u64 end)
{
        u64 extent_start, extent_end, size, total_added = 0;
        int ret;

        while (start < end) {
                ret = find_first_extent_bit(info->pinned_extents, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY | EXTENT_UPTODATE,
                                            NULL);
                if (ret)
                        break;

                if (extent_start <= start) {
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;
                        total_added += size;
                        ret = btrfs_add_free_space(block_group, start,
                                                   size);
                        BUG_ON(ret); /* -ENOMEM or logic error */
                        start = extent_end + 1;
                } else {
                        break;
                }
        }

        if (start < end) {
                size = end - start;
                total_added += size;
                ret = btrfs_add_free_space(block_group, start, size);
                BUG_ON(ret); /* -ENOMEM or logic error */
        }

        return total_added;
}

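/*
 * Worker that builds a block group's free space cache by walking the
 * commit root of the extent tree: every gap between extent items inside
 * the group is free space.  It periodically drops its locks and resumes
 * from the last processed key so transaction commits aren't stalled.
 */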
static noinline void caching_thread(struct btrfs_work *work)
{
        struct btrfs_block_group_cache *block_group;
        struct btrfs_fs_info *fs_info;
        struct btrfs_caching_control *caching_ctl;
        struct btrfs_root *extent_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u64 total_found = 0;
        u64 last = 0;
        u32 nritems;
        int ret = -ENOMEM;

        caching_ctl = container_of(work, struct btrfs_caching_control, work);
        block_group = caching_ctl->block_group;
        fs_info = block_group->fs_info;
        extent_root = fs_info->extent_root;

        path = btrfs_alloc_path();
        if (!path)
                goto out;

        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

        /*
         * We don't want to deadlock with somebody trying to allocate a new
         * extent for the extent root while also trying to search the extent
         * root to add free space.  So we skip locking and search the commit
         * root, since it's read-only.
         */
        path->skip_locking = 1;
        path->search_commit_root = 1;
        path->reada = 1;

        key.objectid = last;
        key.offset = 0;
        key.type = BTRFS_EXTENT_ITEM_KEY;
again:
        mutex_lock(&caching_ctl->mutex);
        /* need to make sure the commit_root doesn't disappear */
        down_read(&fs_info->commit_root_sem);

next:
        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto err;

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);

        while (1) {
                if (btrfs_fs_closing(fs_info) > 1) {
                        last = (u64)-1;
                        break;
                }

                if (path->slots[0] < nritems) {
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                } else {
                        ret = find_next_key(path, 0, &key);
                        if (ret)
                                break;

                        if (need_resched() ||
                            rwsem_is_contended(&fs_info->commit_root_sem)) {
                                caching_ctl->progress = last;
                                btrfs_release_path(path);
                                up_read(&fs_info->commit_root_sem);
                                mutex_unlock(&caching_ctl->mutex);
                                cond_resched();
                                goto again;
                        }

                        ret = btrfs_next_leaf(extent_root, path);
                        if (ret < 0)
                                goto err;
                        if (ret)
                                break;
                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        continue;
                }

                if (key.objectid < last) {
                        key.objectid = last;
                        key.offset = 0;
                        key.type = BTRFS_EXTENT_ITEM_KEY;

                        caching_ctl->progress = last;
                        btrfs_release_path(path);
                        goto next;
                }

                if (key.objectid < block_group->key.objectid) {
                        path->slots[0]++;
                        continue;
                }

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                if (key.type == BTRFS_EXTENT_ITEM_KEY ||
                    key.type == BTRFS_METADATA_ITEM_KEY) {
                        total_found += add_new_free_space(block_group,
                                                          fs_info, last,
                                                          key.objectid);
                        if (key.type == BTRFS_METADATA_ITEM_KEY)
                                last = key.objectid +
                                        fs_info->tree_root->nodesize;
                        else
                                last = key.objectid + key.offset;

                        if (total_found > (1024 * 1024 * 2)) {
                                total_found = 0;
                                wake_up(&caching_ctl->wait);
                        }
                }
                path->slots[0]++;
        }
        ret = 0;

        total_found += add_new_free_space(block_group, fs_info, last,
                                          block_group->key.objectid +
                                          block_group->key.offset);
        caching_ctl->progress = (u64)-1;

        spin_lock(&block_group->lock);
        block_group->caching_ctl = NULL;
        block_group->cached = BTRFS_CACHE_FINISHED;
        spin_unlock(&block_group->lock);

err:
        btrfs_free_path(path);
        up_read(&fs_info->commit_root_sem);

        free_excluded_extents(extent_root, block_group);

        mutex_unlock(&caching_ctl->mutex);
out:
        if (ret) {
                spin_lock(&block_group->lock);
                block_group->caching_ctl = NULL;
                block_group->cached = BTRFS_CACHE_ERROR;
                spin_unlock(&block_group->lock);
        }
        wake_up(&caching_ctl->wait);

        put_caching_control(caching_ctl);
        btrfs_put_block_group(block_group);
}

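/*
 * Kick off caching for a block group.  With the space_cache mount option
 * the on-disk free space cache is tried first; if that fails and a full
 * load was requested, caching_thread is queued to rebuild the free space
 * from the extent tree.  With load_cache_only set, only the fast path is
 * attempted.
 */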
static int cache_block_group(struct btrfs_block_group_cache *cache,
                             int load_cache_only)
{
        DEFINE_WAIT(wait);
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl;
        int ret = 0;

        caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
        if (!caching_ctl)
                return -ENOMEM;

        INIT_LIST_HEAD(&caching_ctl->list);
        mutex_init(&caching_ctl->mutex);
        init_waitqueue_head(&caching_ctl->wait);
        caching_ctl->block_group = cache;
        caching_ctl->progress = cache->key.objectid;
        atomic_set(&caching_ctl->count, 1);
        btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
                        caching_thread, NULL, NULL);

        spin_lock(&cache->lock);
        /*
         * This should be a rare occasion, but it can happen when one thread
         * starts to load the space cache info and some other thread starts a
         * transaction commit that tries to do an allocation while the first
         * thread is still loading.  The previous loop should have kept us
         * from choosing this block group, but if we've moved to the state
         * where we will wait on caching block groups we need to first check
         * whether a fast load is in progress here, so we can wait for it to
         * finish; otherwise we could end up allocating from a block group
         * whose cache gets evicted for one reason or another.
         */
        while (cache->cached == BTRFS_CACHE_FAST) {
                struct btrfs_caching_control *ctl;

                ctl = cache->caching_ctl;
                atomic_inc(&ctl->count);
                prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock(&cache->lock);

                schedule();

                finish_wait(&ctl->wait, &wait);
                put_caching_control(ctl);
                spin_lock(&cache->lock);
        }

        if (cache->cached != BTRFS_CACHE_NO) {
                spin_unlock(&cache->lock);
                kfree(caching_ctl);
                return 0;
        }
        WARN_ON(cache->caching_ctl);
        cache->caching_ctl = caching_ctl;
        cache->cached = BTRFS_CACHE_FAST;
        spin_unlock(&cache->lock);

        if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
                mutex_lock(&caching_ctl->mutex);
                ret = load_free_space_cache(fs_info, cache);

                spin_lock(&cache->lock);
                if (ret == 1) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_FINISHED;
                        cache->last_byte_to_unpin = (u64)-1;
                        caching_ctl->progress = (u64)-1;
                } else {
                        if (load_cache_only) {
                                cache->caching_ctl = NULL;
                                cache->cached = BTRFS_CACHE_NO;
                        } else {
                                cache->cached = BTRFS_CACHE_STARTED;
                                cache->has_caching_ctl = 1;
                        }
                }
                spin_unlock(&cache->lock);
                mutex_unlock(&caching_ctl->mutex);

                wake_up(&caching_ctl->wait);
                if (ret == 1) {
                        put_caching_control(caching_ctl);
                        free_excluded_extents(fs_info->extent_root, cache);
                        return 0;
                }
        } else {
                /*
                 * We are not going to do the fast caching, set cached to the
                 * appropriate value and wakeup any waiters.
                 */
                spin_lock(&cache->lock);
                if (load_cache_only) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_NO;
                } else {
                        cache->cached = BTRFS_CACHE_STARTED;
                        cache->has_caching_ctl = 1;
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
        }

        if (load_cache_only) {
                put_caching_control(caching_ctl);
                return 0;
        }

        down_write(&fs_info->commit_root_sem);
        atomic_inc(&caching_ctl->count);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        up_write(&fs_info->commit_root_sem);

        btrfs_get_block_group(cache);

        btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);

        return ret;
}

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 0);

        return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
                                                 struct btrfs_fs_info *info,
                                                 u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 1);

        return cache;
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
                                                  u64 flags)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags & flags) {
                        rcu_read_unlock();
                        return found;
                }
        }
        rcu_read_unlock();
        return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list)
                found->full = 0;
        rcu_read_unlock();
}

/*
 * Simple helper to search for an existing data extent at a given offset.
 * Returns 0 if the extent item exists, > 0 if it does not, < 0 on error.
 */
int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = start;
        key.offset = len;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
                                0, 0);
        btrfs_free_path(path);
        return ret;
}


/*
 * Helper function to look up the reference count and flags of a tree block.
 *
 * The head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree.  The head node
 * may also store the extent flags to set.  This way you can check what
 * the reference count and extent flags will be once all of the queued
 * delayed refs have been processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 bytenr,
                             u64 offset, int metadata, u64 *refs, u64 *flags)
{
        struct btrfs_delayed_ref_head *head;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_path *path;
        struct btrfs_extent_item *ei;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u32 item_size;
        u64 num_refs;
        u64 extent_flags;
        int ret;

        /*
         * If we don't have skinny metadata, don't bother doing anything
         * different
         */
        if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
                offset = root->nodesize;
                metadata = 0;
        }

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        if (!trans) {
                path->skip_locking = 1;
                path->search_commit_root = 1;
        }

search_again:
        key.objectid = bytenr;
        key.offset = offset;
        if (metadata)
                key.type = BTRFS_METADATA_ITEM_KEY;
        else
                key.type = BTRFS_EXTENT_ITEM_KEY;

        ret = btrfs_search_slot(trans, root->fs_info->extent_root,
                                &key, path, 0, 0);
        if (ret < 0)
                goto out_free;

        if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
                if (path->slots[0]) {
                        path->slots[0]--;
                        btrfs_item_key_to_cpu(path->nodes[0], &key,
                                              path->slots[0]);
                        if (key.objectid == bytenr &&
                            key.type == BTRFS_EXTENT_ITEM_KEY &&
                            key.offset == root->nodesize)
                                ret = 0;
                }
        }

        if (ret == 0) {
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
                if (item_size >= sizeof(*ei)) {
                        ei = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_extent_item);
                        num_refs = btrfs_extent_refs(leaf, ei);
                        extent_flags = btrfs_extent_flags(leaf, ei);
                } else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                        struct btrfs_extent_item_v0 *ei0;
                        BUG_ON(item_size != sizeof(*ei0));
                        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_item_v0);
                        num_refs = btrfs_extent_refs_v0(leaf, ei0);
                        /* FIXME: this isn't correct for data */
                        extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
                        BUG();
#endif
                }
                BUG_ON(num_refs == 0);
        } else {
                num_refs = 0;
                extent_flags = 0;
                ret = 0;
        }

        if (!trans)
                goto out;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
        head = btrfs_find_delayed_ref_head(trans, bytenr);
        if (head) {
                if (!mutex_trylock(&head->mutex)) {
                        atomic_inc(&head->node.refs);
                        spin_unlock(&delayed_refs->lock);

                        btrfs_release_path(path);

                        /*
                         * Mutex was contended, block until it's released and try
                         * again
                         */
                        mutex_lock(&head->mutex);
                        mutex_unlock(&head->mutex);
                        btrfs_put_delayed_ref(&head->node);
                        goto search_again;
                }
                spin_lock(&head->lock);
                if (head->extent_op && head->extent_op->update_flags)
                        extent_flags |= head->extent_op->flags_to_set;
                else
                        BUG_ON(num_refs == 0);

                num_refs += head->node.ref_mod;
                spin_unlock(&head->lock);
                mutex_unlock(&head->mutex);
        }
        spin_unlock(&delayed_refs->lock);
out:
        WARN_ON(num_refs == 0);
        if (refs)
                *refs = num_refs;
        if (flags)
                *flags = extent_flags;
out_free:
        btrfs_free_path(path);
        return ret;
}


/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  Implicit back refs are optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key.  This information allows us to find the block by
 * b-tree searching.  Full back refs are for pointers in tree blocks not
 * referenced by their owner trees.  The location of the tree block is
 * recorded in the back refs.  Actually the full back ref is generic, and
 * can be used in all cases where the implicit back ref is used.  The major
 * shortcoming of the full back ref is its overhead.  Every time a tree
 * block gets COWed, we have to update the back refs entry for all pointers
 * in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it.  So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for pointers
 * in the block.  Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * pointers in the block.  Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts.  The original
 * implicit back refs are inherited by the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - Objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key.  The key offset for the implicit back refs is
 * the objectid of the block's owner tree.  The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block are required.  This information is stored in
 * the tree block info structure.
 */
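
/*
 * Worked example (all values are illustrative only): a data extent at
 * bytenr 136708096 written by inode 257 at file offset 0 in subvolume 5
 * gets an implicit back ref keyed
 *
 *     (136708096, BTRFS_EXTENT_DATA_REF_KEY,
 *      hash_extent_data_ref(5, 257, 0))
 *
 * whereas a ref held through a leaf that uses full back refs is keyed by
 * the bytenr of that leaf instead:
 *
 *     (136708096, BTRFS_SHARED_DATA_REF_KEY, <bytenr of parent leaf>)
 */
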
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct btrfs_path *path,
                                  u64 owner, u32 extra_size)
{
        struct btrfs_extent_item *item;
        struct btrfs_extent_item_v0 *ei0;
        struct btrfs_extent_ref_v0 *ref0;
        struct btrfs_tree_block_info *bi;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        u32 new_size = sizeof(*item);
        u64 refs;
        int ret;

        leaf = path->nodes[0];
        BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                             struct btrfs_extent_item_v0);
        refs = btrfs_extent_refs_v0(leaf, ei0);

        if (owner == (u64)-1) {
                while (1) {
                        if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                                ret = btrfs_next_leaf(root, path);
                                if (ret < 0)
                                        return ret;
                                BUG_ON(ret > 0); /* Corruption */
                                leaf = path->nodes[0];
                        }
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0]);
                        BUG_ON(key.objectid != found_key.objectid);
                        if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
                                path->slots[0]++;
                                continue;
                        }
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                              struct btrfs_extent_ref_v0);
                        owner = btrfs_ref_objectid_v0(leaf, ref0);
                        break;
                }
        }
        btrfs_release_path(path);

        if (owner < BTRFS_FIRST_FREE_OBJECTID)
                new_size += sizeof(*bi);

        new_size -= sizeof(*ei0);
        ret = btrfs_search_slot(trans, root, &key, path,
                                new_size + extra_size, 1);
        if (ret < 0)
                return ret;
        BUG_ON(ret); /* Corruption */

        btrfs_extend_item(root, path, new_size);

        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        btrfs_set_extent_refs(leaf, item, refs);
        /* FIXME: get real generation */
        btrfs_set_extent_generation(leaf, item, 0);
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                btrfs_set_extent_flags(leaf, item,
                                       BTRFS_EXTENT_FLAG_TREE_BLOCK |
                                       BTRFS_BLOCK_FLAG_FULL_BACKREF);
                bi = (struct btrfs_tree_block_info *)(item + 1);
                /* FIXME: get first key of the block */
                memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
                btrfs_set_tree_block_level(leaf, bi, (int)owner);
        } else {
                btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
        }
        btrfs_mark_buffer_dirty(leaf);
        return 0;
}
#endif

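/*
 * Hash the (root objectid, inode objectid, file offset) triple that
 * identifies an implicit data back ref into the 64-bit key offset used
 * for BTRFS_EXTENT_DATA_REF_KEY items: crc32c of the root objectid in the
 * high bits, crc32c of owner and offset in the low bits.
 */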
static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
        u32 high_crc = ~(u32)0;
        u32 low_crc = ~(u32)0;
        __le64 lenum;

        lenum = cpu_to_le64(root_objectid);
        high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(owner);
        low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(offset);
        low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));

        return ((u64)high_crc << 31) ^ (u64)low_crc;
}

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
                                     struct btrfs_extent_data_ref *ref)
{
        return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
                                    btrfs_extent_data_ref_objectid(leaf, ref),
                                    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
                                 struct btrfs_extent_data_ref *ref,
                                 u64 root_objectid, u64 owner, u64 offset)
{
        if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
            btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
            btrfs_extent_data_ref_offset(leaf, ref) != offset)
                return 0;
        return 1;
}

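/*
 * Find the data ref item for (bytenr, root, owner, offset), or the shared
 * data ref for (bytenr, parent).  Because different triples can hash to
 * the same key offset, colliding implicit refs are stored at consecutive
 * offsets and we walk forward until one matches.  Returns 0 with the path
 * pointing at the ref, or -ENOENT.
 */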
static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid,
                                           u64 owner, u64 offset)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref;
        struct extent_buffer *leaf;
        u32 nritems;
        int ret;
        int recow;
        int err = -ENOENT;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
        }
again:
        recow = 0;
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0) {
                err = ret;
                goto fail;
        }

        if (parent) {
                if (!ret)
                        return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                key.type = BTRFS_EXTENT_REF_V0_KEY;
                btrfs_release_path(path);
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret < 0) {
                        err = ret;
                        goto fail;
                }
                if (!ret)
                        return 0;
#endif
                goto fail;
        }

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);
        while (1) {
                if (path->slots[0] >= nritems) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                err = ret;
                        if (ret)
                                goto fail;

                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid != bytenr ||
                    key.type != BTRFS_EXTENT_DATA_REF_KEY)
                        goto fail;

                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);

                if (match_extent_data_ref(leaf, ref, root_objectid,
                                          owner, offset)) {
                        if (recow) {
                                btrfs_release_path(path);
                                goto again;
                        }
                        err = 0;
                        break;
                }
                path->slots[0]++;
        }
fail:
        return err;
}

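/*
 * Insert a new data ref, or bump the count of an existing one.  For
 * implicit refs the hashed key offset may collide with a different
 * (root, owner, offset) triple, in which case we probe successive offsets
 * until we find either a matching ref or a free slot.
 */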
static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid, u64 owner,
                                           u64 offset, int refs_to_add)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        u32 size;
        u32 num_refs;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
                size = sizeof(struct btrfs_shared_data_ref);
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
                size = sizeof(struct btrfs_extent_data_ref);
        }

        ret = btrfs_insert_empty_item(trans, root, path, &key, size);
        if (ret && ret != -EEXIST)
                goto fail;

        leaf = path->nodes[0];
        if (parent) {
                struct btrfs_shared_data_ref *ref;
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_shared_data_ref);
                if (ret == 0) {
                        btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_shared_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
                }
        } else {
                struct btrfs_extent_data_ref *ref;
                while (ret == -EEXIST) {
                        ref = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_data_ref);
                        if (match_extent_data_ref(leaf, ref, root_objectid,
                                                  owner, offset))
                                break;
                        btrfs_release_path(path);
                        key.offset++;
                        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                                      size);
                        if (ret && ret != -EEXIST)
                                goto fail;

                        leaf = path->nodes[0];
                }
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);
                if (ret == 0) {
                        btrfs_set_extent_data_ref_root(leaf, ref,
                                                       root_objectid);
                        btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
                        btrfs_set_extent_data_ref_offset(leaf, ref, offset);
                        btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_extent_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
                }
        }
        btrfs_mark_buffer_dirty(leaf);
        ret = 0;
fail:
        btrfs_release_path(path);
        return ret;
}

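/*
 * Drop refs_to_drop references from the data ref item the path points at.
 * The item is deleted once its count reaches zero, which is reported
 * through *last_ref.
 */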
static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           int refs_to_drop, int *last_ref)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref1 = NULL;
        struct btrfs_shared_data_ref *ref2 = NULL;
        struct extent_buffer *leaf;
        u32 num_refs = 0;
        int ret = 0;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

        if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
                ref1 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_data_ref);
                num_refs = btrfs_extent_data_ref_count(leaf, ref1);
        } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
                ref2 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_shared_data_ref);
                num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
                struct btrfs_extent_ref_v0 *ref0;
                ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_ref_v0);
                num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
        } else {
                BUG();
        }

        BUG_ON(num_refs < refs_to_drop);
        num_refs -= refs_to_drop;

        if (num_refs == 0) {
                ret = btrfs_del_item(trans, root, path);
                *last_ref = 1;
        } else {
                if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
                        btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
                else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
                        btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                else {
                        struct btrfs_extent_ref_v0 *ref0;
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                        struct btrfs_extent_ref_v0);
                        btrfs_set_ref_count_v0(leaf, ref0, num_refs);
                }
#endif
                btrfs_mark_buffer_dirty(leaf);
        }
        return ret;
}

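/*
 * Return the reference count stored in the data ref at the current path
 * position, or in the given inline ref, handling both the implicit and
 * the shared variants.
 */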
static noinline u32 extent_data_ref_count(struct btrfs_path *path,
                                          struct btrfs_extent_inline_ref *iref)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        struct btrfs_extent_data_ref *ref1;
        struct btrfs_shared_data_ref *ref2;
        u32 num_refs = 0;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        if (iref) {
                if (btrfs_extent_inline_ref_type(leaf, iref) ==
                    BTRFS_EXTENT_DATA_REF_KEY) {
                        ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
                        num_refs = btrfs_extent_data_ref_count(leaf, ref1);
                } else {
                        ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
                        num_refs = btrfs_shared_data_ref_count(leaf, ref2);
                }
        } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
                ref1 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_data_ref);
                num_refs = btrfs_extent_data_ref_count(leaf, ref1);
        } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
                ref2 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_shared_data_ref);
                num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
                struct btrfs_extent_ref_v0 *ref0;
                ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_ref_v0);
                num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
        } else {
                WARN_ON(1);
        }
        return num_refs;
}

1360 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1361                                           struct btrfs_root *root,
1362                                           struct btrfs_path *path,
1363                                           u64 bytenr, u64 parent,
1364                                           u64 root_objectid)
1365 {
1366         struct btrfs_key key;
1367         int ret;
1368
1369         key.objectid = bytenr;
1370         if (parent) {
1371                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1372                 key.offset = parent;
1373         } else {
1374                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1375                 key.offset = root_objectid;
1376         }
1377
1378         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1379         if (ret > 0)
1380                 ret = -ENOENT;
1381 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1382         if (ret == -ENOENT && parent) {
1383                 btrfs_release_path(path);
1384                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1385                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1386                 if (ret > 0)
1387                         ret = -ENOENT;
1388         }
1389 #endif
1390         return ret;
1391 }
1392
1393 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1394                                           struct btrfs_root *root,
1395                                           struct btrfs_path *path,
1396                                           u64 bytenr, u64 parent,
1397                                           u64 root_objectid)
1398 {
1399         struct btrfs_key key;
1400         int ret;
1401
1402         key.objectid = bytenr;
1403         if (parent) {
1404                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1405                 key.offset = parent;
1406         } else {
1407                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1408                 key.offset = root_objectid;
1409         }
1410
1411         ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1412         btrfs_release_path(path);
1413         return ret;
1414 }
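
/*
 * Quick reference for the key layout used by the two tree block ref
 * helpers above (as derived from the code):
 *
 *	full back ref:   (bytenr, BTRFS_SHARED_BLOCK_REF_KEY, parent bytenr)
 *	normal back ref: (bytenr, BTRFS_TREE_BLOCK_REF_KEY, root objectid)
 *
 * Neither item carries a payload; everything lives in the key itself,
 * which is why insert_tree_block_ref() inserts an empty item.
 */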
1415
1416 static inline int extent_ref_type(u64 parent, u64 owner)
1417 {
1418         int type;
1419         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1420                 if (parent > 0)
1421                         type = BTRFS_SHARED_BLOCK_REF_KEY;
1422                 else
1423                         type = BTRFS_TREE_BLOCK_REF_KEY;
1424         } else {
1425                 if (parent > 0)
1426                         type = BTRFS_SHARED_DATA_REF_KEY;
1427                 else
1428                         type = BTRFS_EXTENT_DATA_REF_KEY;
1429         }
1430         return type;
1431 }
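
/*
 * Summary of the mapping above: tree blocks (owner below
 * BTRFS_FIRST_FREE_OBJECTID) get BTRFS_SHARED_BLOCK_REF_KEY when a parent
 * block is given and BTRFS_TREE_BLOCK_REF_KEY otherwise; data extents get
 * BTRFS_SHARED_DATA_REF_KEY and BTRFS_EXTENT_DATA_REF_KEY in the same two
 * cases.
 */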
1432
1433 static int find_next_key(struct btrfs_path *path, int level,
1434                          struct btrfs_key *key)
1436 {
1437         for (; level < BTRFS_MAX_LEVEL; level++) {
1438                 if (!path->nodes[level])
1439                         break;
1440                 if (path->slots[level] + 1 >=
1441                     btrfs_header_nritems(path->nodes[level]))
1442                         continue;
1443                 if (level == 0)
1444                         btrfs_item_key_to_cpu(path->nodes[level], key,
1445                                               path->slots[level] + 1);
1446                 else
1447                         btrfs_node_key_to_cpu(path->nodes[level], key,
1448                                               path->slots[level] + 1);
1449                 return 0;
1450         }
1451         return 1;
1452 }
1453
1454 /*
1455  * Look for an inline back ref. If the back ref is found, *ref_ret is set
1456  * to the address of the inline back ref, and 0 is returned.
1457  *
1458  * If the back ref isn't found, *ref_ret is set to the address where it
1459  * should be inserted, and -ENOENT is returned.
1460  *
1461  * If insert is true and there are too many inline back refs, the path
1462  * points to the extent item, and -EAGAIN is returned.
1463  *
1464  * NOTE: inline back refs are ordered in the same way that back ref
1465  *       items in the tree are ordered.
1466  */
1467 static noinline_for_stack
1468 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1469                                  struct btrfs_root *root,
1470                                  struct btrfs_path *path,
1471                                  struct btrfs_extent_inline_ref **ref_ret,
1472                                  u64 bytenr, u64 num_bytes,
1473                                  u64 parent, u64 root_objectid,
1474                                  u64 owner, u64 offset, int insert)
1475 {
1476         struct btrfs_key key;
1477         struct extent_buffer *leaf;
1478         struct btrfs_extent_item *ei;
1479         struct btrfs_extent_inline_ref *iref;
1480         u64 flags;
1481         u64 item_size;
1482         unsigned long ptr;
1483         unsigned long end;
1484         int extra_size;
1485         int type;
1486         int want;
1487         int ret;
1488         int err = 0;
1489         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
1490                                                  SKINNY_METADATA);
1491
1492         key.objectid = bytenr;
1493         key.type = BTRFS_EXTENT_ITEM_KEY;
1494         key.offset = num_bytes;
1495
1496         want = extent_ref_type(parent, owner);
1497         if (insert) {
1498                 extra_size = btrfs_extent_inline_ref_size(want);
1499                 path->keep_locks = 1;
1500         } else
1501                 extra_size = -1;
1502
1503         /*
1504          * Owner is our parent level, so we can just add one to get the level
1505          * for the block we are interested in.
1506          */
1507         if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
1508                 key.type = BTRFS_METADATA_ITEM_KEY;
1509                 key.offset = owner;
1510         }
1511
1512 again:
1513         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1514         if (ret < 0) {
1515                 err = ret;
1516                 goto out;
1517         }
1518
1519         /*
1520          * We may be a newly converted file system which still has the old fat
1521          * extent entries for metadata, so try to see if we have one of those.
1522          */
1523         if (ret > 0 && skinny_metadata) {
1524                 skinny_metadata = false;
1525                 if (path->slots[0]) {
1526                         path->slots[0]--;
1527                         btrfs_item_key_to_cpu(path->nodes[0], &key,
1528                                               path->slots[0]);
1529                         if (key.objectid == bytenr &&
1530                             key.type == BTRFS_EXTENT_ITEM_KEY &&
1531                             key.offset == num_bytes)
1532                                 ret = 0;
1533                 }
1534                 if (ret) {
1535                         key.objectid = bytenr;
1536                         key.type = BTRFS_EXTENT_ITEM_KEY;
1537                         key.offset = num_bytes;
1538                         btrfs_release_path(path);
1539                         goto again;
1540                 }
1541         }
1542
1543         if (ret && !insert) {
1544                 err = -ENOENT;
1545                 goto out;
1546         } else if (WARN_ON(ret)) {
1547                 err = -EIO;
1548                 goto out;
1549         }
1550
1551         leaf = path->nodes[0];
1552         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1553 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1554         if (item_size < sizeof(*ei)) {
1555                 if (!insert) {
1556                         err = -ENOENT;
1557                         goto out;
1558                 }
1559                 ret = convert_extent_item_v0(trans, root, path, owner,
1560                                              extra_size);
1561                 if (ret < 0) {
1562                         err = ret;
1563                         goto out;
1564                 }
1565                 leaf = path->nodes[0];
1566                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1567         }
1568 #endif
1569         BUG_ON(item_size < sizeof(*ei));
1570
1571         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1572         flags = btrfs_extent_flags(leaf, ei);
1573
1574         ptr = (unsigned long)(ei + 1);
1575         end = (unsigned long)ei + item_size;
1576
1577         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
1578                 ptr += sizeof(struct btrfs_tree_block_info);
1579                 BUG_ON(ptr > end);
1580         }
1581
1582         err = -ENOENT;
1583         while (1) {
1584                 if (ptr >= end) {
1585                         WARN_ON(ptr > end);
1586                         break;
1587                 }
1588                 iref = (struct btrfs_extent_inline_ref *)ptr;
1589                 type = btrfs_extent_inline_ref_type(leaf, iref);
1590                 if (want < type)
1591                         break;
1592                 if (want > type) {
1593                         ptr += btrfs_extent_inline_ref_size(type);
1594                         continue;
1595                 }
1596
1597                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1598                         struct btrfs_extent_data_ref *dref;
1599                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1600                         if (match_extent_data_ref(leaf, dref, root_objectid,
1601                                                   owner, offset)) {
1602                                 err = 0;
1603                                 break;
1604                         }
1605                         if (hash_extent_data_ref_item(leaf, dref) <
1606                             hash_extent_data_ref(root_objectid, owner, offset))
1607                                 break;
1608                 } else {
1609                         u64 ref_offset;
1610                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1611                         if (parent > 0) {
1612                                 if (parent == ref_offset) {
1613                                         err = 0;
1614                                         break;
1615                                 }
1616                                 if (ref_offset < parent)
1617                                         break;
1618                         } else {
1619                                 if (root_objectid == ref_offset) {
1620                                         err = 0;
1621                                         break;
1622                                 }
1623                                 if (ref_offset < root_objectid)
1624                                         break;
1625                         }
1626                 }
1627                 ptr += btrfs_extent_inline_ref_size(type);
1628         }
1629         if (err == -ENOENT && insert) {
1630                 if (item_size + extra_size >=
1631                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1632                         err = -EAGAIN;
1633                         goto out;
1634                 }
1635                 /*
1636                  * To add a new inline back ref, we have to make sure
1637                  * there is no corresponding back ref item.
1638                  * For simplicity, we just do not add a new inline back
1639                  * ref if there is any kind of item for this block.
1640                  */
1641                 if (find_next_key(path, 0, &key) == 0 &&
1642                     key.objectid == bytenr &&
1643                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1644                         err = -EAGAIN;
1645                         goto out;
1646                 }
1647         }
1648         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1649 out:
1650         if (insert) {
1651                 path->keep_locks = 0;
1652                 btrfs_unlock_up_safe(path, 1);
1653         }
1654         return err;
1655 }
1656
1657 /*
1658  * helper to add a new inline back ref
1659  */
1660 static noinline_for_stack
1661 void setup_inline_extent_backref(struct btrfs_root *root,
1662                                  struct btrfs_path *path,
1663                                  struct btrfs_extent_inline_ref *iref,
1664                                  u64 parent, u64 root_objectid,
1665                                  u64 owner, u64 offset, int refs_to_add,
1666                                  struct btrfs_delayed_extent_op *extent_op)
1667 {
1668         struct extent_buffer *leaf;
1669         struct btrfs_extent_item *ei;
1670         unsigned long ptr;
1671         unsigned long end;
1672         unsigned long item_offset;
1673         u64 refs;
1674         int size;
1675         int type;
1676
1677         leaf = path->nodes[0];
1678         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1679         item_offset = (unsigned long)iref - (unsigned long)ei;
1680
1681         type = extent_ref_type(parent, owner);
1682         size = btrfs_extent_inline_ref_size(type);
1683
1684         btrfs_extend_item(root, path, size);
1685
1686         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1687         refs = btrfs_extent_refs(leaf, ei);
1688         refs += refs_to_add;
1689         btrfs_set_extent_refs(leaf, ei, refs);
1690         if (extent_op)
1691                 __run_delayed_extent_op(extent_op, leaf, ei);
1692
1693         ptr = (unsigned long)ei + item_offset;
1694         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1695         if (ptr < end - size)
1696                 memmove_extent_buffer(leaf, ptr + size, ptr,
1697                                       end - size - ptr);
1698
1699         iref = (struct btrfs_extent_inline_ref *)ptr;
1700         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1701         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1702                 struct btrfs_extent_data_ref *dref;
1703                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1704                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1705                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1706                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1707                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1708         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1709                 struct btrfs_shared_data_ref *sref;
1710                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1711                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1712                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1713         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1714                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1715         } else {
1716                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1717         }
1718         btrfs_mark_buffer_dirty(leaf);
1719 }
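
/*
 * For reference, the item layout this helper edits looks roughly like
 * (non-skinny tree block case):
 *
 *	btrfs_extent_item | btrfs_tree_block_info | inline ref | inline ref...
 *
 * btrfs_extend_item() grows the item, the memmove above shifts any inline
 * refs that sort after the new one, and the new ref is then written into
 * the gap at item_offset.
 */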
1720
1721 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1722                                  struct btrfs_root *root,
1723                                  struct btrfs_path *path,
1724                                  struct btrfs_extent_inline_ref **ref_ret,
1725                                  u64 bytenr, u64 num_bytes, u64 parent,
1726                                  u64 root_objectid, u64 owner, u64 offset)
1727 {
1728         int ret;
1729
1730         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1731                                            bytenr, num_bytes, parent,
1732                                            root_objectid, owner, offset, 0);
1733         if (ret != -ENOENT)
1734                 return ret;
1735
1736         btrfs_release_path(path);
1737         *ref_ret = NULL;
1738
1739         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1740                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1741                                             root_objectid);
1742         } else {
1743                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1744                                              root_objectid, owner, offset);
1745         }
1746         return ret;
1747 }
1748
1749 /*
1750  * helper to update/remove inline back ref
1751  */
1752 static noinline_for_stack
1753 void update_inline_extent_backref(struct btrfs_root *root,
1754                                   struct btrfs_path *path,
1755                                   struct btrfs_extent_inline_ref *iref,
1756                                   int refs_to_mod,
1757                                   struct btrfs_delayed_extent_op *extent_op,
1758                                   int *last_ref)
1759 {
1760         struct extent_buffer *leaf;
1761         struct btrfs_extent_item *ei;
1762         struct btrfs_extent_data_ref *dref = NULL;
1763         struct btrfs_shared_data_ref *sref = NULL;
1764         unsigned long ptr;
1765         unsigned long end;
1766         u32 item_size;
1767         int size;
1768         int type;
1769         u64 refs;
1770
1771         leaf = path->nodes[0];
1772         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1773         refs = btrfs_extent_refs(leaf, ei);
1774         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1775         refs += refs_to_mod;
1776         btrfs_set_extent_refs(leaf, ei, refs);
1777         if (extent_op)
1778                 __run_delayed_extent_op(extent_op, leaf, ei);
1779
1780         type = btrfs_extent_inline_ref_type(leaf, iref);
1781
1782         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1783                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1784                 refs = btrfs_extent_data_ref_count(leaf, dref);
1785         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1786                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1787                 refs = btrfs_shared_data_ref_count(leaf, sref);
1788         } else {
1789                 refs = 1;
1790                 BUG_ON(refs_to_mod != -1);
1791         }
1792
1793         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1794         refs += refs_to_mod;
1795
1796         if (refs > 0) {
1797                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1798                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1799                 else
1800                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1801         } else {
1802                 *last_ref = 1;
1803                 size = btrfs_extent_inline_ref_size(type);
1804                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1805                 ptr = (unsigned long)iref;
1806                 end = (unsigned long)ei + item_size;
1807                 if (ptr + size < end)
1808                         memmove_extent_buffer(leaf, ptr, ptr + size,
1809                                               end - ptr - size);
1810                 item_size -= size;
1811                 btrfs_truncate_item(root, path, item_size, 1);
1812         }
1813         btrfs_mark_buffer_dirty(leaf);
1814 }
1815
1816 static noinline_for_stack
1817 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1818                                  struct btrfs_root *root,
1819                                  struct btrfs_path *path,
1820                                  u64 bytenr, u64 num_bytes, u64 parent,
1821                                  u64 root_objectid, u64 owner,
1822                                  u64 offset, int refs_to_add,
1823                                  struct btrfs_delayed_extent_op *extent_op)
1824 {
1825         struct btrfs_extent_inline_ref *iref;
1826         int ret;
1827
1828         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1829                                            bytenr, num_bytes, parent,
1830                                            root_objectid, owner, offset, 1);
1831         if (ret == 0) {
1832                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1833                 update_inline_extent_backref(root, path, iref,
1834                                              refs_to_add, extent_op, NULL);
1835         } else if (ret == -ENOENT) {
1836                 setup_inline_extent_backref(root, path, iref, parent,
1837                                             root_objectid, owner, offset,
1838                                             refs_to_add, extent_op);
1839                 ret = 0;
1840         }
1841         return ret;
1842 }
1843
1844 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1845                                  struct btrfs_root *root,
1846                                  struct btrfs_path *path,
1847                                  u64 bytenr, u64 parent, u64 root_objectid,
1848                                  u64 owner, u64 offset, int refs_to_add)
1849 {
1850         int ret;
1851         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1852                 BUG_ON(refs_to_add != 1);
1853                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1854                                             parent, root_objectid);
1855         } else {
1856                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1857                                              parent, root_objectid,
1858                                              owner, offset, refs_to_add);
1859         }
1860         return ret;
1861 }
1862
1863 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1864                                  struct btrfs_root *root,
1865                                  struct btrfs_path *path,
1866                                  struct btrfs_extent_inline_ref *iref,
1867                                  int refs_to_drop, int is_data, int *last_ref)
1868 {
1869         int ret = 0;
1870
1871         BUG_ON(!is_data && refs_to_drop != 1);
1872         if (iref) {
1873                 update_inline_extent_backref(root, path, iref,
1874                                              -refs_to_drop, NULL, last_ref);
1875         } else if (is_data) {
1876                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop,
1877                                              last_ref);
1878         } else {
1879                 *last_ref = 1;
1880                 ret = btrfs_del_item(trans, root, path);
1881         }
1882         return ret;
1883 }
1884
1885 #define in_range(b, first, len)        ((b) >= (first) && (b) < (first) + (len))
1886 static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
1887                                u64 *discarded_bytes)
1888 {
1889         int j, ret = 0;
1890         u64 bytes_left, end;
1891         u64 aligned_start = ALIGN(start, 1 << 9);
1892
1893         if (WARN_ON(start != aligned_start)) {
1894                 len -= aligned_start - start;
1895                 len = round_down(len, 1 << 9);
1896                 start = aligned_start;
1897         }
1898
1899         *discarded_bytes = 0;
1900
1901         if (!len)
1902                 return 0;
1903
1904         end = start + len;
1905         bytes_left = len;
1906
1907         /* Skip any superblocks on this device. */
1908         for (j = 0; j < BTRFS_SUPER_MIRROR_MAX; j++) {
1909                 u64 sb_start = btrfs_sb_offset(j);
1910                 u64 sb_end = sb_start + BTRFS_SUPER_INFO_SIZE;
1911                 u64 size = sb_start - start;
1912
1913                 if (!in_range(sb_start, start, bytes_left) &&
1914                     !in_range(sb_end, start, bytes_left) &&
1915                     !in_range(start, sb_start, BTRFS_SUPER_INFO_SIZE))
1916                         continue;
1917
1918                 /*
1919                  * Superblock spans beginning of range.  Adjust start and
1920                  * try again.
1921                  */
1922                 if (sb_start <= start) {
1923                         start += sb_end - start;
1924                         if (start > end) {
1925                                 bytes_left = 0;
1926                                 break;
1927                         }
1928                         bytes_left = end - start;
1929                         continue;
1930                 }
1931
1932                 if (size) {
1933                         ret = blkdev_issue_discard(bdev, start >> 9, size >> 9,
1934                                                    GFP_NOFS, 0);
1935                         if (!ret)
1936                                 *discarded_bytes += size;
1937                         else if (ret != -EOPNOTSUPP)
1938                                 return ret;
1939                 }
1940
1941                 start = sb_end;
1942                 if (start > end) {
1943                         bytes_left = 0;
1944                         break;
1945                 }
1946                 bytes_left = end - start;
1947         }
1948
1949         if (bytes_left) {
1950                 ret = blkdev_issue_discard(bdev, start >> 9, bytes_left >> 9,
1951                                            GFP_NOFS, 0);
1952                 if (!ret)
1953                         *discarded_bytes += bytes_left;
1954         }
1955         return ret;
1956 }
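
/*
 * A worked example of the superblock skipping above, assuming the usual
 * btrfs superblock copies at 64KiB, 64MiB and 256GiB, 4KiB each: a discard
 * of [0, 1MiB) first trims [0, 64KiB), restarts just past the primary
 * superblock at 68KiB, and then trims [68KiB, 1MiB) in the final call,
 * since the remaining mirrors lie beyond the range.
 */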
1957
1958 int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1959                          u64 num_bytes, u64 *actual_bytes)
1960 {
1961         int ret;
1962         u64 discarded_bytes = 0;
1963         struct btrfs_bio *bbio = NULL;
1964
1966         /* Tell the block device(s) that the sectors can be discarded */
1967         ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
1968                               bytenr, &num_bytes, &bbio, 0);
1969         /* Error condition is -ENOMEM */
1970         if (!ret) {
1971                 struct btrfs_bio_stripe *stripe = bbio->stripes;
1972                 int i;
1973
1975                 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
1976                         u64 bytes;
1977                         if (!stripe->dev->can_discard)
1978                                 continue;
1979
1980                         ret = btrfs_issue_discard(stripe->dev->bdev,
1981                                                   stripe->physical,
1982                                                   stripe->length,
1983                                                   &bytes);
1984                         if (!ret)
1985                                 discarded_bytes += bytes;
1986                         else if (ret != -EOPNOTSUPP)
1987                         break; /* Logic errors or -ENOMEM, or -EIO, though it's unclear how -EIO could happen here */
1988
1989                         /*
1990                          * In case we get back EOPNOTSUPP for some reason,
1991                          * ignore the return value so we don't screw up
1992                          * callers of discard_extent.
1993                          */
1994                         ret = 0;
1995                 }
1996                 btrfs_put_bbio(bbio);
1997         }
1998
1999         if (actual_bytes)
2000                 *actual_bytes = discarded_bytes;
2001
2003         if (ret == -EOPNOTSUPP)
2004                 ret = 0;
2005         return ret;
2006 }
2007
2008 /* Can return -ENOMEM */
2009 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
2010                          struct btrfs_root *root,
2011                          u64 bytenr, u64 num_bytes, u64 parent,
2012                          u64 root_objectid, u64 owner, u64 offset,
2013                          int no_quota)
2014 {
2015         int ret;
2016         struct btrfs_fs_info *fs_info = root->fs_info;
2017
2018         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
2019                root_objectid == BTRFS_TREE_LOG_OBJECTID);
2020
2021         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
2022                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
2023                                         num_bytes,
2024                                         parent, root_objectid, (int)owner,
2025                                         BTRFS_ADD_DELAYED_REF, NULL, no_quota);
2026         } else {
2027                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
2028                                         num_bytes,
2029                                         parent, root_objectid, owner, offset,
2030                                         BTRFS_ADD_DELAYED_REF, NULL, no_quota);
2031         }
2032         return ret;
2033 }
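
/*
 * A hedged usage sketch (not taken from this file; disk_bytenr,
 * disk_num_bytes and file_offset are illustrative names): a caller that
 * has just cloned a file extent into another file would add a normal back
 * ref with roughly
 *
 *	ret = btrfs_inc_extent_ref(trans, root, disk_bytenr, disk_num_bytes,
 *				   0, root->root_key.objectid,
 *				   btrfs_ino(inode), file_offset, 0);
 *
 * i.e. no parent, keyed on root/inode/offset. The actual extent tree
 * update happens later, when the queued delayed ref is run.
 */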
2034
2035 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
2036                                   struct btrfs_root *root,
2037                                   struct btrfs_delayed_ref_node *node,
2038                                   u64 parent, u64 root_objectid,
2039                                   u64 owner, u64 offset, int refs_to_add,
2040                                   struct btrfs_delayed_extent_op *extent_op)
2041 {
2042         struct btrfs_fs_info *fs_info = root->fs_info;
2043         struct btrfs_path *path;
2044         struct extent_buffer *leaf;
2045         struct btrfs_extent_item *item;
2046         struct btrfs_key key;
2047         u64 bytenr = node->bytenr;
2048         u64 num_bytes = node->num_bytes;
2049         u64 refs;
2050         int ret;
2051         int no_quota = node->no_quota;
2052
2053         path = btrfs_alloc_path();
2054         if (!path)
2055                 return -ENOMEM;
2056
2057         if (!is_fstree(root_objectid) || !root->fs_info->quota_enabled)
2058                 no_quota = 1;
2059
2060         path->reada = 1;
2061         path->leave_spinning = 1;
2062         /* this will set up the path even if it fails to insert the back ref */
2063         ret = insert_inline_extent_backref(trans, fs_info->extent_root, path,
2064                                            bytenr, num_bytes, parent,
2065                                            root_objectid, owner, offset,
2066                                            refs_to_add, extent_op);
2067         if ((ret < 0 && ret != -EAGAIN) || !ret)
2068                 goto out;
2069
2070         /*
2071          * Ok, we had -EAGAIN, which means we didn't have space to insert an
2072          * inline extent ref, so just update the reference count and add a
2073          * normal backref.
2074          */
2075         leaf = path->nodes[0];
2076         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2077         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2078         refs = btrfs_extent_refs(leaf, item);
2079         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
2080         if (extent_op)
2081                 __run_delayed_extent_op(extent_op, leaf, item);
2082
2083         btrfs_mark_buffer_dirty(leaf);
2084         btrfs_release_path(path);
2085
2086         path->reada = 1;
2087         path->leave_spinning = 1;
2088         /* now insert the actual backref */
2089         ret = insert_extent_backref(trans, root->fs_info->extent_root,
2090                                     path, bytenr, parent, root_objectid,
2091                                     owner, offset, refs_to_add);
2092         if (ret)
2093                 btrfs_abort_transaction(trans, root, ret);
2094 out:
2095         btrfs_free_path(path);
2096         return ret;
2097 }
2098
2099 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
2100                                 struct btrfs_root *root,
2101                                 struct btrfs_delayed_ref_node *node,
2102                                 struct btrfs_delayed_extent_op *extent_op,
2103                                 int insert_reserved)
2104 {
2105         int ret = 0;
2106         struct btrfs_delayed_data_ref *ref;
2107         struct btrfs_key ins;
2108         u64 parent = 0;
2109         u64 ref_root = 0;
2110         u64 flags = 0;
2111
2112         ins.objectid = node->bytenr;
2113         ins.offset = node->num_bytes;
2114         ins.type = BTRFS_EXTENT_ITEM_KEY;
2115
2116         ref = btrfs_delayed_node_to_data_ref(node);
2117         trace_run_delayed_data_ref(node, ref, node->action);
2118
2119         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
2120                 parent = ref->parent;
2121         ref_root = ref->root;
2122
2123         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2124                 if (extent_op)
2125                         flags |= extent_op->flags_to_set;
2126                 ret = alloc_reserved_file_extent(trans, root,
2127                                                  parent, ref_root, flags,
2128                                                  ref->objectid, ref->offset,
2129                                                  &ins, node->ref_mod);
2130         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2131                 ret = __btrfs_inc_extent_ref(trans, root, node, parent,
2132                                              ref_root, ref->objectid,
2133                                              ref->offset, node->ref_mod,
2134                                              extent_op);
2135         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2136                 ret = __btrfs_free_extent(trans, root, node, parent,
2137                                           ref_root, ref->objectid,
2138                                           ref->offset, node->ref_mod,
2139                                           extent_op);
2140         } else {
2141                 BUG();
2142         }
2143         return ret;
2144 }
2145
2146 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2147                                     struct extent_buffer *leaf,
2148                                     struct btrfs_extent_item *ei)
2149 {
2150         u64 flags = btrfs_extent_flags(leaf, ei);
2151         if (extent_op->update_flags) {
2152                 flags |= extent_op->flags_to_set;
2153                 btrfs_set_extent_flags(leaf, ei, flags);
2154         }
2155
2156         if (extent_op->update_key) {
2157                 struct btrfs_tree_block_info *bi;
2158                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2159                 bi = (struct btrfs_tree_block_info *)(ei + 1);
2160                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2161         }
2162 }
2163
2164 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2165                                  struct btrfs_root *root,
2166                                  struct btrfs_delayed_ref_node *node,
2167                                  struct btrfs_delayed_extent_op *extent_op)
2168 {
2169         struct btrfs_key key;
2170         struct btrfs_path *path;
2171         struct btrfs_extent_item *ei;
2172         struct extent_buffer *leaf;
2173         u32 item_size;
2174         int ret;
2175         int err = 0;
2176         int metadata = !extent_op->is_data;
2177
2178         if (trans->aborted)
2179                 return 0;
2180
2181         if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2182                 metadata = 0;
2183
2184         path = btrfs_alloc_path();
2185         if (!path)
2186                 return -ENOMEM;
2187
2188         key.objectid = node->bytenr;
2189
2190         if (metadata) {
2191                 key.type = BTRFS_METADATA_ITEM_KEY;
2192                 key.offset = extent_op->level;
2193         } else {
2194                 key.type = BTRFS_EXTENT_ITEM_KEY;
2195                 key.offset = node->num_bytes;
2196         }
2197
2198 again:
2199         path->reada = 1;
2200         path->leave_spinning = 1;
2201         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2202                                 path, 0, 1);
2203         if (ret < 0) {
2204                 err = ret;
2205                 goto out;
2206         }
2207         if (ret > 0) {
2208                 if (metadata) {
2209                         if (path->slots[0] > 0) {
2210                                 path->slots[0]--;
2211                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
2212                                                       path->slots[0]);
2213                                 if (key.objectid == node->bytenr &&
2214                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
2215                                     key.offset == node->num_bytes)
2216                                         ret = 0;
2217                         }
2218                         if (ret > 0) {
2219                                 btrfs_release_path(path);
2220                                 metadata = 0;
2221
2222                                 key.objectid = node->bytenr;
2223                                 key.offset = node->num_bytes;
2224                                 key.type = BTRFS_EXTENT_ITEM_KEY;
2225                                 goto again;
2226                         }
2227                 } else {
2228                         err = -EIO;
2229                         goto out;
2230                 }
2231         }
2232
2233         leaf = path->nodes[0];
2234         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2235 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2236         if (item_size < sizeof(*ei)) {
2237                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2238                                              path, (u64)-1, 0);
2239                 if (ret < 0) {
2240                         err = ret;
2241                         goto out;
2242                 }
2243                 leaf = path->nodes[0];
2244                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2245         }
2246 #endif
2247         BUG_ON(item_size < sizeof(*ei));
2248         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2249         __run_delayed_extent_op(extent_op, leaf, ei);
2250
2251         btrfs_mark_buffer_dirty(leaf);
2252 out:
2253         btrfs_free_path(path);
2254         return err;
2255 }
2256
2257 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2258                                 struct btrfs_root *root,
2259                                 struct btrfs_delayed_ref_node *node,
2260                                 struct btrfs_delayed_extent_op *extent_op,
2261                                 int insert_reserved)
2262 {
2263         int ret = 0;
2264         struct btrfs_delayed_tree_ref *ref;
2265         struct btrfs_key ins;
2266         u64 parent = 0;
2267         u64 ref_root = 0;
2268         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
2269                                                  SKINNY_METADATA);
2270
2271         ref = btrfs_delayed_node_to_tree_ref(node);
2272         trace_run_delayed_tree_ref(node, ref, node->action);
2273
2274         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2275                 parent = ref->parent;
2276         ref_root = ref->root;
2277
2278         ins.objectid = node->bytenr;
2279         if (skinny_metadata) {
2280                 ins.offset = ref->level;
2281                 ins.type = BTRFS_METADATA_ITEM_KEY;
2282         } else {
2283                 ins.offset = node->num_bytes;
2284                 ins.type = BTRFS_EXTENT_ITEM_KEY;
2285         }
2286
2287         BUG_ON(node->ref_mod != 1);
2288         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2289                 BUG_ON(!extent_op || !extent_op->update_flags);
2290                 ret = alloc_reserved_tree_block(trans, root,
2291                                                 parent, ref_root,
2292                                                 extent_op->flags_to_set,
2293                                                 &extent_op->key,
2294                                                 ref->level, &ins,
2295                                                 node->no_quota);
2296         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2297                 ret = __btrfs_inc_extent_ref(trans, root, node,
2298                                              parent, ref_root,
2299                                              ref->level, 0, 1,
2300                                              extent_op);
2301         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2302                 ret = __btrfs_free_extent(trans, root, node,
2303                                           parent, ref_root,
2304                                           ref->level, 0, 1, extent_op);
2305         } else {
2306                 BUG();
2307         }
2308         return ret;
2309 }
2310
2311 /* helper function to actually process a single delayed ref entry */
2312 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2313                                struct btrfs_root *root,
2314                                struct btrfs_delayed_ref_node *node,
2315                                struct btrfs_delayed_extent_op *extent_op,
2316                                int insert_reserved)
2317 {
2318         int ret = 0;
2319
2320         if (trans->aborted) {
2321                 if (insert_reserved)
2322                         btrfs_pin_extent(root, node->bytenr,
2323                                          node->num_bytes, 1);
2324                 return 0;
2325         }
2326
2327         if (btrfs_delayed_ref_is_head(node)) {
2328                 struct btrfs_delayed_ref_head *head;
2329                 /*
2330                  * we've hit the end of the chain and we were supposed
2331                  * to insert this extent into the tree.  But, it got
2332                  * deleted before we ever needed to insert it, so all
2333                  * we have to do is clean up the accounting
2334                  */
2335                 BUG_ON(extent_op);
2336                 head = btrfs_delayed_node_to_head(node);
2337                 trace_run_delayed_ref_head(node, head, node->action);
2338
2339                 if (insert_reserved) {
2340                         btrfs_pin_extent(root, node->bytenr,
2341                                          node->num_bytes, 1);
2342                         if (head->is_data) {
2343                                 ret = btrfs_del_csums(trans, root,
2344                                                       node->bytenr,
2345                                                       node->num_bytes);
2346                         }
2347                 }
2348                 return ret;
2349         }
2350
2351         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2352             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2353                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2354                                            insert_reserved);
2355         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2356                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2357                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2358                                            insert_reserved);
2359         else
2360                 BUG();
2361         return ret;
2362 }
2363
2364 static inline struct btrfs_delayed_ref_node *
2365 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2366 {
2367         struct btrfs_delayed_ref_node *ref;
2368
2369         if (list_empty(&head->ref_list))
2370                 return NULL;
2371
2372         /*
2373          * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
2374          * This is to prevent a ref count from going down to zero, which deletes
2375          * the extent item from the extent tree, when there still are references
2376          * to add, which would fail because they would not find the extent item.
2377          */
2378         list_for_each_entry(ref, &head->ref_list, list) {
2379                 if (ref->action == BTRFS_ADD_DELAYED_REF)
2380                         return ref;
2381         }
2382
2383         return list_entry(head->ref_list.next, struct btrfs_delayed_ref_node,
2384                           list);
2385 }
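
/*
 * Example of why ADD wins above: suppose an extent has a single reference
 * left and the head's list holds [DROP -1, ADD +1]. Running the DROP first
 * would delete the extent item from the extent tree, and the following ADD
 * would then fail to find it. Running the ADD first keeps the item alive
 * the whole time, and both updates succeed.
 */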
2386
2387 /*
2388  * Returns 0 on success or if called with an already aborted transaction.
2389  * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2390  */
2391 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2392                                              struct btrfs_root *root,
2393                                              unsigned long nr)
2394 {
2395         struct btrfs_delayed_ref_root *delayed_refs;
2396         struct btrfs_delayed_ref_node *ref;
2397         struct btrfs_delayed_ref_head *locked_ref = NULL;
2398         struct btrfs_delayed_extent_op *extent_op;
2399         struct btrfs_fs_info *fs_info = root->fs_info;
2400         ktime_t start = ktime_get();
2401         int ret;
2402         unsigned long count = 0;
2403         unsigned long actual_count = 0;
2404         int must_insert_reserved = 0;
2405
2406         delayed_refs = &trans->transaction->delayed_refs;
2407         while (1) {
2408                 if (!locked_ref) {
2409                         if (count >= nr)
2410                                 break;
2411
2412                         spin_lock(&delayed_refs->lock);
2413                         locked_ref = btrfs_select_ref_head(trans);
2414                         if (!locked_ref) {
2415                                 spin_unlock(&delayed_refs->lock);
2416                                 break;
2417                         }
2418
2419                         /* grab the lock that says we are going to process
2420                          * all the refs for this head */
2421                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2422                         spin_unlock(&delayed_refs->lock);
2423                         /*
2424                          * we may have dropped the spin lock to get the head
2425                          * mutex lock, and that might have given someone else
2426                          * time to free the head.  If that's true, it has been
2427                          * removed from our list and we can move on.
2428                          */
2429                         if (ret == -EAGAIN) {
2430                                 locked_ref = NULL;
2431                                 count++;
2432                                 continue;
2433                         }
2434                 }
2435
2436                 spin_lock(&locked_ref->lock);
2437
2438                 /*
2439                  * locked_ref is the head node, so we have to go one
2440                  * node back for any delayed ref updates
2441                  */
2442                 ref = select_delayed_ref(locked_ref);
2443
2444                 if (ref && ref->seq &&
2445                     btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2446                         spin_unlock(&locked_ref->lock);
2447                         btrfs_delayed_ref_unlock(locked_ref);
2448                         spin_lock(&delayed_refs->lock);
2449                         locked_ref->processing = 0;
2450                         delayed_refs->num_heads_ready++;
2451                         spin_unlock(&delayed_refs->lock);
2452                         locked_ref = NULL;
2453                         cond_resched();
2454                         count++;
2455                         continue;
2456                 }
2457
2458                 /*
2459                  * record the must insert reserved flag before we
2460                  * drop the spin lock.
2461                  */
2462                 must_insert_reserved = locked_ref->must_insert_reserved;
2463                 locked_ref->must_insert_reserved = 0;
2464
2465                 extent_op = locked_ref->extent_op;
2466                 locked_ref->extent_op = NULL;
2467
2468                 if (!ref) {
2471                         /* All delayed refs have been processed. Go ahead
2472                          * and send the head node to run_one_delayed_ref,
2473                          * so that any accounting fixes can happen.
2474                          */
2475                         ref = &locked_ref->node;
2476
2477                         if (extent_op && must_insert_reserved) {
2478                                 btrfs_free_delayed_extent_op(extent_op);
2479                                 extent_op = NULL;
2480                         }
2481
2482                         if (extent_op) {
2483                                 spin_unlock(&locked_ref->lock);
2484                                 ret = run_delayed_extent_op(trans, root,
2485                                                             ref, extent_op);
2486                                 btrfs_free_delayed_extent_op(extent_op);
2487
2488                                 if (ret) {
2489                                         /*
2490                                          * Need to reset must_insert_reserved if
2491                                          * there was an error so the abort stuff
2492                                          * can cleanup the reserved space
2493                                          * properly.
2494                                          */
2495                                         if (must_insert_reserved)
2496                                                 locked_ref->must_insert_reserved = 1;
2497                                         locked_ref->processing = 0;
2498                                         btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
2499                                         btrfs_delayed_ref_unlock(locked_ref);
2500                                         return ret;
2501                                 }
2502                                 continue;
2503                         }
2504
2505                         /*
2506                          * Need to drop our head ref lock and re-acquire the
2507                          * delayed ref lock and then re-check to make sure
2508                          * nobody got added.
2509                          */
2510                         spin_unlock(&locked_ref->lock);
2511                         spin_lock(&delayed_refs->lock);
2512                         spin_lock(&locked_ref->lock);
2513                         if (!list_empty(&locked_ref->ref_list) ||
2514                             locked_ref->extent_op) {
2515                                 spin_unlock(&locked_ref->lock);
2516                                 spin_unlock(&delayed_refs->lock);
2517                                 continue;
2518                         }
2519                         ref->in_tree = 0;
2520                         delayed_refs->num_heads--;
2521                         rb_erase(&locked_ref->href_node,
2522                                  &delayed_refs->href_root);
2523                         spin_unlock(&delayed_refs->lock);
2524                 } else {
2525                         actual_count++;
2526                         ref->in_tree = 0;
2527                         list_del(&ref->list);
2528                 }
2529                 atomic_dec(&delayed_refs->num_entries);
2530
2531                 if (!btrfs_delayed_ref_is_head(ref)) {
2532                         /*
2533                          * when we play the delayed ref, also correct the
2534                          * ref_mod on head
2535                          */
2536                         switch (ref->action) {
2537                         case BTRFS_ADD_DELAYED_REF:
2538                         case BTRFS_ADD_DELAYED_EXTENT:
2539                                 locked_ref->node.ref_mod -= ref->ref_mod;
2540                                 break;
2541                         case BTRFS_DROP_DELAYED_REF:
2542                                 locked_ref->node.ref_mod += ref->ref_mod;
2543                                 break;
2544                         default:
2545                                 WARN_ON(1);
2546                         }
2547                 }
2548                 spin_unlock(&locked_ref->lock);
2549
2550                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2551                                           must_insert_reserved);
2552
2553                 btrfs_free_delayed_extent_op(extent_op);
2554                 if (ret) {
2555                         locked_ref->processing = 0;
2556                         btrfs_delayed_ref_unlock(locked_ref);
2557                         btrfs_put_delayed_ref(ref);
2558                         btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
2559                         return ret;
2560                 }
2561
2562                 /*
2563                  * If this node is a head, that means all the refs in this head
2564                  * have been dealt with, and we will pick the next head to deal
2565                  * with, so we must unlock the head and drop it from the cluster
2566                  * list before we release it.
2567                  */
2568                 if (btrfs_delayed_ref_is_head(ref)) {
2569                         if (locked_ref->is_data &&
2570                             locked_ref->total_ref_mod < 0) {
2571                                 spin_lock(&delayed_refs->lock);
2572                                 delayed_refs->pending_csums -= ref->num_bytes;
2573                                 spin_unlock(&delayed_refs->lock);
2574                         }
2575                         btrfs_delayed_ref_unlock(locked_ref);
2576                         locked_ref = NULL;
2577                 }
2578                 btrfs_put_delayed_ref(ref);
2579                 count++;
2580                 cond_resched();
2581         }
2582
2583         /*
2584          * We don't want to include ref heads since we can have empty ref heads
2585          * and those would drastically skew our runtime down, because for them we
2586          * just do accounting, with no actual extent tree updates.
2587          */
2588         if (actual_count > 0) {
2589                 u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
2590                 u64 avg;
2591
2592                 /*
2593                  * We weigh the current average higher than our current runtime
2594                  * to avoid large swings in the average.
2595                  */
2596                 spin_lock(&delayed_refs->lock);
2597                 avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
2598                 fs_info->avg_delayed_ref_runtime = avg >> 2;    /* div by 4 */
2599                 spin_unlock(&delayed_refs->lock);
2600         }
2601         return 0;
2602 }
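
/*
 * The average update above is an exponentially weighted moving average:
 *
 *	avg_delayed_ref_runtime = (3 * old_avg + runtime) / 4
 *
 * so a single unusually slow or fast batch only moves the estimate by a
 * quarter of the difference.
 */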
2603
2604 #ifdef SCRAMBLE_DELAYED_REFS
2605 /*
2606  * Normally delayed refs get processed in ascending bytenr order. This
2607  * correlates in most cases to the order added. To expose dependencies on this
2608  * order, we start to process the tree in the middle instead of the beginning.
2609  */
2610 static u64 find_middle(struct rb_root *root)
2611 {
2612         struct rb_node *n = root->rb_node;
2613         struct btrfs_delayed_ref_node *entry;
2614         int alt = 1;
2615         u64 middle;
2616         u64 first = 0, last = 0;
2617
2618         n = rb_first(root);
2619         if (n) {
2620                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2621                 first = entry->bytenr;
2622         }
2623         n = rb_last(root);
2624         if (n) {
2625                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2626                 last = entry->bytenr;
2627         }
2628         n = root->rb_node;
2629
2630         while (n) {
2631                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2632                 WARN_ON(!entry->in_tree);
2633
2634                 middle = entry->bytenr;
2635
2636                 if (alt)
2637                         n = n->rb_left;
2638                 else
2639                         n = n->rb_right;
2640
2641                 alt = 1 - alt;
2642         }
2643         return middle;
2644 }
2645 #endif
2646
2647 static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
2648 {
2649         u64 num_bytes;
2650
2651         num_bytes = heads * (sizeof(struct btrfs_extent_item) +
2652                              sizeof(struct btrfs_extent_inline_ref));
2653         if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2654                 num_bytes += heads * sizeof(struct btrfs_tree_block_info);
2655
2656         /*
2657          * We don't ever fill up leaves all the way, so multiply by 2 just to
2658          * be closer to what we're really going to want to use.
2659          */
2660         return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
2661 }
2662
2663 /*
2664  * Takes the number of bytes to be checksummed and figures out how many leaves it
2665  * would require to store the csums for that many bytes.
2666  */
2667 u64 btrfs_csum_bytes_to_leaves(struct btrfs_root *root, u64 csum_bytes)
2668 {
2669         u64 csum_size;
2670         u64 num_csums_per_leaf;
2671         u64 num_csums;
2672
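        /*
         * Worked example (illustrative numbers): with 4-byte crc32c csums,
         * 4K sectors and 16K leaves, one leaf holds roughly 4000 csums, so
         * 1GiB of data (256K sectors) needs about 65 leaves.  Adding
         * (num_csums_per_leaf - 1) below makes the division round up.
         */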
2673         csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
2674         num_csums_per_leaf = div64_u64(csum_size,
2675                         (u64)btrfs_super_csum_size(root->fs_info->super_copy));
2676         num_csums = div64_u64(csum_bytes, root->sectorsize);
2677         num_csums += num_csums_per_leaf - 1;
2678         num_csums = div64_u64(num_csums, num_csums_per_leaf);
2679         return num_csums;
2680 }
2681
2682 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
2683                                        struct btrfs_root *root)
2684 {
2685         struct btrfs_block_rsv *global_rsv;
2686         u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
2687         u64 csum_bytes = trans->transaction->delayed_refs.pending_csums;
2688         u64 num_dirty_bgs = trans->transaction->num_dirty_bgs;
2689         u64 num_bytes, num_dirty_bgs_bytes;
2690         int ret = 0;
2691
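        /*
         * Build a worst-case estimate of the metadata space needed to run
         * the ready delayed refs: the leaves' worth of extent item updates
         * (doubled below), the leaves needed for the pending csum items,
         * and the cost of updating every dirty block group item.
         */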
2692         num_bytes = btrfs_calc_trans_metadata_size(root, 1);
2693         num_heads = heads_to_leaves(root, num_heads);
2694         if (num_heads > 1)
2695                 num_bytes += (num_heads - 1) * root->nodesize;
2696         num_bytes <<= 1;
2697         num_bytes += btrfs_csum_bytes_to_leaves(root, csum_bytes) * root->nodesize;
2698         num_dirty_bgs_bytes = btrfs_calc_trans_metadata_size(root,
2699                                                              num_dirty_bgs);
2700         global_rsv = &root->fs_info->global_block_rsv;
2701
2702         /*
2703          * If we can't allocate any more chunks, let's make sure we have _lots_ of
2704          * wiggle room since running delayed refs can create more delayed refs.
2705          */
2706         if (global_rsv->space_info->full) {
2707                 num_dirty_bgs_bytes <<= 1;
2708                 num_bytes <<= 1;
2709         }
2710
2711         spin_lock(&global_rsv->lock);
2712         if (global_rsv->reserved <= num_bytes + num_dirty_bgs_bytes)
2713                 ret = 1;
2714         spin_unlock(&global_rsv->lock);
2715         return ret;
2716 }
2717
2718 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
2719                                        struct btrfs_root *root)
2720 {
2721         struct btrfs_fs_info *fs_info = root->fs_info;
2722         u64 num_entries =
2723                 atomic_read(&trans->transaction->delayed_refs.num_entries);
2724         u64 avg_runtime;
2725         u64 val;
2726
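        /*
         * Estimate how long running all queued entries would take from the
         * running average: past one second's worth we throttle hard (1),
         * past half a second softly (2), otherwise defer to the space check.
         */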
2727         smp_mb();
2728         avg_runtime = fs_info->avg_delayed_ref_runtime;
2729         val = num_entries * avg_runtime;
2730         if (val >= NSEC_PER_SEC)
2731                 return 1;
2732         if (val >= NSEC_PER_SEC / 2)
2733                 return 2;
2734
2735         return btrfs_check_space_for_delayed_refs(trans, root);
2736 }
2737
2738 struct async_delayed_refs {
2739         struct btrfs_root *root;
2740         int count;
2741         int error;
2742         int sync;
2743         struct completion wait;
2744         struct btrfs_work work;
2745 };
2746
2747 static void delayed_ref_async_start(struct btrfs_work *work)
2748 {
2749         struct async_delayed_refs *async;
2750         struct btrfs_trans_handle *trans;
2751         int ret;
2752
2753         async = container_of(work, struct async_delayed_refs, work);
2754
2755         trans = btrfs_join_transaction(async->root);
2756         if (IS_ERR(trans)) {
2757                 async->error = PTR_ERR(trans);
2758                 goto done;
2759         }
2760
2761         /*
2762          * trans->sync means that when we call end_transaciton, we won't
2763          * trans->sync means that when we call end_transaction, we won't
2764          */
2765         trans->sync = true;
2766         ret = btrfs_run_delayed_refs(trans, async->root, async->count);
2767         if (ret)
2768                 async->error = ret;
2769
2770         ret = btrfs_end_transaction(trans, async->root);
2771         if (ret && !async->error)
2772                 async->error = ret;
2773 done:
2774         if (async->sync)
2775                 complete(&async->wait);
2776         else
2777                 kfree(async);
2778 }
2779
2780 int btrfs_async_run_delayed_refs(struct btrfs_root *root,
2781                                  unsigned long count, int wait)
2782 {
2783         struct async_delayed_refs *async;
2784         int ret;
2785
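        /*
         * The worker frees 'async' itself in the nowait case; only when the
         * caller waits do we collect the error and free it here.
         */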
2786         async = kmalloc(sizeof(*async), GFP_NOFS);
2787         if (!async)
2788                 return -ENOMEM;
2789
2790         async->root = root->fs_info->tree_root;
2791         async->count = count;
2792         async->error = 0;
2793         if (wait)
2794                 async->sync = 1;
2795         else
2796                 async->sync = 0;
2797         init_completion(&async->wait);
2798
2799         btrfs_init_work(&async->work, btrfs_extent_refs_helper,
2800                         delayed_ref_async_start, NULL, NULL);
2801
2802         btrfs_queue_work(root->fs_info->extent_workers, &async->work);
2803
2804         if (wait) {
2805                 wait_for_completion(&async->wait);
2806                 ret = async->error;
2807                 kfree(async);
2808                 return ret;
2809         }
2810         return 0;
2811 }
2812
2813 /*
2814  * this starts processing the delayed reference count updates and
2815  * extent insertions we have queued up so far.  count can be
2816  * 0, which means to process everything in the tree at the start
2817  * of the run (but not newly added entries), or it can be some target
2818  * number you'd like to process.
2819  *
2820  * Returns 0 on success or if called with an aborted transaction
2821  * Returns <0 on error and aborts the transaction
2822  */
2823 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2824                            struct btrfs_root *root, unsigned long count)
2825 {
2826         struct rb_node *node;
2827         struct btrfs_delayed_ref_root *delayed_refs;
2828         struct btrfs_delayed_ref_head *head;
2829         int ret;
2830         int run_all = count == (unsigned long)-1;
2831
2832         /* We'll clean this up in btrfs_cleanup_transaction */
2833         if (trans->aborted)
2834                 return 0;
2835
2836         if (root == root->fs_info->extent_root)
2837                 root = root->fs_info->tree_root;
2838
2839         delayed_refs = &trans->transaction->delayed_refs;
2840         if (count == 0)
2841                 count = atomic_read(&delayed_refs->num_entries) * 2;
2842
2843 again:
2844 #ifdef SCRAMBLE_DELAYED_REFS
2845         delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2846 #endif
2847         ret = __btrfs_run_delayed_refs(trans, root, count);
2848         if (ret < 0) {
2849                 btrfs_abort_transaction(trans, root, ret);
2850                 return ret;
2851         }
2852
2853         if (run_all) {
2854                 if (!list_empty(&trans->new_bgs))
2855                         btrfs_create_pending_block_groups(trans, root);
2856
2857                 spin_lock(&delayed_refs->lock);
2858                 node = rb_first(&delayed_refs->href_root);
2859                 if (!node) {
2860                         spin_unlock(&delayed_refs->lock);
2861                         goto out;
2862                 }
2863                 count = (unsigned long)-1;
2864
2865                 while (node) {
2866                         head = rb_entry(node, struct btrfs_delayed_ref_head,
2867                                         href_node);
2868                         if (btrfs_delayed_ref_is_head(&head->node)) {
2869                                 struct btrfs_delayed_ref_node *ref;
2870
2871                                 ref = &head->node;
2872                                 atomic_inc(&ref->refs);
2873
2874                                 spin_unlock(&delayed_refs->lock);
2875                                 /*
2876                                  * Mutex was contended, block until it's
2877                                  * released and try again
2878                                  */
2879                                 mutex_lock(&head->mutex);
2880                                 mutex_unlock(&head->mutex);
2881
2882                                 btrfs_put_delayed_ref(ref);
2883                                 cond_resched();
2884                                 goto again;
2885                         } else {
2886                                 WARN_ON(1);
2887                         }
2888                         node = rb_next(node);
2889                 }
2890                 spin_unlock(&delayed_refs->lock);
2891                 cond_resched();
2892                 goto again;
2893         }
2894 out:
2895         assert_qgroups_uptodate(trans);
2896         return 0;
2897 }
2898
2899 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2900                                 struct btrfs_root *root,
2901                                 u64 bytenr, u64 num_bytes, u64 flags,
2902                                 int level, int is_data)
2903 {
2904         struct btrfs_delayed_extent_op *extent_op;
2905         int ret;
2906
2907         extent_op = btrfs_alloc_delayed_extent_op();
2908         if (!extent_op)
2909                 return -ENOMEM;
2910
2911         extent_op->flags_to_set = flags;
2912         extent_op->update_flags = 1;
2913         extent_op->update_key = 0;
2914         extent_op->is_data = is_data ? 1 : 0;
2915         extent_op->level = level;
2916
2917         ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2918                                           num_bytes, extent_op);
2919         if (ret)
2920                 btrfs_free_delayed_extent_op(extent_op);
2921         return ret;
2922 }
2923
2924 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2925                                       struct btrfs_root *root,
2926                                       struct btrfs_path *path,
2927                                       u64 objectid, u64 offset, u64 bytenr)
2928 {
2929         struct btrfs_delayed_ref_head *head;
2930         struct btrfs_delayed_ref_node *ref;
2931         struct btrfs_delayed_data_ref *data_ref;
2932         struct btrfs_delayed_ref_root *delayed_refs;
2933         int ret = 0;
2934
2935         delayed_refs = &trans->transaction->delayed_refs;
2936         spin_lock(&delayed_refs->lock);
2937         head = btrfs_find_delayed_ref_head(trans, bytenr);
2938         if (!head) {
2939                 spin_unlock(&delayed_refs->lock);
2940                 return 0;
2941         }
2942
2943         if (!mutex_trylock(&head->mutex)) {
2944                 atomic_inc(&head->node.refs);
2945                 spin_unlock(&delayed_refs->lock);
2946
2947                 btrfs_release_path(path);
2948
2949                 /*
2950                  * Mutex was contended, block until it's released and let
2951                  * caller try again
2952                  */
2953                 mutex_lock(&head->mutex);
2954                 mutex_unlock(&head->mutex);
2955                 btrfs_put_delayed_ref(&head->node);
2956                 return -EAGAIN;
2957         }
2958         spin_unlock(&delayed_refs->lock);
2959
2960         spin_lock(&head->lock);
2961         list_for_each_entry(ref, &head->ref_list, list) {
2962                 /* If it's a shared ref we know a cross reference exists */
2963                 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
2964                         ret = 1;
2965                         break;
2966                 }
2967
2968                 data_ref = btrfs_delayed_node_to_data_ref(ref);
2969
2970                 /*
2971                  * If our ref doesn't match the one we're currently looking at
2972                  * then we have a cross reference.
2973                  */
2974                 if (data_ref->root != root->root_key.objectid ||
2975                     data_ref->objectid != objectid ||
2976                     data_ref->offset != offset) {
2977                         ret = 1;
2978                         break;
2979                 }
2980         }
2981         spin_unlock(&head->lock);
2982         mutex_unlock(&head->mutex);
2983         return ret;
2984 }
2985
2986 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2987                                         struct btrfs_root *root,
2988                                         struct btrfs_path *path,
2989                                         u64 objectid, u64 offset, u64 bytenr)
2990 {
2991         struct btrfs_root *extent_root = root->fs_info->extent_root;
2992         struct extent_buffer *leaf;
2993         struct btrfs_extent_data_ref *ref;
2994         struct btrfs_extent_inline_ref *iref;
2995         struct btrfs_extent_item *ei;
2996         struct btrfs_key key;
2997         u32 item_size;
2998         int ret;
2999
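        /*
         * Look up the last possible key for this extent; an exact match
         * can't exist, so a successful search leaves us one slot past the
         * extent item (if any) and we step back below.
         */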
3000         key.objectid = bytenr;
3001         key.offset = (u64)-1;
3002         key.type = BTRFS_EXTENT_ITEM_KEY;
3003
3004         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
3005         if (ret < 0)
3006                 goto out;
3007         BUG_ON(ret == 0); /* Corruption */
3008
3009         ret = -ENOENT;
3010         if (path->slots[0] == 0)
3011                 goto out;
3012
3013         path->slots[0]--;
3014         leaf = path->nodes[0];
3015         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3016
3017         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
3018                 goto out;
3019
3020         ret = 1;
3021         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3022 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3023         if (item_size < sizeof(*ei)) {
3024                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
3025                 goto out;
3026         }
3027 #endif
3028         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
3029
3030         if (item_size != sizeof(*ei) +
3031             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
3032                 goto out;
3033
3034         if (btrfs_extent_generation(leaf, ei) <=
3035             btrfs_root_last_snapshot(&root->root_item))
3036                 goto out;
3037
3038         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
3039         if (btrfs_extent_inline_ref_type(leaf, iref) !=
3040             BTRFS_EXTENT_DATA_REF_KEY)
3041                 goto out;
3042
3043         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
3044         if (btrfs_extent_refs(leaf, ei) !=
3045             btrfs_extent_data_ref_count(leaf, ref) ||
3046             btrfs_extent_data_ref_root(leaf, ref) !=
3047             root->root_key.objectid ||
3048             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
3049             btrfs_extent_data_ref_offset(leaf, ref) != offset)
3050                 goto out;
3051
3052         ret = 0;
3053 out:
3054         return ret;
3055 }
3056
3057 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
3058                           struct btrfs_root *root,
3059                           u64 objectid, u64 offset, u64 bytenr)
3060 {
3061         struct btrfs_path *path;
3062         int ret;
3063         int ret2;
3064
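        /*
         * A cross reference may live in the committed extent tree or still
         * be pending as a delayed ref, so both places have to be checked;
         * check_delayed_ref asks us to retry (-EAGAIN) when the ref head
         * mutex was contended.
         */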
3065         path = btrfs_alloc_path();
3066         if (!path)
3067                 return -ENOMEM;
3068
3069         do {
3070                 ret = check_committed_ref(trans, root, path, objectid,
3071                                           offset, bytenr);
3072                 if (ret && ret != -ENOENT)
3073                         goto out;
3074
3075                 ret2 = check_delayed_ref(trans, root, path, objectid,
3076                                          offset, bytenr);
3077         } while (ret2 == -EAGAIN);
3078
3079         if (ret2 && ret2 != -ENOENT) {
3080                 ret = ret2;
3081                 goto out;
3082         }
3083
3084         if (ret != -ENOENT || ret2 != -ENOENT)
3085                 ret = 0;
3086 out:
3087         btrfs_free_path(path);
3088         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
3089                 WARN_ON(ret > 0);
3090         return ret;
3091 }
3092
3093 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
3094                            struct btrfs_root *root,
3095                            struct extent_buffer *buf,
3096                            int full_backref, int inc)
3097 {
3098         u64 bytenr;
3099         u64 num_bytes;
3100         u64 parent;
3101         u64 ref_root;
3102         u32 nritems;
3103         struct btrfs_key key;
3104         struct btrfs_file_extent_item *fi;
3105         int i;
3106         int level;
3107         int ret = 0;
3108         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
3109                             u64, u64, u64, u64, u64, u64, int);
3110
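        /*
         * Walk every item in this buffer and add (inc == 1) or drop
         * (inc == 0) one reference for each file extent found in a leaf,
         * or for each child block pointer found in a node.
         */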
3112         if (btrfs_test_is_dummy_root(root))
3113                 return 0;
3114
3115         ref_root = btrfs_header_owner(buf);
3116         nritems = btrfs_header_nritems(buf);
3117         level = btrfs_header_level(buf);
3118
3119         if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state) && level == 0)
3120                 return 0;
3121
3122         if (inc)
3123                 process_func = btrfs_inc_extent_ref;
3124         else
3125                 process_func = btrfs_free_extent;
3126
3127         if (full_backref)
3128                 parent = buf->start;
3129         else
3130                 parent = 0;
3131
3132         for (i = 0; i < nritems; i++) {
3133                 if (level == 0) {
3134                         btrfs_item_key_to_cpu(buf, &key, i);
3135                         if (key.type != BTRFS_EXTENT_DATA_KEY)
3136                                 continue;
3137                         fi = btrfs_item_ptr(buf, i,
3138                                             struct btrfs_file_extent_item);
3139                         if (btrfs_file_extent_type(buf, fi) ==
3140                             BTRFS_FILE_EXTENT_INLINE)
3141                                 continue;
3142                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
3143                         if (bytenr == 0)
3144                                 continue;
3145
3146                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
3147                         key.offset -= btrfs_file_extent_offset(buf, fi);
3148                         ret = process_func(trans, root, bytenr, num_bytes,
3149                                            parent, ref_root, key.objectid,
3150                                            key.offset, 1);
3151                         if (ret)
3152                                 goto fail;
3153                 } else {
3154                         bytenr = btrfs_node_blockptr(buf, i);
3155                         num_bytes = root->nodesize;
3156                         ret = process_func(trans, root, bytenr, num_bytes,
3157                                            parent, ref_root, level - 1, 0,
3158                                            1);
3159                         if (ret)
3160                                 goto fail;
3161                 }
3162         }
3163         return 0;
3164 fail:
3165         return ret;
3166 }
3167
3168 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3169                   struct extent_buffer *buf, int full_backref)
3170 {
3171         return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
3172 }
3173
3174 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3175                   struct extent_buffer *buf, int full_backref)
3176 {
3177         return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
3178 }
3179
3180 static int write_one_cache_group(struct btrfs_trans_handle *trans,
3181                                  struct btrfs_root *root,
3182                                  struct btrfs_path *path,
3183                                  struct btrfs_block_group_cache *cache)
3184 {
3185         int ret;
3186         struct btrfs_root *extent_root = root->fs_info->extent_root;
3187         unsigned long bi;
3188         struct extent_buffer *leaf;
3189
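        /*
         * A search return of 1 means the block group item is missing; turn
         * that into -ENOENT so callers can tell it apart from a real search
         * error.
         */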
3190         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3191         if (ret) {
3192                 if (ret > 0)
3193                         ret = -ENOENT;
3194                 goto fail;
3195         }
3196
3197         leaf = path->nodes[0];
3198         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3199         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3200         btrfs_mark_buffer_dirty(leaf);
3201 fail:
3202         btrfs_release_path(path);
3203         return ret;
3205 }
3206
3207 static struct btrfs_block_group_cache *
3208 next_block_group(struct btrfs_root *root,
3209                  struct btrfs_block_group_cache *cache)
3210 {
3211         struct rb_node *node;
3212
3213         spin_lock(&root->fs_info->block_group_cache_lock);
3214
3215         /* If our block group was removed, we need a full search. */
3216         if (RB_EMPTY_NODE(&cache->cache_node)) {
3217                 const u64 next_bytenr = cache->key.objectid + cache->key.offset;
3218
3219                 spin_unlock(&root->fs_info->block_group_cache_lock);
3220                 btrfs_put_block_group(cache);
3221                 cache = btrfs_lookup_first_block_group(root->fs_info,
3222                                                        next_bytenr);
3223                 return cache;
3224         }
3225         node = rb_next(&cache->cache_node);
3226         btrfs_put_block_group(cache);
3227         if (node) {
3228                 cache = rb_entry(node, struct btrfs_block_group_cache,
3229                                  cache_node);
3230                 btrfs_get_block_group(cache);
3231         } else
3232                 cache = NULL;
3233         spin_unlock(&root->fs_info->block_group_cache_lock);
3234         return cache;
3235 }
3236
3237 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3238                             struct btrfs_trans_handle *trans,
3239                             struct btrfs_path *path)
3240 {
3241         struct btrfs_root *root = block_group->fs_info->tree_root;
3242         struct inode *inode = NULL;
3243         u64 alloc_hint = 0;
3244         int dcs = BTRFS_DC_ERROR;
3245         u64 num_pages = 0;
3246         int retries = 0;
3247         int ret = 0;
3248
3249         /*
3250          * If this block group is smaller than 100 megs, don't bother
3251          * caching it.
3252          */
3253         if (block_group->key.offset < (100 * 1024 * 1024)) {
3254                 spin_lock(&block_group->lock);
3255                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3256                 spin_unlock(&block_group->lock);
3257                 return 0;
3258         }
3259
3260         if (trans->aborted)
3261                 return 0;
3262 again:
3263         inode = lookup_free_space_inode(root, block_group, path);
3264         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3265                 ret = PTR_ERR(inode);
3266                 btrfs_release_path(path);
3267                 goto out;
3268         }
3269
3270         if (IS_ERR(inode)) {
3271                 BUG_ON(retries);
3272                 retries++;
3273
3274                 if (block_group->ro)
3275                         goto out_free;
3276
3277                 ret = create_free_space_inode(root, trans, block_group, path);
3278                 if (ret)
3279                         goto out_free;
3280                 goto again;
3281         }
3282
3283         /* We've already set up this transaction, go ahead and exit */
3284         if (block_group->cache_generation == trans->transid &&
3285             i_size_read(inode)) {
3286                 dcs = BTRFS_DC_SETUP;
3287                 goto out_put;
3288         }
3289
3290         /*
3291          * We want to set the generation to 0 so that if anything goes wrong
3292          * from here on out we know not to trust this cache when we load it
3293          * up next time.
3294          */
3295         BTRFS_I(inode)->generation = 0;
3296         ret = btrfs_update_inode(trans, root, inode);
3297         if (ret) {
3298                 /*
3299                  * Theoretically we could recover from this by simply setting
3300                  * the super cache generation to 0 so we know to invalidate the
3301                  * cache, but then we'd have to keep track of the block groups
3302                  * that fail this way so we know we _have_ to reset this cache
3303                  * before the next commit or risk reading stale cache.  So to
3304                  * limit our exposure to horrible edge cases, let's just abort
3305                  * the transaction; this only happens in really bad situations
3306                  * anyway.
3307                  */
3308                 btrfs_abort_transaction(trans, root, ret);
3309                 goto out_put;
3310         }
3312
3313         if (i_size_read(inode) > 0) {
3314                 ret = btrfs_check_trunc_cache_free_space(root,
3315                                         &root->fs_info->global_block_rsv);
3316                 if (ret)
3317                         goto out_put;
3318
3319                 ret = btrfs_truncate_free_space_cache(root, trans, NULL, inode);
3320                 if (ret)
3321                         goto out_put;
3322         }
3323
3324         spin_lock(&block_group->lock);
3325         if (block_group->cached != BTRFS_CACHE_FINISHED ||
3326             !btrfs_test_opt(root, SPACE_CACHE)) {
3327                 /*
3328                  * don't bother trying to write stuff out _if_
3329                  * a) we're not cached, or
3330                  * b) we're mounted with the nospace_cache option.
3331                  */
3332                 dcs = BTRFS_DC_WRITTEN;
3333                 spin_unlock(&block_group->lock);
3334                 goto out_put;
3335         }
3336         spin_unlock(&block_group->lock);
3337
3338         /*
3339          * Try to preallocate enough space based on how big the block group is.
3340          * Keep in mind this has to include any pinned space which could end up
3341          * taking up quite a bit since it's not folded into the other space
3342          * cache.
3343          */
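        /*
         * Sizing example (assuming 4K pages): a 1GiB block group gives
         * div_u64(1GiB, 256MiB) * 16 = 64 pages, i.e. 256KiB of cache space
         * preallocated.
         */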
3344         num_pages = div_u64(block_group->key.offset, 256 * 1024 * 1024);
3345         if (!num_pages)
3346                 num_pages = 1;
3347
3348         num_pages *= 16;
3349         num_pages *= PAGE_CACHE_SIZE;
3350
3351         ret = btrfs_check_data_free_space(inode, num_pages, num_pages);
3352         if (ret)
3353                 goto out_put;
3354
3355         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3356                                               num_pages, num_pages,
3357                                               &alloc_hint);
3358         if (!ret)
3359                 dcs = BTRFS_DC_SETUP;
3360         btrfs_free_reserved_data_space(inode, num_pages);
3361
3362 out_put:
3363         iput(inode);
3364 out_free:
3365         btrfs_release_path(path);
3366 out:
3367         spin_lock(&block_group->lock);
3368         if (!ret && dcs == BTRFS_DC_SETUP)
3369                 block_group->cache_generation = trans->transid;
3370         block_group->disk_cache_state = dcs;
3371         spin_unlock(&block_group->lock);
3372
3373         return ret;
3374 }
3375
3376 int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
3377                             struct btrfs_root *root)
3378 {
3379         struct btrfs_block_group_cache *cache, *tmp;
3380         struct btrfs_transaction *cur_trans = trans->transaction;
3381         struct btrfs_path *path;
3382
3383         if (list_empty(&cur_trans->dirty_bgs) ||
3384             !btrfs_test_opt(root, SPACE_CACHE))
3385                 return 0;
3386
3387         path = btrfs_alloc_path();
3388         if (!path)
3389                 return -ENOMEM;
3390
3391         /* Could add new block groups, use _safe just in case */
3392         list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
3393                                  dirty_list) {
3394                 if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3395                         cache_save_setup(cache, trans, path);
3396         }
3397
3398         btrfs_free_path(path);
3399         return 0;
3400 }
3401
3402 /*
3403  * transaction commit does final block group cache writeback during a
3404  * critical section where nothing is allowed to change the FS.  This is
3405  * required in order for the cache to actually match the block group,
3406  * but can introduce a lot of latency into the commit.
3407  *
3408  * So, btrfs_start_dirty_block_groups is here to kick off block group
3409  * cache IO.  There's a chance we'll have to redo some of it if the
3410  * block group changes again during the commit, but it greatly reduces
3411  * the commit latency by getting rid of the easy block groups while
3412  * we're still allowing others to join the commit.
3413  */
3414 int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
3415                                    struct btrfs_root *root)
3416 {
3417         struct btrfs_block_group_cache *cache;
3418         struct btrfs_transaction *cur_trans = trans->transaction;
3419         int ret = 0;
3420         int should_put;
3421         struct btrfs_path *path = NULL;
3422         LIST_HEAD(dirty);
3423         struct list_head *io = &cur_trans->io_bgs;
3424         int num_started = 0;
3425         int loops = 0;
3426
3427         spin_lock(&cur_trans->dirty_bgs_lock);
3428         if (list_empty(&cur_trans->dirty_bgs)) {
3429                 spin_unlock(&cur_trans->dirty_bgs_lock);
3430                 return 0;
3431         }
3432         list_splice_init(&cur_trans->dirty_bgs, &dirty);
3433         spin_unlock(&cur_trans->dirty_bgs_lock);
3434
3435 again:
3436         /*
3437          * make sure all the block groups on our dirty list actually
3438          * exist
3439          */
3440         btrfs_create_pending_block_groups(trans, root);
3441
3442         if (!path) {
3443                 path = btrfs_alloc_path();
3444                 if (!path)
3445                         return -ENOMEM;
3446         }
3447
3448         /*
3449          * cache_write_mutex is here only to save us from balance, or from
3450          * automatic removal of empty block groups, deleting this block group
3451          * while we are writing out the cache
3452          */
3453         mutex_lock(&trans->transaction->cache_write_mutex);
3454         while (!list_empty(&dirty)) {
3455                 cache = list_first_entry(&dirty,
3456                                          struct btrfs_block_group_cache,
3457                                          dirty_list);
3458                 /*
3459                  * this can happen if something re-dirties a block
3460                  * group that is already under IO.  Just wait for it to
3461                  * finish and then do it all again
3462                  */
3463                 if (!list_empty(&cache->io_list)) {
3464                         list_del_init(&cache->io_list);
3465                         btrfs_wait_cache_io(root, trans, cache,
3466                                             &cache->io_ctl, path,
3467                                             cache->key.objectid);
3468                         btrfs_put_block_group(cache);
3469                 }
3470
3472                 /*
3473                  * btrfs_wait_cache_io uses the cache->dirty_list to decide
3474                  * if it should update the cache_state.  Don't delete
3475                  * until after we wait.
3476                  *
3477                  * Since we're not running in the commit critical section
3478                  * we need the dirty_bgs_lock to protect from update_block_group
3479                  */
3480                 spin_lock(&cur_trans->dirty_bgs_lock);
3481                 list_del_init(&cache->dirty_list);
3482                 spin_unlock(&cur_trans->dirty_bgs_lock);
3483
3484                 should_put = 1;
3485
3486                 cache_save_setup(cache, trans, path);
3487
3488                 if (cache->disk_cache_state == BTRFS_DC_SETUP) {
3489                         cache->io_ctl.inode = NULL;
3490                         ret = btrfs_write_out_cache(root, trans, cache, path);
3491                         if (ret == 0 && cache->io_ctl.inode) {
3492                                 num_started++;
3493                                 should_put = 0;
3494
3495                                 /*
3496                                  * the cache_write_mutex is protecting
3497                                  * the io_list
3498                                  */
3499                                 list_add_tail(&cache->io_list, io);
3500                         } else {
3501                                 /*
3502                                  * if we failed to write the cache, the
3503                                  * generation will be bad and life goes on
3504                                  */
3505                                 ret = 0;
3506                         }
3507                 }
3508                 if (!ret) {
3509                         ret = write_one_cache_group(trans, root, path, cache);
3510                         /*
3511                          * Our block group might still be attached to the list
3512                          * of new block groups in the transaction handle of some
3513                          * other task (struct btrfs_trans_handle->new_bgs). This
3514                          * means its block group item isn't yet in the extent
3515                          * tree. If this happens ignore the error, as we will
3516                          * try again later in the critical section of the
3517                          * transaction commit.
3518                          */
3519                         if (ret == -ENOENT) {
3520                                 ret = 0;
3521                                 spin_lock(&cur_trans->dirty_bgs_lock);
3522                                 if (list_empty(&cache->dirty_list)) {
3523                                         list_add_tail(&cache->dirty_list,
3524                                                       &cur_trans->dirty_bgs);
3525                                         btrfs_get_block_group(cache);
3526                                 }
3527                                 spin_unlock(&cur_trans->dirty_bgs_lock);
3528                         } else if (ret) {
3529                                 btrfs_abort_transaction(trans, root, ret);
3530                         }
3531                 }
3532
3533                 /* if it's not on the io list, we need to put the block group */
3534                 if (should_put)
3535                         btrfs_put_block_group(cache);
3536
3537                 if (ret)
3538                         break;
3539
3540                 /*
3541                  * Avoid blocking other tasks for too long. It might even save
3542                  * us from writing caches for block groups that are going to be
3543                  * removed.
3544                  */
3545                 mutex_unlock(&trans->transaction->cache_write_mutex);
3546                 mutex_lock(&trans->transaction->cache_write_mutex);
3547         }
3548         mutex_unlock(&trans->transaction->cache_write_mutex);
3549
3550         /*
3551          * go through delayed refs for all the stuff we've just kicked off
3552          * and then loop back (just once)
3553          */
3554         ret = btrfs_run_delayed_refs(trans, root, 0);
3555         if (!ret && loops == 0) {
3556                 loops++;
3557                 spin_lock(&cur_trans->dirty_bgs_lock);
3558                 list_splice_init(&cur_trans->dirty_bgs, &dirty);
3559                 /*
3560                  * dirty_bgs_lock protects us from concurrent block group
3561                  * deletes too (not just cache_write_mutex).
3562                  */
3563                 if (!list_empty(&dirty)) {
3564                         spin_unlock(&cur_trans->dirty_bgs_lock);
3565                         goto again;
3566                 }
3567                 spin_unlock(&cur_trans->dirty_bgs_lock);
3568         }
3569
3570         btrfs_free_path(path);
3571         return ret;
3572 }
3573
3574 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3575                                    struct btrfs_root *root)
3576 {
3577         struct btrfs_block_group_cache *cache;
3578         struct btrfs_transaction *cur_trans = trans->transaction;
3579         int ret = 0;
3580         int should_put;
3581         struct btrfs_path *path;
3582         struct list_head *io = &cur_trans->io_bgs;
3583         int num_started = 0;
3584
3585         path = btrfs_alloc_path();
3586         if (!path)
3587                 return -ENOMEM;
3588
3589         /*
3590          * We don't need the lock here since we are protected by the transaction
3591          * commit.  We want to do the cache_save_setup first and then run the
3592          * delayed refs to make sure we have the best chance at doing this all
3593          * in one shot.
3594          */
3595         while (!list_empty(&cur_trans->dirty_bgs)) {
3596                 cache = list_first_entry(&cur_trans->dirty_bgs,
3597                                          struct btrfs_block_group_cache,
3598                                          dirty_list);
3599
3600                 /*
3601                  * this can happen if cache_save_setup re-dirties a block
3602                  * group that is already under IO.  Just wait for it to
3603                  * finish and then do it all again
3604                  */
3605                 if (!list_empty(&cache->io_list)) {
3606                         list_del_init(&cache->io_list);
3607                         btrfs_wait_cache_io(root, trans, cache,
3608                                             &cache->io_ctl, path,
3609                                             cache->key.objectid);
3610                         btrfs_put_block_group(cache);
3611                 }
3612
3613                 /*
3614                  * don't remove from the dirty list until after we've waited
3615                  * on any pending IO
3616                  */
3617                 list_del_init(&cache->dirty_list);
3618                 should_put = 1;
3619
3620                 cache_save_setup(cache, trans, path);
3621
3622                 if (!ret)
3623                         ret = btrfs_run_delayed_refs(trans, root, (unsigned long) -1);
3624
3625                 if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
3626                         cache->io_ctl.inode = NULL;
3627                         ret = btrfs_write_out_cache(root, trans, cache, path);
3628                         if (ret == 0 && cache->io_ctl.inode) {
3629                                 num_started++;
3630                                 should_put = 0;
3631                                 list_add_tail(&cache->io_list, io);
3632                         } else {
3633                                 /*
3634                                  * if we failed to write the cache, the
3635                                  * generation will be bad and life goes on
3636                                  */
3637                                 ret = 0;
3638                         }
3639                 }
3640                 if (!ret) {
3641                         ret = write_one_cache_group(trans, root, path, cache);
3642                         if (ret)
3643                                 btrfs_abort_transaction(trans, root, ret);
3644                 }
3645
3646                 /* if it's not on the io list, we need to put the block group */
3647                 if (should_put)
3648                         btrfs_put_block_group(cache);
3649         }
3650
3651         while (!list_empty(io)) {
3652                 cache = list_first_entry(io, struct btrfs_block_group_cache,
3653                                          io_list);
3654                 list_del_init(&cache->io_list);
3655                 btrfs_wait_cache_io(root, trans, cache,
3656                                     &cache->io_ctl, path, cache->key.objectid);
3657                 btrfs_put_block_group(cache);
3658         }
3659
3660         btrfs_free_path(path);
3661         return ret;
3662 }
3663
3664 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3665 {
3666         struct btrfs_block_group_cache *block_group;
3667         int readonly = 0;
3668
3669         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3670         if (!block_group || block_group->ro)
3671                 readonly = 1;
3672         if (block_group)
3673                 btrfs_put_block_group(block_group);
3674         return readonly;
3675 }
3676
3677 static const char *alloc_name(u64 flags)
3678 {
3679         switch (flags) {
3680         case BTRFS_BLOCK_GROUP_METADATA|BTRFS_BLOCK_GROUP_DATA:
3681                 return "mixed";
3682         case BTRFS_BLOCK_GROUP_METADATA:
3683                 return "metadata";
3684         case BTRFS_BLOCK_GROUP_DATA:
3685                 return "data";
3686         case BTRFS_BLOCK_GROUP_SYSTEM:
3687                 return "system";
3688         default:
3689                 WARN_ON(1);
3690                 return "invalid-combination";
3691         }
3692 }
3693
3694 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3695                              u64 total_bytes, u64 bytes_used,
3696                              struct btrfs_space_info **space_info)
3697 {
3698         struct btrfs_space_info *found;
3699         int i;
3700         int factor;
3701         int ret;
3702
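        /*
         * DUP, RAID1 and RAID10 store two copies of every byte, so their
         * raw disk usage is twice the logical usage.
         */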
3703         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3704                      BTRFS_BLOCK_GROUP_RAID10))
3705                 factor = 2;
3706         else
3707                 factor = 1;
3708
3709         found = __find_space_info(info, flags);
3710         if (found) {
3711                 spin_lock(&found->lock);
3712                 found->total_bytes += total_bytes;
3713                 found->disk_total += total_bytes * factor;
3714                 found->bytes_used += bytes_used;
3715                 found->disk_used += bytes_used * factor;
3716                 if (total_bytes > 0)
3717                         found->full = 0;
3718                 spin_unlock(&found->lock);
3719                 *space_info = found;
3720                 return 0;
3721         }
3722         found = kzalloc(sizeof(*found), GFP_NOFS);
3723         if (!found)
3724                 return -ENOMEM;
3725
3726         ret = percpu_counter_init(&found->total_bytes_pinned, 0, GFP_KERNEL);
3727         if (ret) {
3728                 kfree(found);
3729                 return ret;
3730         }
3731
3732         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3733                 INIT_LIST_HEAD(&found->block_groups[i]);
3734         init_rwsem(&found->groups_sem);
3735         spin_lock_init(&found->lock);
3736         found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3737         found->total_bytes = total_bytes;
3738         found->disk_total = total_bytes * factor;
3739         found->bytes_used = bytes_used;
3740         found->disk_used = bytes_used * factor;
3741         found->bytes_pinned = 0;
3742         found->bytes_reserved = 0;
3743         found->bytes_readonly = 0;
3744         found->bytes_may_use = 0;
3745         found->full = 0;
3746         found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3747         found->chunk_alloc = 0;
3748         found->flush = 0;
3749         init_waitqueue_head(&found->wait);
3750         INIT_LIST_HEAD(&found->ro_bgs);
3751
3752         ret = kobject_init_and_add(&found->kobj, &space_info_ktype,
3753                                     info->space_info_kobj, "%s",
3754                                     alloc_name(found->flags));
3755         if (ret) {
3756                 kfree(found);
3757                 return ret;
3758         }
3759
3760         *space_info = found;
3761         list_add_rcu(&found->list, &info->space_info);
3762         if (flags & BTRFS_BLOCK_GROUP_DATA)
3763                 info->data_sinfo = found;
3764
3765         return ret;
3766 }
3767
3768 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3769 {
3770         u64 extra_flags = chunk_to_extended(flags) &
3771                                 BTRFS_EXTENDED_PROFILE_MASK;
3772
3773         write_seqlock(&fs_info->profiles_lock);
3774         if (flags & BTRFS_BLOCK_GROUP_DATA)
3775                 fs_info->avail_data_alloc_bits |= extra_flags;
3776         if (flags & BTRFS_BLOCK_GROUP_METADATA)
3777                 fs_info->avail_metadata_alloc_bits |= extra_flags;
3778         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3779                 fs_info->avail_system_alloc_bits |= extra_flags;
3780         write_sequnlock(&fs_info->profiles_lock);
3781 }
3782
3783 /*
3784  * returns target flags in extended format or 0 if restripe for this
3785  * chunk_type is not in progress
3786  *
3787  * should be called with either volume_mutex or balance_lock held
3788  */
3789 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3790 {
3791         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3792         u64 target = 0;
3793
3794         if (!bctl)
3795                 return 0;
3796
3797         if (flags & BTRFS_BLOCK_GROUP_DATA &&
3798             bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3799                 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3800         } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3801                    bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3802                 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3803         } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3804                    bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3805                 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3806         }
3807
3808         return target;
3809 }
3810
3811 /*
3812  * @flags: available profiles in extended format (see ctree.h)
3813  *
3814  * Returns reduced profile in chunk format.  If profile changing is in
3815  * progress (either running or paused) picks the target profile (if it's
3816  * already available), otherwise falls back to plain reducing.
3817  */
3818 static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3819 {
3820         u64 num_devices = root->fs_info->fs_devices->rw_devices;
3821         u64 target;
3822         u64 tmp;
3823
3824         /*
3825          * see if restripe for this chunk_type is in progress; if so,
3826          * try to reduce to the target profile
3827          */
3828         spin_lock(&root->fs_info->balance_lock);
3829         target = get_restripe_target(root->fs_info, flags);
3830         if (target) {
3831                 /* pick target profile only if it's already available */
3832                 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3833                         spin_unlock(&root->fs_info->balance_lock);
3834                         return extended_to_chunk(target);
3835                 }
3836         }
3837         spin_unlock(&root->fs_info->balance_lock);
3838
3839         /* First, mask out the RAID levels which aren't possible */
3840         if (num_devices == 1)
3841                 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0 |
3842                            BTRFS_BLOCK_GROUP_RAID5);
3843         if (num_devices < 3)
3844                 flags &= ~BTRFS_BLOCK_GROUP_RAID6;
3845         if (num_devices < 4)
3846                 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3847
3848         tmp = flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3849                        BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID5 |
3850                        BTRFS_BLOCK_GROUP_RAID6 | BTRFS_BLOCK_GROUP_RAID10);
3851         flags &= ~tmp;
3852
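        /*
         * Of the striped/mirrored profiles that remain, keep a single one,
         * preferring RAID6, then RAID5, RAID10, RAID1 and finally RAID0.
         */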
3853         if (tmp & BTRFS_BLOCK_GROUP_RAID6)
3854                 tmp = BTRFS_BLOCK_GROUP_RAID6;
3855         else if (tmp & BTRFS_BLOCK_GROUP_RAID5)
3856                 tmp = BTRFS_BLOCK_GROUP_RAID5;
3857         else if (tmp & BTRFS_BLOCK_GROUP_RAID10)
3858                 tmp = BTRFS_BLOCK_GROUP_RAID10;
3859         else if (tmp & BTRFS_BLOCK_GROUP_RAID1)
3860                 tmp = BTRFS_BLOCK_GROUP_RAID1;
3861         else if (tmp & BTRFS_BLOCK_GROUP_RAID0)
3862                 tmp = BTRFS_BLOCK_GROUP_RAID0;
3863
3864         return extended_to_chunk(flags | tmp);
3865 }
3866
3867 static u64 get_alloc_profile(struct btrfs_root *root, u64 orig_flags)
3868 {
3869         unsigned seq;
3870         u64 flags;
3871
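        /*
         * Sample the avail_*_alloc_bits under the profiles seqlock and
         * retry if a concurrent writer changed them while we were reading.
         */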
3872         do {
3873                 flags = orig_flags;
3874                 seq = read_seqbegin(&root->fs_info->profiles_lock);
3875
3876                 if (flags & BTRFS_BLOCK_GROUP_DATA)
3877                         flags |= root->fs_info->avail_data_alloc_bits;
3878                 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3879                         flags |= root->fs_info->avail_system_alloc_bits;
3880                 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3881                         flags |= root->fs_info->avail_metadata_alloc_bits;
3882         } while (read_seqretry(&root->fs_info->profiles_lock, seq));
3883
3884         return btrfs_reduce_alloc_profile(root, flags);
3885 }
3886
3887 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3888 {
3889         u64 flags;
3890         u64 ret;
3891
3892         if (data)
3893                 flags = BTRFS_BLOCK_GROUP_DATA;
3894         else if (root == root->fs_info->chunk_root)
3895                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3896         else
3897                 flags = BTRFS_BLOCK_GROUP_METADATA;
3898
3899         ret = get_alloc_profile(root, flags);
3900         return ret;
3901 }
3902
3903 /*
3904  * This will check the space that the inode allocates from to make sure we have
3905  * enough space for bytes.
3906  */
3907 int btrfs_check_data_free_space(struct inode *inode, u64 bytes, u64 write_bytes)
3908 {
3909         struct btrfs_space_info *data_sinfo;
3910         struct btrfs_root *root = BTRFS_I(inode)->root;
3911         struct btrfs_fs_info *fs_info = root->fs_info;
3912         u64 used;
3913         int ret = 0;
3914         int need_commit = 2;
3915         int have_pinned_space;
3916
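        /*
         * need_commit allows up to two transaction commit attempts before
         * we give up and return -ENOSPC; ordered extents are flushed before
         * the first commit.
         */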
3917         /* make sure bytes are sectorsize aligned */
3918         bytes = ALIGN(bytes, root->sectorsize);
3919
3920         if (btrfs_is_free_space_inode(inode)) {
3921                 need_commit = 0;
3922                 ASSERT(current->journal_info);
3923         }
3924
3925         data_sinfo = fs_info->data_sinfo;
3926         if (!data_sinfo)
3927                 goto alloc;
3928
3929 again:
3930         /* make sure we have enough space to handle the data first */
3931         spin_lock(&data_sinfo->lock);
3932         used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3933                 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3934                 data_sinfo->bytes_may_use;
3935
3936         if (used + bytes > data_sinfo->total_bytes) {
3937                 struct btrfs_trans_handle *trans;
3938
3939                 /*
3940                  * if we don't have enough free bytes in this space then we need
3941                  * to alloc a new chunk.
3942                  */
3943                 if (!data_sinfo->full) {
3944                         u64 alloc_target;
3945
3946                         data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3947                         spin_unlock(&data_sinfo->lock);
3948 alloc:
3949                         alloc_target = btrfs_get_alloc_profile(root, 1);
3950                         /*
3951                          * It is ugly that we don't call a nolock join
3952                          * transaction for the free space inode case here,
3953                          * but it is safe: we only do the data space
3954                          * reservation for the free space cache in the
3955                          * transaction context, and the common join transaction
3956                          * just increases the counter of the current transaction
3957                          * handle; it doesn't try to acquire the trans_lock of
3958                          * the fs.
3959                          */
3960                         trans = btrfs_join_transaction(root);
3961                         if (IS_ERR(trans))
3962                                 return PTR_ERR(trans);
3963
3964                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3965                                              alloc_target,
3966                                              CHUNK_ALLOC_NO_FORCE);
3967                         btrfs_end_transaction(trans, root);
3968                         if (ret < 0) {
3969                                 if (ret != -ENOSPC)
3970                                         return ret;
3971                                 else {
3972                                         have_pinned_space = 1;
3973                                         goto commit_trans;
3974                                 }
3975                         }
3976
3977                         if (!data_sinfo)
3978                                 data_sinfo = fs_info->data_sinfo;
3979
3980                         goto again;
3981                 }
3982
3983                 /*
3984                  * If we don't have enough pinned space to deal with this
3985                  * allocation, and no chunk was removed in the current transaction,
3986                  * don't bother committing the transaction.
3987                  */
3988                 have_pinned_space = percpu_counter_compare(
3989                         &data_sinfo->total_bytes_pinned,
3990                         used + bytes - data_sinfo->total_bytes);
3991                 spin_unlock(&data_sinfo->lock);
3992
3993                 /* commit the current transaction and try again */
3994 commit_trans:
3995                 if (need_commit &&
3996                     !atomic_read(&root->fs_info->open_ioctl_trans)) {
3997                         need_commit--;
3998
3999                         if (need_commit > 0)
4000                                 btrfs_wait_ordered_roots(fs_info, -1);
4001
4002                         trans = btrfs_join_transaction(root);
4003                         if (IS_ERR(trans))
4004                                 return PTR_ERR(trans);
4005                         if (have_pinned_space >= 0 ||
4006                             trans->transaction->have_free_bgs ||
4007                             need_commit > 0) {
4008                                 ret = btrfs_commit_transaction(trans, root);
4009                                 if (ret)
4010                                         return ret;
4011                                 /*
4012                                  * make sure that all running delayed iputs
4013                                  * are done
4014                                  */
4015                                 down_write(&root->fs_info->delayed_iput_sem);
4016                                 up_write(&root->fs_info->delayed_iput_sem);
4017                                 goto again;
4018                         } else {
4019                                 btrfs_end_transaction(trans, root);
4020                         }
4021                 }
4022
4023                 trace_btrfs_space_reservation(root->fs_info,
4024                                               "space_info:enospc",
4025                                               data_sinfo->flags, bytes, 1);
4026                 return -ENOSPC;
4027         }
4028         ret = btrfs_qgroup_reserve(root, write_bytes);
4029         if (ret)
4030                 goto out;
4031         data_sinfo->bytes_may_use += bytes;
4032         trace_btrfs_space_reservation(root->fs_info, "space_info",
4033                                       data_sinfo->flags, bytes, 1);
4034 out:
4035         spin_unlock(&data_sinfo->lock);
4036
4037         return ret;
4038 }
4039
4040 /*
4041  * Called when we need to clear a data reservation for this inode.
4042  */
4043 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
4044 {
4045         struct btrfs_root *root = BTRFS_I(inode)->root;
4046         struct btrfs_space_info *data_sinfo;
4047
4048         /* make sure bytes are sectorsize aligned */
4049         bytes = ALIGN(bytes, root->sectorsize);
4050
4051         data_sinfo = root->fs_info->data_sinfo;
4052         spin_lock(&data_sinfo->lock);
4053         WARN_ON(data_sinfo->bytes_may_use < bytes);
4054         data_sinfo->bytes_may_use -= bytes;
4055         trace_btrfs_space_reservation(root->fs_info, "space_info",
4056                                       data_sinfo->flags, bytes, 0);
4057         spin_unlock(&data_sinfo->lock);
4058 }
4059
4060 static void force_metadata_allocation(struct btrfs_fs_info *info)
4061 {
4062         struct list_head *head = &info->space_info;
4063         struct btrfs_space_info *found;
4064
4065         rcu_read_lock();
4066         list_for_each_entry_rcu(found, head, list) {
4067                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
4068                         found->force_alloc = CHUNK_ALLOC_FORCE;
4069         }
4070         rcu_read_unlock();
4071 }
4072
4073 static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
4074 {
4075         return (global->size << 1);
4076 }
4077
4078 static int should_alloc_chunk(struct btrfs_root *root,
4079                               struct btrfs_space_info *sinfo, int force)
4080 {
4081         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4082         u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
4083         u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
4084         u64 thresh;
4085
4086         if (force == CHUNK_ALLOC_FORCE)
4087                 return 1;
4088
4089         /*
4090          * We need to take into account the global rsv because for all intents
4091          * and purposes it's used space.  Don't worry about locking the
4092          * global_rsv, it doesn't change except when the transaction commits.
4093          */
4094         if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
4095                 num_allocated += calc_global_rsv_need_space(global_rsv);
4096
4097         /*
4098          * In limited mode, we want to keep some free space available,
4099          * up to about 1% of the FS size.
4100          */
4101         if (force == CHUNK_ALLOC_LIMITED) {
4102                 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
4103                 thresh = max_t(u64, 64 * 1024 * 1024,
4104                                div_factor_fine(thresh, 1));
4105
4106                 if (num_bytes - num_allocated < thresh)
4107                         return 1;
4108         }
4109
4110         if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
4111                 return 0;
4112         return 1;
4113 }
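/*
 * Worked example for the thresholds above (illustrative numbers only,
 * not from the source): on a 1TiB filesystem, CHUNK_ALLOC_LIMITED uses
 * thresh = max(64MiB, 1% of 1TiB) ~= 11GiB, so we allocate a chunk
 * whenever less than ~11GiB of this space_info remains unallocated.
 * With CHUNK_ALLOC_NO_FORCE we only allocate once usage crosses 80%,
 * i.e. once num_allocated + 2MiB >= num_bytes * 8 / 10.
 */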
4114
4115 static u64 get_profile_num_devs(struct btrfs_root *root, u64 type)
4116 {
4117         u64 num_dev;
4118
4119         if (type & (BTRFS_BLOCK_GROUP_RAID10 |
4120                     BTRFS_BLOCK_GROUP_RAID0 |
4121                     BTRFS_BLOCK_GROUP_RAID5 |
4122                     BTRFS_BLOCK_GROUP_RAID6))
4123                 num_dev = root->fs_info->fs_devices->rw_devices;
4124         else if (type & BTRFS_BLOCK_GROUP_RAID1)
4125                 num_dev = 2;
4126         else
4127                 num_dev = 1;    /* DUP or single */
4128
4129         return num_dev;
4130 }
4131
4132 /*
4133  * Reserve space in the system space_info for the given chunk type, as
4134  * needed when allocating or removing a chunk: num_devs device items to
4135  * update plus one chunk item to add or remove.
4136  */
4137 void check_system_chunk(struct btrfs_trans_handle *trans,
4138                         struct btrfs_root *root,
4139                         u64 type)
4140 {
4141         struct btrfs_space_info *info;
4142         u64 left;
4143         u64 thresh;
4144         int ret = 0;
4145         u64 num_devs;
4146
4147         /*
4148          * Needed because we can end up allocating a system chunk, and the
4149          * space reservation in the chunk block reserve must be race free.
4150          */
4151         ASSERT(mutex_is_locked(&root->fs_info->chunk_mutex));
4152
4153         info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4154         spin_lock(&info->lock);
4155         left = info->total_bytes - info->bytes_used - info->bytes_pinned -
4156                 info->bytes_reserved - info->bytes_readonly -
4157                 info->bytes_may_use;
4158         spin_unlock(&info->lock);
4159
4160         num_devs = get_profile_num_devs(root, type);
4161
4162         /* num_devs device items to update and 1 chunk item to add or remove */
4163         thresh = btrfs_calc_trunc_metadata_size(root, num_devs) +
4164                 btrfs_calc_trans_metadata_size(root, 1);
4165
4166         if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
4167                 btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
4168                         left, thresh, type);
4169                 dump_space_info(info, 0, 0);
4170         }
4171
4172         if (left < thresh) {
4173                 u64 flags;
4174
4175                 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
4176                 /*
4177                  * Ignore failure to create system chunk. We might end up not
4178                  * needing it, as we might not need to COW all nodes/leafs from
4179                  * the paths we visit in the chunk tree (they were already COWed
4180                  * or created in the current transaction for example).
4181                  */
4182                 ret = btrfs_alloc_chunk(trans, root, flags);
4183         }
4184
4185         if (!ret) {
4186                 ret = btrfs_block_rsv_add(root->fs_info->chunk_root,
4187                                           &root->fs_info->chunk_block_rsv,
4188                                           thresh, BTRFS_RESERVE_NO_FLUSH);
4189                 if (!ret)
4190                         trans->chunk_bytes_reserved += thresh;
4191         }
4192 }
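/*
 * Rough sizing of the reservation made above, as a hedged example
 * (assuming a 16KiB nodesize, and assuming btrfs_calc_trunc_metadata_size()
 * accounts one COW'd path of BTRFS_MAX_LEVEL nodes per item while
 * btrfs_calc_trans_metadata_size() accounts two): for a RAID1 chunk on
 * two writable devices, num_devs = 2, so thresh comes to roughly
 * 2 * 128KiB + 256KiB = 512KiB of metadata space.
 */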
4193
4194 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
4195                           struct btrfs_root *extent_root, u64 flags, int force)
4196 {
4197         struct btrfs_space_info *space_info;
4198         struct btrfs_fs_info *fs_info = extent_root->fs_info;
4199         int wait_for_alloc = 0;
4200         int ret = 0;
4201
4202         /* Don't re-enter if we're already allocating a chunk */
4203         if (trans->allocating_chunk)
4204                 return -ENOSPC;
4205
4206         space_info = __find_space_info(extent_root->fs_info, flags);
4207         if (!space_info) {
4208                 ret = update_space_info(extent_root->fs_info, flags,
4209                                         0, 0, &space_info);
4210                 BUG_ON(ret); /* -ENOMEM */
4211         }
4212         BUG_ON(!space_info); /* Logic error */
4213
4214 again:
4215         spin_lock(&space_info->lock);
4216         if (force < space_info->force_alloc)
4217                 force = space_info->force_alloc;
4218         if (space_info->full) {
4219                 if (should_alloc_chunk(extent_root, space_info, force))
4220                         ret = -ENOSPC;
4221                 else
4222                         ret = 0;
4223                 spin_unlock(&space_info->lock);
4224                 return ret;
4225         }
4226
4227         if (!should_alloc_chunk(extent_root, space_info, force)) {
4228                 spin_unlock(&space_info->lock);
4229                 return 0;
4230         } else if (space_info->chunk_alloc) {
4231                 wait_for_alloc = 1;
4232         } else {
4233                 space_info->chunk_alloc = 1;
4234         }
4235
4236         spin_unlock(&space_info->lock);
4237
4238         mutex_lock(&fs_info->chunk_mutex);
4239
4240         /*
4241          * The chunk_mutex is held throughout the entirety of a chunk
4242          * allocation, so once we've acquired the chunk_mutex we know that the
4243          * other guy is done and we need to recheck and see if we should
4244          * allocate.
4245          */
4246         if (wait_for_alloc) {
4247                 mutex_unlock(&fs_info->chunk_mutex);
4248                 wait_for_alloc = 0;
4249                 goto again;
4250         }
4251
4252         trans->allocating_chunk = true;
4253
4254         /*
4255          * If we have mixed data/metadata chunks we want to make sure we keep
4256          * allocating mixed chunks instead of individual chunks.
4257          */
4258         if (btrfs_mixed_space_info(space_info))
4259                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
4260
4261         /*
4262          * if we're doing a data chunk, go ahead and make sure that
4263          * we keep a reasonable number of metadata chunks allocated in the
4264          * FS as well.
4265          */
4266         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
4267                 fs_info->data_chunk_allocations++;
4268                 if (!(fs_info->data_chunk_allocations %
4269                       fs_info->metadata_ratio))
4270                         force_metadata_allocation(fs_info);
4271         }
4272
4273         /*
4274          * Check if we have enough space in SYSTEM chunk because we may need
4275          * to update devices.
4276          */
4277         check_system_chunk(trans, extent_root, flags);
4278
4279         ret = btrfs_alloc_chunk(trans, extent_root, flags);
4280         trans->allocating_chunk = false;
4281
4282         spin_lock(&space_info->lock);
4283         if (ret < 0 && ret != -ENOSPC)
4284                 goto out;
4285         if (ret)
4286                 space_info->full = 1;
4287         else
4288                 ret = 1;
4289
4290         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
4291 out:
4292         space_info->chunk_alloc = 0;
4293         spin_unlock(&space_info->lock);
4294         mutex_unlock(&fs_info->chunk_mutex);
4295         /*
4296          * When we allocate a new chunk we reserve space in the chunk block
4297          * reserve to make sure we can COW nodes/leafs in the chunk tree or
4298          * add new nodes/leafs to it if we end up needing to do it when
4299          * inserting the chunk item and updating device items as part of the
4300          * second phase of chunk allocation, performed by
4301          * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
4302          * large number of new block groups to create in our transaction
4303          * handle's new_bgs list to avoid exhausting the chunk block reserve
4304          * in extreme cases - like having a single transaction create many new
4305          * block groups when starting to write out the free space caches of all
4306          * the block groups that were made dirty during the lifetime of the
4307          * transaction.
4308          */
4309         if (trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) {
4310                 btrfs_create_pending_block_groups(trans, trans->root);
4311                 btrfs_trans_release_chunk_metadata(trans);
4312         }
4313         return ret;
4314 }
4315
4316 static int can_overcommit(struct btrfs_root *root,
4317                           struct btrfs_space_info *space_info, u64 bytes,
4318                           enum btrfs_reserve_flush_enum flush)
4319 {
4320         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4321         u64 profile = btrfs_get_alloc_profile(root, 0);
4322         u64 space_size;
4323         u64 avail;
4324         u64 used;
4325
4326         used = space_info->bytes_used + space_info->bytes_reserved +
4327                 space_info->bytes_pinned + space_info->bytes_readonly;
4328
4329         /*
4330          * We only want to allow over committing if we have lots of actual space
4331          * free, but if we don't have enough space to handle the global reserve
4332          * space then we could end up having a real enospc problem when trying
4333          * to allocate a chunk or some other such important allocation.
4334          */
4335         spin_lock(&global_rsv->lock);
4336         space_size = calc_global_rsv_need_space(global_rsv);
4337         spin_unlock(&global_rsv->lock);
4338         if (used + space_size >= space_info->total_bytes)
4339                 return 0;
4340
4341         used += space_info->bytes_may_use;
4342
4343         spin_lock(&root->fs_info->free_chunk_lock);
4344         avail = root->fs_info->free_chunk_space;
4345         spin_unlock(&root->fs_info->free_chunk_lock);
4346
4347         /*
4348          * If we have dup, raid1 or raid10 then only half of the free
4349          * space is actually usable.  For raid56, the space info used
4350          * doesn't include the parity drive, so we don't have to
4351          * change the math.
4352          */
4353         if (profile & (BTRFS_BLOCK_GROUP_DUP |
4354                        BTRFS_BLOCK_GROUP_RAID1 |
4355                        BTRFS_BLOCK_GROUP_RAID10))
4356                 avail >>= 1;
4357
4358         /*
4359          * If we can flush all things (BTRFS_RESERVE_FLUSH_ALL), be more
4360          * conservative and only allow overcommitting up to 1/8 of the
4361          * free space; otherwise allow overcommitting up to 1/2 of it.
4362          */
4363         if (flush == BTRFS_RESERVE_FLUSH_ALL)
4364                 avail >>= 3;
4365         else
4366                 avail >>= 1;
4367
4368         if (used + bytes < space_info->total_bytes + avail)
4369                 return 1;
4370         return 0;
4371 }
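/*
 * Worked example (illustrative numbers only): suppose a metadata
 * space_info with total_bytes = 10GiB, of which 6GiB is
 * used/reserved/pinned/readonly, the global rsv size is 512MiB (so
 * calc_global_rsv_need_space() returns 1GiB) and bytes_may_use = 3GiB,
 * with 40GiB of unallocated disk and a RAID1 profile.  The first check
 * passes (6GiB + 1GiB < 10GiB); then used becomes 9GiB and
 * avail = 40GiB / 2 = 20GiB, reduced to 2.5GiB for FLUSH_ALL.  The
 * reservation is allowed as long as 9GiB + bytes < 12.5GiB.
 */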
4372
4373 static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
4374                                          unsigned long nr_pages, int nr_items)
4375 {
4376         struct super_block *sb = root->fs_info->sb;
4377
4378         if (down_read_trylock(&sb->s_umount)) {
4379                 writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
4380                 up_read(&sb->s_umount);
4381         } else {
4382                 /*
4383                  * We needn't worry about the filesystem going from r/w to
4384                  * r/o even though we don't acquire the ->s_umount mutex,
4385                  * because the filesystem guarantees that the delalloc
4386                  * inode list is empty once it is read-only (all dirty
4387                  * pages have been written to disk).
4388                  */
4389                 btrfs_start_delalloc_roots(root->fs_info, 0, nr_items);
4390                 if (!current->journal_info)
4391                         btrfs_wait_ordered_roots(root->fs_info, nr_items);
4392         }
4393 }
4394
4395 static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim)
4396 {
4397         u64 bytes;
4398         int nr;
4399
4400         bytes = btrfs_calc_trans_metadata_size(root, 1);
4401         nr = (int)div64_u64(to_reclaim, bytes);
4402         if (!nr)
4403                 nr = 1;
4404         return nr;
4405 }
4406
4407 #define EXTENT_SIZE_PER_ITEM    (256 * 1024)
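/*
 * E.g., assuming a 16KiB nodesize where btrfs_calc_trans_metadata_size()
 * works out to 256KiB per item: to_reclaim = 1MiB maps to nr = 4 items,
 * and shrink_delalloc() below turns that back into
 * 4 * EXTENT_SIZE_PER_ITEM = 1MiB of delalloc to flush.
 */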
4408
4409 /*
4410  * shrink metadata reservation for delalloc
4411  */
4412 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
4413                             bool wait_ordered)
4414 {
4415         struct btrfs_block_rsv *block_rsv;
4416         struct btrfs_space_info *space_info;
4417         struct btrfs_trans_handle *trans;
4418         u64 delalloc_bytes;
4419         u64 max_reclaim;
4420         long time_left;
4421         unsigned long nr_pages;
4422         int loops;
4423         int items;
4424         enum btrfs_reserve_flush_enum flush;
4425
4426         /* Calc the number of items we need to flush for this reservation */
4427         items = calc_reclaim_items_nr(root, to_reclaim);
4428         to_reclaim = items * EXTENT_SIZE_PER_ITEM;
4429
4430         trans = (struct btrfs_trans_handle *)current->journal_info;
4431         block_rsv = &root->fs_info->delalloc_block_rsv;
4432         space_info = block_rsv->space_info;
4433
4434         delalloc_bytes = percpu_counter_sum_positive(
4435                                                 &root->fs_info->delalloc_bytes);
4436         if (delalloc_bytes == 0) {
4437                 if (trans)
4438                         return;
4439                 if (wait_ordered)
4440                         btrfs_wait_ordered_roots(root->fs_info, items);
4441                 return;
4442         }
4443
4444         loops = 0;
4445         while (delalloc_bytes && loops < 3) {
4446                 max_reclaim = min(delalloc_bytes, to_reclaim);
4447                 nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
4448                 btrfs_writeback_inodes_sb_nr(root, nr_pages, items);
4449                 /*
4450                  * We need to wait for the async pages to actually start before
4451                  * we do anything.
4452                  */
4453                 max_reclaim = atomic_read(&root->fs_info->async_delalloc_pages);
4454                 if (!max_reclaim)
4455                         goto skip_async;
4456
4457                 if (max_reclaim <= nr_pages)
4458                         max_reclaim = 0;
4459                 else
4460                         max_reclaim -= nr_pages;
4461
4462                 wait_event(root->fs_info->async_submit_wait,
4463                            atomic_read(&root->fs_info->async_delalloc_pages) <=
4464                            (int)max_reclaim);
4465 skip_async:
4466                 if (!trans)
4467                         flush = BTRFS_RESERVE_FLUSH_ALL;
4468                 else
4469                         flush = BTRFS_RESERVE_NO_FLUSH;
4470                 spin_lock(&space_info->lock);
4471                 if (can_overcommit(root, space_info, orig, flush)) {
4472                         spin_unlock(&space_info->lock);
4473                         break;
4474                 }
4475                 spin_unlock(&space_info->lock);
4476
4477                 loops++;
4478                 if (wait_ordered && !trans) {
4479                         btrfs_wait_ordered_roots(root->fs_info, items);
4480                 } else {
4481                         time_left = schedule_timeout_killable(1);
4482                         if (time_left)
4483                                 break;
4484                 }
4485                 delalloc_bytes = percpu_counter_sum_positive(
4486                                                 &root->fs_info->delalloc_bytes);
4487         }
4488 }
4489
4490 /**
4491  * may_commit_transaction - possibly commit the transaction if it's OK to
4492  * @root - the root we're allocating for
4493  * @bytes - the number of bytes we want to reserve
4494  * @force - force the commit
4495  *
4496  * This will check to make sure that committing the transaction will actually
4497  * get us somewhere and then commit the transaction if it does.  Otherwise it
4498  * will return -ENOSPC.
4499  */
4500 static int may_commit_transaction(struct btrfs_root *root,
4501                                   struct btrfs_space_info *space_info,
4502                                   u64 bytes, int force)
4503 {
4504         struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
4505         struct btrfs_trans_handle *trans;
4506
4507         trans = (struct btrfs_trans_handle *)current->journal_info;
4508         if (trans)
4509                 return -EAGAIN;
4510
4511         if (force)
4512                 goto commit;
4513
4514         /* See if there is enough pinned space to make this reservation */
4515         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4516                                    bytes) >= 0)
4517                 goto commit;
4518
4519         /*
4520          * See if there is some space in the delayed insertion reservation for
4521          * this reservation.
4522          */
4523         if (space_info != delayed_rsv->space_info)
4524                 return -ENOSPC;
4525
4526         spin_lock(&delayed_rsv->lock);
4527         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4528                                    bytes - delayed_rsv->size) >= 0) {
4529                 spin_unlock(&delayed_rsv->lock);
4530                 return -ENOSPC;
4531         }
4532         spin_unlock(&delayed_rsv->lock);
4533
4534 commit:
4535         trans = btrfs_join_transaction(root);
4536         if (IS_ERR(trans))
4537                 return -ENOSPC;
4538
4539         return btrfs_commit_transaction(trans, root);
4540 }
4541
4542 enum flush_state {
4543         FLUSH_DELAYED_ITEMS_NR  =       1,
4544         FLUSH_DELAYED_ITEMS     =       2,
4545         FLUSH_DELALLOC          =       3,
4546         FLUSH_DELALLOC_WAIT     =       4,
4547         ALLOC_CHUNK             =       5,
4548         COMMIT_TRANS            =       6,
4549 };
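/*
 * Callers walk these states in ascending order (flush_space() handles one
 * state per call; see reserve_metadata_bytes() and the async reclaim
 * worker below), so the cheap steps (running delayed items) are tried
 * before the expensive ones (flushing delalloc, allocating a chunk and,
 * last of all, committing the transaction).
 */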
4550
4551 static int flush_space(struct btrfs_root *root,
4552                        struct btrfs_space_info *space_info, u64 num_bytes,
4553                        u64 orig_bytes, int state)
4554 {
4555         struct btrfs_trans_handle *trans;
4556         int nr;
4557         int ret = 0;
4558
4559         switch (state) {
4560         case FLUSH_DELAYED_ITEMS_NR:
4561         case FLUSH_DELAYED_ITEMS:
4562                 if (state == FLUSH_DELAYED_ITEMS_NR)
4563                         nr = calc_reclaim_items_nr(root, num_bytes) * 2;
4564                 else
4565                         nr = -1;
4566
4567                 trans = btrfs_join_transaction(root);
4568                 if (IS_ERR(trans)) {
4569                         ret = PTR_ERR(trans);
4570                         break;
4571                 }
4572                 ret = btrfs_run_delayed_items_nr(trans, root, nr);
4573                 btrfs_end_transaction(trans, root);
4574                 break;
4575         case FLUSH_DELALLOC:
4576         case FLUSH_DELALLOC_WAIT:
4577                 shrink_delalloc(root, num_bytes * 2, orig_bytes,
4578                                 state == FLUSH_DELALLOC_WAIT);
4579                 break;
4580         case ALLOC_CHUNK:
4581                 trans = btrfs_join_transaction(root);
4582                 if (IS_ERR(trans)) {
4583                         ret = PTR_ERR(trans);
4584                         break;
4585                 }
4586                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4587                                      btrfs_get_alloc_profile(root, 0),
4588                                      CHUNK_ALLOC_NO_FORCE);
4589                 btrfs_end_transaction(trans, root);
4590                 if (ret == -ENOSPC)
4591                         ret = 0;
4592                 break;
4593         case COMMIT_TRANS:
4594                 ret = may_commit_transaction(root, space_info, orig_bytes, 0);
4595                 break;
4596         default:
4597                 ret = -ENOSPC;
4598                 break;
4599         }
4600
4601         return ret;
4602 }
4603
4604 static inline u64
4605 btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
4606                                  struct btrfs_space_info *space_info)
4607 {
4608         u64 used;
4609         u64 expected;
4610         u64 to_reclaim;
4611
4612         to_reclaim = min_t(u64, num_online_cpus() * 1024 * 1024,
4613                                 16 * 1024 * 1024);
4614         spin_lock(&space_info->lock);
4615         if (can_overcommit(root, space_info, to_reclaim,
4616                            BTRFS_RESERVE_FLUSH_ALL)) {
4617                 to_reclaim = 0;
4618                 goto out;
4619         }
4620
4621         used = space_info->bytes_used + space_info->bytes_reserved +
4622                space_info->bytes_pinned + space_info->bytes_readonly +
4623                space_info->bytes_may_use;
4624         if (can_overcommit(root, space_info, 1024 * 1024,
4625                            BTRFS_RESERVE_FLUSH_ALL))
4626                 expected = div_factor_fine(space_info->total_bytes, 95);
4627         else
4628                 expected = div_factor_fine(space_info->total_bytes, 90);
4629
4630         if (used > expected)
4631                 to_reclaim = used - expected;
4632         else
4633                 to_reclaim = 0;
4634         to_reclaim = min(to_reclaim, space_info->bytes_may_use +
4635                                      space_info->bytes_reserved);
4636 out:
4637         spin_unlock(&space_info->lock);
4638
4639         return to_reclaim;
4640 }
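/*
 * Worked example (illustrative numbers only): with 8 online CPUs the
 * overcommit probe above uses min(8 * 1MiB, 16MiB) = 8MiB.  If that
 * probe fails and the space_info has total_bytes = 10GiB with
 * used = 9.8GiB, expected is 95% (9.5GiB) or 90% (9GiB) of total, so
 * to_reclaim becomes 0.3GiB or 0.8GiB, clamped to bytes_may_use plus
 * bytes_reserved.
 */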
4641
4642 static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
4643                                         struct btrfs_fs_info *fs_info, u64 used)
4644 {
4645         u64 thresh = div_factor_fine(space_info->total_bytes, 98);
4646
4647         /* If we're just plain full then async reclaim just slows us down. */
4648         if (space_info->bytes_used >= thresh)
4649                 return 0;
4650
4651         return (used >= thresh && !btrfs_fs_closing(fs_info) &&
4652                 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
4653 }
4654
4655 static int btrfs_need_do_async_reclaim(struct btrfs_space_info *space_info,
4656                                        struct btrfs_fs_info *fs_info,
4657                                        int flush_state)
4658 {
4659         u64 used;
4660
4661         spin_lock(&space_info->lock);
4662         /*
4663          * We've run out of space and have not been able to get any free
4664          * space via flush_space, so don't bother doing async reclaim.
4665          */
4666         if (flush_state > COMMIT_TRANS && space_info->full) {
4667                 spin_unlock(&space_info->lock);
4668                 return 0;
4669         }
4670
4671         used = space_info->bytes_used + space_info->bytes_reserved +
4672                space_info->bytes_pinned + space_info->bytes_readonly +
4673                space_info->bytes_may_use;
4674         if (need_do_async_reclaim(space_info, fs_info, used)) {
4675                 spin_unlock(&space_info->lock);
4676                 return 1;
4677         }
4678         spin_unlock(&space_info->lock);
4679
4680         return 0;
4681 }
4682
4683 static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
4684 {
4685         struct btrfs_fs_info *fs_info;
4686         struct btrfs_space_info *space_info;
4687         u64 to_reclaim;
4688         int flush_state;
4689
4690         fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
4691         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4692
4693         to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
4694                                                       space_info);
4695         if (!to_reclaim)
4696                 return;
4697
4698         flush_state = FLUSH_DELAYED_ITEMS_NR;
4699         do {
4700                 flush_space(fs_info->fs_root, space_info, to_reclaim,
4701                             to_reclaim, flush_state);
4702                 flush_state++;
4703                 if (!btrfs_need_do_async_reclaim(space_info, fs_info,
4704                                                  flush_state))
4705                         return;
4706         } while (flush_state < COMMIT_TRANS);
4707 }
4708
4709 void btrfs_init_async_reclaim_work(struct work_struct *work)
4710 {
4711         INIT_WORK(work, btrfs_async_reclaim_metadata_space);
4712 }
4713
4714 /**
4715  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
4716  * @root - the root we're allocating for
4717  * @block_rsv - the block_rsv we're allocating for
4718  * @orig_bytes - the number of bytes we want
4719  * @flush - whether or not we can flush to make our reservation
4720  *
4721  * This will reserve orig_bytes number of bytes from the space info associated
4722  * with the block_rsv.  If there is not enough space it will make an attempt to
4723  * flush out space to make room.  It will do this by flushing delalloc if
4724  * possible or committing the transaction.  If flush is 0 then no attempts to
4725  * regain reservations will be made and this will fail if there is not enough
4726  * space already.
4727  */
4728 static int reserve_metadata_bytes(struct btrfs_root *root,
4729                                   struct btrfs_block_rsv *block_rsv,
4730                                   u64 orig_bytes,
4731                                   enum btrfs_reserve_flush_enum flush)
4732 {
4733         struct btrfs_space_info *space_info = block_rsv->space_info;
4734         u64 used;
4735         u64 num_bytes = orig_bytes;
4736         int flush_state = FLUSH_DELAYED_ITEMS_NR;
4737         int ret = 0;
4738         bool flushing = false;
4739
4740 again:
4741         ret = 0;
4742         spin_lock(&space_info->lock);
4743         /*
4744          * We only want to wait if somebody other than us is flushing and we
4745          * are actually allowed to flush all things.
4746          */
4747         while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
4748                space_info->flush) {
4749                 spin_unlock(&space_info->lock);
4750                 /*
4751                  * If we have a trans handle we can't wait because the flusher
4752                  * may have to commit the transaction, which would mean we would
4753                  * deadlock since we are waiting for the flusher to finish, but
4754                  * hold the current transaction open.
4755                  */
4756                 if (current->journal_info)
4757                         return -EAGAIN;
4758                 ret = wait_event_killable(space_info->wait, !space_info->flush);
4759                 /* Must have been killed, return */
4760                 if (ret)
4761                         return -EINTR;
4762
4763                 spin_lock(&space_info->lock);
4764         }
4765
4766         ret = -ENOSPC;
4767         used = space_info->bytes_used + space_info->bytes_reserved +
4768                 space_info->bytes_pinned + space_info->bytes_readonly +
4769                 space_info->bytes_may_use;
4770
4771         /*
4772          * The idea here is that if we've not already over-reserved the
4773          * space info then we can go ahead and save our reservation first
4774          * and then start flushing if we need to.  Otherwise, if we've
4775          * already overcommitted, let's start flushing stuff first and
4776          * then come back and try to make our reservation.
4777          */
4778         if (used <= space_info->total_bytes) {
4779                 if (used + orig_bytes <= space_info->total_bytes) {
4780                         space_info->bytes_may_use += orig_bytes;
4781                         trace_btrfs_space_reservation(root->fs_info,
4782                                 "space_info", space_info->flags, orig_bytes, 1);
4783                         ret = 0;
4784                 } else {
4785                         /*
4786                          * Ok, set num_bytes to orig_bytes since we aren't
4787                          * overcommitted; this way we only try to reclaim
4788                          * what we need.
4789                          */
4790                         num_bytes = orig_bytes;
4791                 }
4792         } else {
4793                 /*
4794                  * Ok, we're overcommitted; set num_bytes to the
4795                  * overcommitted amount plus the amount of bytes that we
4796                  * need for this reservation.
4797                  */
4798                 num_bytes = used - space_info->total_bytes +
4799                         (orig_bytes * 2);
4800         }
4801
4802         if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
4803                 space_info->bytes_may_use += orig_bytes;
4804                 trace_btrfs_space_reservation(root->fs_info, "space_info",
4805                                               space_info->flags, orig_bytes,
4806                                               1);
4807                 ret = 0;
4808         }
4809
4810         /*
4811          * Couldn't make our reservation, save our place so while we're trying
4812          * to reclaim space we can actually use it instead of somebody else
4813          * stealing it from us.
4814          *
4815          * We make the other tasks wait for the flush only when we can flush
4816          * all things.
4817          */
4818         if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
4819                 flushing = true;
4820                 space_info->flush = 1;
4821         } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
4822                 used += orig_bytes;
4823                 /*
4824                  * We will do the space reservation dance during log replay,
4825                  * which means we won't have fs_info->fs_root set, so don't do
4826                  * the async reclaim as we will panic.
4827                  */
4828                 if (!root->fs_info->log_root_recovering &&
4829                     need_do_async_reclaim(space_info, root->fs_info, used) &&
4830                     !work_busy(&root->fs_info->async_reclaim_work))
4831                         queue_work(system_unbound_wq,
4832                                    &root->fs_info->async_reclaim_work);
4833         }
4834         spin_unlock(&space_info->lock);
4835
4836         if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
4837                 goto out;
4838
4839         ret = flush_space(root, space_info, num_bytes, orig_bytes,
4840                           flush_state);
4841         flush_state++;
4842
4843         /*
4844          * If we are FLUSH_LIMIT, we can not flush delalloc, or a
4845          * deadlock could happen. So skip the delalloc flush.
4846          */
4847         if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4848             (flush_state == FLUSH_DELALLOC ||
4849              flush_state == FLUSH_DELALLOC_WAIT))
4850                 flush_state = ALLOC_CHUNK;
4851
4852         if (!ret)
4853                 goto again;
4854         else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4855                  flush_state < COMMIT_TRANS)
4856                 goto again;
4857         else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
4858                  flush_state <= COMMIT_TRANS)
4859                 goto again;
4860
4861 out:
4862         if (ret == -ENOSPC &&
4863             unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
4864                 struct btrfs_block_rsv *global_rsv =
4865                         &root->fs_info->global_block_rsv;
4866
4867                 if (block_rsv != global_rsv &&
4868                     !block_rsv_use_bytes(global_rsv, orig_bytes))
4869                         ret = 0;
4870         }
4871         if (ret == -ENOSPC)
4872                 trace_btrfs_space_reservation(root->fs_info,
4873                                               "space_info:enospc",
4874                                               space_info->flags, orig_bytes, 1);
4875         if (flushing) {
4876                 spin_lock(&space_info->lock);
4877                 space_info->flush = 0;
4878                 wake_up_all(&space_info->wait);
4879                 spin_unlock(&space_info->lock);
4880         }
4881         return ret;
4882 }
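/*
 * In short, the function above loops: try to take orig_bytes straight
 * from the space_info or via overcommit; on failure, if flushing is
 * allowed, set space_info->flush, run flush_space() one state at a time
 * (skipping the delalloc states for BTRFS_RESERVE_FLUSH_LIMIT) and retry
 * until COMMIT_TRANS has been tried, finally falling back to the global
 * rsv during orphan cleanup.
 */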
4883
4884 static struct btrfs_block_rsv *get_block_rsv(
4885                                         const struct btrfs_trans_handle *trans,
4886                                         const struct btrfs_root *root)
4887 {
4888         struct btrfs_block_rsv *block_rsv = NULL;
4889
4890         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
4891                 block_rsv = trans->block_rsv;
4892
4893         if (root == root->fs_info->csum_root && trans->adding_csums)
4894                 block_rsv = trans->block_rsv;
4895
4896         if (root == root->fs_info->uuid_root)
4897                 block_rsv = trans->block_rsv;
4898
4899         if (!block_rsv)
4900                 block_rsv = root->block_rsv;
4901
4902         if (!block_rsv)
4903                 block_rsv = &root->fs_info->empty_block_rsv;
4904
4905         return block_rsv;
4906 }
4907
4908 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
4909                                u64 num_bytes)
4910 {
4911         int ret = -ENOSPC;
4912         spin_lock(&block_rsv->lock);
4913         if (block_rsv->reserved >= num_bytes) {
4914                 block_rsv->reserved -= num_bytes;
4915                 if (block_rsv->reserved < block_rsv->size)
4916                         block_rsv->full = 0;
4917                 ret = 0;
4918         }
4919         spin_unlock(&block_rsv->lock);
4920         return ret;
4921 }
4922
4923 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
4924                                 u64 num_bytes, int update_size)
4925 {
4926         spin_lock(&block_rsv->lock);
4927         block_rsv->reserved += num_bytes;
4928         if (update_size)
4929                 block_rsv->size += num_bytes;
4930         else if (block_rsv->reserved >= block_rsv->size)
4931                 block_rsv->full = 1;
4932         spin_unlock(&block_rsv->lock);
4933 }
4934
4935 int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
4936                              struct btrfs_block_rsv *dest, u64 num_bytes,
4937                              int min_factor)
4938 {
4939         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
4940         u64 min_bytes;
4941
4942         if (global_rsv->space_info != dest->space_info)
4943                 return -ENOSPC;
4944
4945         spin_lock(&global_rsv->lock);
4946         min_bytes = div_factor(global_rsv->size, min_factor);
4947         if (global_rsv->reserved < min_bytes + num_bytes) {
4948                 spin_unlock(&global_rsv->lock);
4949                 return -ENOSPC;
4950         }
4951         global_rsv->reserved -= num_bytes;
4952         if (global_rsv->reserved < global_rsv->size)
4953                 global_rsv->full = 0;
4954         spin_unlock(&global_rsv->lock);
4955
4956         block_rsv_add_bytes(dest, num_bytes, 1);
4957         return 0;
4958 }
4959
4960 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
4961                                     struct btrfs_block_rsv *block_rsv,
4962                                     struct btrfs_block_rsv *dest, u64 num_bytes)
4963 {
4964         struct btrfs_space_info *space_info = block_rsv->space_info;
4965
4966         spin_lock(&block_rsv->lock);
4967         if (num_bytes == (u64)-1)
4968                 num_bytes = block_rsv->size;
4969         block_rsv->size -= num_bytes;
4970         if (block_rsv->reserved >= block_rsv->size) {
4971                 num_bytes = block_rsv->reserved - block_rsv->size;
4972                 block_rsv->reserved = block_rsv->size;
4973                 block_rsv->full = 1;
4974         } else {
4975                 num_bytes = 0;
4976         }
4977         spin_unlock(&block_rsv->lock);
4978
4979         if (num_bytes > 0) {
4980                 if (dest) {
4981                         spin_lock(&dest->lock);
4982                         if (!dest->full) {
4983                                 u64 bytes_to_add;
4984
4985                                 bytes_to_add = dest->size - dest->reserved;
4986                                 bytes_to_add = min(num_bytes, bytes_to_add);
4987                                 dest->reserved += bytes_to_add;
4988                                 if (dest->reserved >= dest->size)
4989                                         dest->full = 1;
4990                                 num_bytes -= bytes_to_add;
4991                         }
4992                         spin_unlock(&dest->lock);
4993                 }
4994                 if (num_bytes) {
4995                         spin_lock(&space_info->lock);
4996                         space_info->bytes_may_use -= num_bytes;
4997                         trace_btrfs_space_reservation(fs_info, "space_info",
4998                                         space_info->flags, num_bytes, 0);
4999                         spin_unlock(&space_info->lock);
5000                 }
5001         }
5002 }
5003
5004 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
5005                                    struct btrfs_block_rsv *dst, u64 num_bytes)
5006 {
5007         int ret;
5008
5009         ret = block_rsv_use_bytes(src, num_bytes);
5010         if (ret)
5011                 return ret;
5012
5013         block_rsv_add_bytes(dst, num_bytes, 1);
5014         return 0;
5015 }
5016
5017 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
5018 {
5019         memset(rsv, 0, sizeof(*rsv));
5020         spin_lock_init(&rsv->lock);
5021         rsv->type = type;
5022 }
5023
5024 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
5025                                               unsigned short type)
5026 {
5027         struct btrfs_block_rsv *block_rsv;
5028         struct btrfs_fs_info *fs_info = root->fs_info;
5029
5030         block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
5031         if (!block_rsv)
5032                 return NULL;
5033
5034         btrfs_init_block_rsv(block_rsv, type);
5035         block_rsv->space_info = __find_space_info(fs_info,
5036                                                   BTRFS_BLOCK_GROUP_METADATA);
5037         return block_rsv;
5038 }
5039
5040 void btrfs_free_block_rsv(struct btrfs_root *root,
5041                           struct btrfs_block_rsv *rsv)
5042 {
5043         if (!rsv)
5044                 return;
5045         btrfs_block_rsv_release(root, rsv, (u64)-1);
5046         kfree(rsv);
5047 }
5048
5049 void __btrfs_free_block_rsv(struct btrfs_block_rsv *rsv)
5050 {
5051         kfree(rsv);
5052 }
5053
5054 int btrfs_block_rsv_add(struct btrfs_root *root,
5055                         struct btrfs_block_rsv *block_rsv, u64 num_bytes,
5056                         enum btrfs_reserve_flush_enum flush)
5057 {
5058         int ret;
5059
5060         if (num_bytes == 0)
5061                 return 0;
5062
5063         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5064         if (!ret) {
5065                 block_rsv_add_bytes(block_rsv, num_bytes, 1);
5066                 return 0;
5067         }
5068
5069         return ret;
5070 }
5071
5072 int btrfs_block_rsv_check(struct btrfs_root *root,
5073                           struct btrfs_block_rsv *block_rsv, int min_factor)
5074 {
5075         u64 num_bytes = 0;
5076         int ret = -ENOSPC;
5077
5078         if (!block_rsv)
5079                 return 0;
5080
5081         spin_lock(&block_rsv->lock);
5082         num_bytes = div_factor(block_rsv->size, min_factor);
5083         if (block_rsv->reserved >= num_bytes)
5084                 ret = 0;
5085         spin_unlock(&block_rsv->lock);
5086
5087         return ret;
5088 }
5089
5090 int btrfs_block_rsv_refill(struct btrfs_root *root,
5091                            struct btrfs_block_rsv *block_rsv, u64 min_reserved,
5092                            enum btrfs_reserve_flush_enum flush)
5093 {
5094         u64 num_bytes = 0;
5095         int ret = -ENOSPC;
5096
5097         if (!block_rsv)
5098                 return 0;
5099
5100         spin_lock(&block_rsv->lock);
5101         num_bytes = min_reserved;
5102         if (block_rsv->reserved >= num_bytes)
5103                 ret = 0;
5104         else
5105                 num_bytes -= block_rsv->reserved;
5106         spin_unlock(&block_rsv->lock);
5107
5108         if (!ret)
5109                 return 0;
5110
5111         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5112         if (!ret) {
5113                 block_rsv_add_bytes(block_rsv, num_bytes, 0);
5114                 return 0;
5115         }
5116
5117         return ret;
5118 }
5119
5120 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
5121                             struct btrfs_block_rsv *dst_rsv,
5122                             u64 num_bytes)
5123 {
5124         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
5125 }
5126
5127 void btrfs_block_rsv_release(struct btrfs_root *root,
5128                              struct btrfs_block_rsv *block_rsv,
5129                              u64 num_bytes)
5130 {
5131         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5132         if (global_rsv == block_rsv ||
5133             block_rsv->space_info != global_rsv->space_info)
5134                 global_rsv = NULL;
5135         block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
5136                                 num_bytes);
5137 }
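/*
 * Minimal usage sketch for the block rsv API above (illustrative only:
 * error handling is trimmed, the 1MiB size is a made-up example and
 * BTRFS_BLOCK_RSV_TEMP is assumed to be the type used for short-lived
 * reservations):
 *
 *	struct btrfs_block_rsv *rsv;
 *
 *	rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
 *	if (!rsv)
 *		return -ENOMEM;
 *	ret = btrfs_block_rsv_add(root, rsv, 1024 * 1024,
 *				  BTRFS_RESERVE_FLUSH_ALL);
 *	...use the reserved space...
 *	btrfs_free_block_rsv(root, rsv);   releases the rest and frees
 */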
5138
5139 /*
5140  * Helper to calculate the size of the global block reservation.
5141  * The desired value is the sum of space used by the extent tree,
5142  * checksum tree and root tree.
5143  */
5144 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
5145 {
5146         struct btrfs_space_info *sinfo;
5147         u64 num_bytes;
5148         u64 meta_used;
5149         u64 data_used;
5150         int csum_size = btrfs_super_csum_size(fs_info->super_copy);
5151
5152         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
5153         spin_lock(&sinfo->lock);
5154         data_used = sinfo->bytes_used;
5155         spin_unlock(&sinfo->lock);
5156
5157         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
5158         spin_lock(&sinfo->lock);
5159         if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
5160                 data_used = 0;
5161         meta_used = sinfo->bytes_used;
5162         spin_unlock(&sinfo->lock);
5163
5164         num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
5165                     csum_size * 2;
5166         num_bytes += div_u64(data_used + meta_used, 50);
5167
5168         if (num_bytes * 3 > meta_used)
5169                 num_bytes = div_u64(meta_used, 3);
5170
5171         return ALIGN(num_bytes, fs_info->extent_root->nodesize << 10);
5172 }
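/*
 * Worked example (illustrative numbers, assuming 4KiB blocks and 4-byte
 * crc32c checksums): with data_used = 100GiB and meta_used = 3GiB, the
 * csum term is (100GiB / 4KiB) * 4 * 2 = 200MiB and the 2% term is
 * 103GiB / 50 ~= 2.06GiB, giving ~2.26GiB total.  Since that is more
 * than a third of meta_used, the result is capped at 3GiB / 3 = 1GiB
 * (before the nodesize alignment).
 */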
5173
5174 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
5175 {
5176         struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
5177         struct btrfs_space_info *sinfo = block_rsv->space_info;
5178         u64 num_bytes;
5179
5180         num_bytes = calc_global_metadata_size(fs_info);
5181
5182         spin_lock(&sinfo->lock);
5183         spin_lock(&block_rsv->lock);
5184
5185         block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);
5186
5187         num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
5188                     sinfo->bytes_reserved + sinfo->bytes_readonly +
5189                     sinfo->bytes_may_use;
5190
5191         if (sinfo->total_bytes > num_bytes) {
5192                 num_bytes = sinfo->total_bytes - num_bytes;
5193                 block_rsv->reserved += num_bytes;
5194                 sinfo->bytes_may_use += num_bytes;
5195                 trace_btrfs_space_reservation(fs_info, "space_info",
5196                                       sinfo->flags, num_bytes, 1);
5197         }
5198
5199         if (block_rsv->reserved >= block_rsv->size) {
5200                 num_bytes = block_rsv->reserved - block_rsv->size;
5201                 sinfo->bytes_may_use -= num_bytes;
5202                 trace_btrfs_space_reservation(fs_info, "space_info",
5203                                       sinfo->flags, num_bytes, 0);
5204                 block_rsv->reserved = block_rsv->size;
5205                 block_rsv->full = 1;
5206         }
5207
5208         spin_unlock(&block_rsv->lock);
5209         spin_unlock(&sinfo->lock);
5210 }
5211
5212 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
5213 {
5214         struct btrfs_space_info *space_info;
5215
5216         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
5217         fs_info->chunk_block_rsv.space_info = space_info;
5218
5219         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
5220         fs_info->global_block_rsv.space_info = space_info;
5221         fs_info->delalloc_block_rsv.space_info = space_info;
5222         fs_info->trans_block_rsv.space_info = space_info;
5223         fs_info->empty_block_rsv.space_info = space_info;
5224         fs_info->delayed_block_rsv.space_info = space_info;
5225
5226         fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
5227         fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
5228         fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
5229         fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
5230         if (fs_info->quota_root)
5231                 fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
5232         fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
5233
5234         update_global_block_rsv(fs_info);
5235 }
5236
5237 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
5238 {
5239         block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
5240                                 (u64)-1);
5241         WARN_ON(fs_info->delalloc_block_rsv.size > 0);
5242         WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
5243         WARN_ON(fs_info->trans_block_rsv.size > 0);
5244         WARN_ON(fs_info->trans_block_rsv.reserved > 0);
5245         WARN_ON(fs_info->chunk_block_rsv.size > 0);
5246         WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
5247         WARN_ON(fs_info->delayed_block_rsv.size > 0);
5248         WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
5249 }
5250
5251 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
5252                                   struct btrfs_root *root)
5253 {
5254         if (!trans->block_rsv)
5255                 return;
5256
5257         if (!trans->bytes_reserved)
5258                 return;
5259
5260         trace_btrfs_space_reservation(root->fs_info, "transaction",
5261                                       trans->transid, trans->bytes_reserved, 0);
5262         btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
5263         trans->bytes_reserved = 0;
5264 }
5265
5266 /*
5267  * To be called after all the new block groups attached to the transaction
5268  * handle have been created (btrfs_create_pending_block_groups()).
5269  */
5270 void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
5271 {
5272         struct btrfs_fs_info *fs_info = trans->root->fs_info;
5273
5274         if (!trans->chunk_bytes_reserved)
5275                 return;
5276
5277         WARN_ON_ONCE(!list_empty(&trans->new_bgs));
5278
5279         block_rsv_release_bytes(fs_info, &fs_info->chunk_block_rsv, NULL,
5280                                 trans->chunk_bytes_reserved);
5281         trans->chunk_bytes_reserved = 0;
5282 }
5283
5284 /* Can only return 0 or -ENOSPC */
5285 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
5286                                   struct inode *inode)
5287 {
5288         struct btrfs_root *root = BTRFS_I(inode)->root;
5289         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
5290         struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
5291
5292         /*
5293          * We need to hold space in order to delete our orphan item once
5294          * we've added it, so take the reservation here and release it
5295          * later, when we are truly done with the orphan item.
5296          */
5297         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
5298         trace_btrfs_space_reservation(root->fs_info, "orphan",
5299                                       btrfs_ino(inode), num_bytes, 1);
5300         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
5301 }
5302
5303 void btrfs_orphan_release_metadata(struct inode *inode)
5304 {
5305         struct btrfs_root *root = BTRFS_I(inode)->root;
5306         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
5307         trace_btrfs_space_reservation(root->fs_info, "orphan",
5308                                       btrfs_ino(inode), num_bytes, 0);
5309         btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
5310 }
5311
5312 /*
5313  * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
5314  * root: the root of the parent directory
5315  * rsv: block reservation
5316  * items: the number of items that we need to reserve
5317  * qgroup_reserved: used to return the reserved size in qgroup
5318  *
5319  * This function is used to reserve the space for snapshot/subvolume
5320  * creation and deletion. Those operations are different from the
5321  * common file/directory operations: they change two fs/file trees
5322  * and the root tree, and the number of items that the qgroup reserves
5323  * differs from the free space reservation. So we can not use
5324  * the space reservation mechanism in start_transaction().
5325  */
5326 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
5327                                      struct btrfs_block_rsv *rsv,
5328                                      int items,
5329                                      u64 *qgroup_reserved,
5330                                      bool use_global_rsv)
5331 {
5332         u64 num_bytes;
5333         int ret;
5334         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5335
5336         if (root->fs_info->quota_enabled) {
5337                 /* One for parent inode, two for dir entries */
5338                 num_bytes = 3 * root->nodesize;
5339                 ret = btrfs_qgroup_reserve(root, num_bytes);
5340                 if (ret)
5341                         return ret;
5342         } else {
5343                 num_bytes = 0;
5344         }
5345
5346         *qgroup_reserved = num_bytes;
5347
5348         num_bytes = btrfs_calc_trans_metadata_size(root, items);
5349         rsv->space_info = __find_space_info(root->fs_info,
5350                                             BTRFS_BLOCK_GROUP_METADATA);
5351         ret = btrfs_block_rsv_add(root, rsv, num_bytes,
5352                                   BTRFS_RESERVE_FLUSH_ALL);
5353
5354         if (ret == -ENOSPC && use_global_rsv)
5355                 ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes);
5356
5357         if (ret) {
5358                 if (*qgroup_reserved)
5359                         btrfs_qgroup_free(root, *qgroup_reserved);
5360         }
5361
5362         return ret;
5363 }
5364
5365 void btrfs_subvolume_release_metadata(struct btrfs_root *root,
5366                                       struct btrfs_block_rsv *rsv,
5367                                       u64 qgroup_reserved)
5368 {
5369         btrfs_block_rsv_release(root, rsv, (u64)-1);
5370 }
5371
5372 /**
5373  * drop_outstanding_extent - drop an outstanding extent
5374  * @inode: the inode we're dropping the extent for
5375  * @num_bytes: the number of bytes we're releasing.
5376  *
5377  * This is called when we are freeing up an outstanding extent, either called
5378  * after an error or after an extent is written.  This will return the number of
5379  * reserved extents that need to be freed.  This must be called with
5380  * BTRFS_I(inode)->lock held.
5381  */
5382 static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes)
5383 {
5384         unsigned drop_inode_space = 0;
5385         unsigned dropped_extents = 0;
5386         unsigned num_extents = 0;
5387
5388         num_extents = (unsigned)div64_u64(num_bytes +
5389                                           BTRFS_MAX_EXTENT_SIZE - 1,
5390                                           BTRFS_MAX_EXTENT_SIZE);
5391         ASSERT(num_extents);
5392         ASSERT(BTRFS_I(inode)->outstanding_extents >= num_extents);
5393         BTRFS_I(inode)->outstanding_extents -= num_extents;
5394
5395         if (BTRFS_I(inode)->outstanding_extents == 0 &&
5396             test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5397                                &BTRFS_I(inode)->runtime_flags))
5398                 drop_inode_space = 1;
5399
5400         /*
5401          * If we have at least as many outstanding extents as we have
5402          * reserved then we need to leave the reserved extents count alone.
5403          */
5404         if (BTRFS_I(inode)->outstanding_extents >=
5405             BTRFS_I(inode)->reserved_extents)
5406                 return drop_inode_space;
5407
5408         dropped_extents = BTRFS_I(inode)->reserved_extents -
5409                 BTRFS_I(inode)->outstanding_extents;
5410         BTRFS_I(inode)->reserved_extents -= dropped_extents;
5411         return dropped_extents + drop_inode_space;
5412 }
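/*
 * Illustrative sketch, not kernel code: the div64_u64() above is a
 * ceiling division, so assuming BTRFS_MAX_EXTENT_SIZE is 128M the
 * mapping from released bytes to outstanding extents looks like:
 *
 *     num_extents(1)        == 1
 *     num_extents(128M)     == 1
 *     num_extents(128M + 1) == 2
 *     num_extents(300M)     == 3
 *
 * i.e. num_extents = ceil(num_bytes / BTRFS_MAX_EXTENT_SIZE).
 */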
5413
5414 /**
5415  * calc_csum_metadata_size - return the amount of metadata space that must be
5416  *      reserved/freed for the given bytes.
5417  * @inode: the inode we're manipulating
5418  * @num_bytes: the number of bytes in question
5419  * @reserve: 1 if we are reserving space, 0 if we are freeing space
5420  *
5421  * This adjusts the number of csum_bytes in the inode and then returns the
5422  * correct amount of metadata that must either be reserved or freed.  We
5423  * calculate how many checksums we can fit into one leaf and then divide the
5424  * number of bytes that will need to be checksummed by this value to figure out
5425  * how many checksums will be required.  If we are adding bytes then the number
5426  * may go up and we will return the number of additional bytes that must be
5427  * reserved.  If it is going down we will return the number of bytes that must
5428  * be freed.
5429  *
5430  * This must be called with BTRFS_I(inode)->lock held.
5431  */
5432 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
5433                                    int reserve)
5434 {
5435         struct btrfs_root *root = BTRFS_I(inode)->root;
5436         u64 old_csums, num_csums;
5437
5438         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
5439             BTRFS_I(inode)->csum_bytes == 0)
5440                 return 0;
5441
5442         old_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
5443         if (reserve)
5444                 BTRFS_I(inode)->csum_bytes += num_bytes;
5445         else
5446                 BTRFS_I(inode)->csum_bytes -= num_bytes;
5447         num_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
5448
5449         /* No change, no need to reserve more */
5450         if (old_csums == num_csums)
5451                 return 0;
5452
5453         if (reserve)
5454                 return btrfs_calc_trans_metadata_size(root,
5455                                                       num_csums - old_csums);
5456
5457         return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
5458 }
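/*
 * Illustrative sketch, not kernel code: assume, purely for the example,
 * that one leaf holds csum items covering 1M of data.  If csum_bytes is
 * 3.5M and we reserve another 1M:
 *
 *     old_csums = leaves(3.5M) = 4
 *     num_csums = leaves(4.5M) = 5
 *
 * and we return btrfs_calc_trans_metadata_size(root, 5 - 4), the cost
 * of the one extra leaf.  Freeing runs the same math with the
 * subtraction reversed.
 */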
5459
5460 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
5461 {
5462         struct btrfs_root *root = BTRFS_I(inode)->root;
5463         struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
5464         u64 to_reserve = 0;
5465         u64 csum_bytes;
5466         unsigned nr_extents = 0;
5467         int extra_reserve = 0;
5468         enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
5469         int ret = 0;
5470         bool delalloc_lock = true;
5471         u64 to_free = 0;
5472         unsigned dropped;
5473
5474         /* If we are a free space inode we need to not flush since we will be in
5475          * the middle of a transaction commit.  We also don't need the delalloc
5476          * mutex since we won't race with anybody.  We need this mostly to make
5477          * lockdep shut its filthy mouth.
5478          */
5479         if (btrfs_is_free_space_inode(inode)) {
5480                 flush = BTRFS_RESERVE_NO_FLUSH;
5481                 delalloc_lock = false;
5482         }
5483
5484         if (flush != BTRFS_RESERVE_NO_FLUSH &&
5485             btrfs_transaction_in_commit(root->fs_info))
5486                 schedule_timeout(1);
5487
5488         if (delalloc_lock)
5489                 mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
5490
5491         num_bytes = ALIGN(num_bytes, root->sectorsize);
5492
5493         spin_lock(&BTRFS_I(inode)->lock);
5494         nr_extents = (unsigned)div64_u64(num_bytes +
5495                                          BTRFS_MAX_EXTENT_SIZE - 1,
5496                                          BTRFS_MAX_EXTENT_SIZE);
5497         BTRFS_I(inode)->outstanding_extents += nr_extents;
5498         nr_extents = 0;
5499
5500         if (BTRFS_I(inode)->outstanding_extents >
5501             BTRFS_I(inode)->reserved_extents)
5502                 nr_extents = BTRFS_I(inode)->outstanding_extents -
5503                         BTRFS_I(inode)->reserved_extents;
5504
5505         /*
5506          * Add an item to reserve for updating the inode when we complete the
5507          * delalloc io.
5508          */
5509         if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5510                       &BTRFS_I(inode)->runtime_flags)) {
5511                 nr_extents++;
5512                 extra_reserve = 1;
5513         }
5514
5515         to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
5516         to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
5517         csum_bytes = BTRFS_I(inode)->csum_bytes;
5518         spin_unlock(&BTRFS_I(inode)->lock);
5519
5520         if (root->fs_info->quota_enabled) {
5521                 ret = btrfs_qgroup_reserve(root, nr_extents * root->nodesize);
5522                 if (ret)
5523                         goto out_fail;
5524         }
5525
5526         ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
5527         if (unlikely(ret)) {
5528                 if (root->fs_info->quota_enabled)
5529                         btrfs_qgroup_free(root, nr_extents * root->nodesize);
5530                 goto out_fail;
5531         }
5532
5533         spin_lock(&BTRFS_I(inode)->lock);
5534         if (extra_reserve) {
5535                 set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5536                         &BTRFS_I(inode)->runtime_flags);
5537                 nr_extents--;
5538         }
5539         BTRFS_I(inode)->reserved_extents += nr_extents;
5540         spin_unlock(&BTRFS_I(inode)->lock);
5541
5542         if (delalloc_lock)
5543                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5544
5545         if (to_reserve)
5546                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5547                                               btrfs_ino(inode), to_reserve, 1);
5548         block_rsv_add_bytes(block_rsv, to_reserve, 1);
5549
5550         return 0;
5551
5552 out_fail:
5553         spin_lock(&BTRFS_I(inode)->lock);
5554         dropped = drop_outstanding_extent(inode, num_bytes);
5555         /*
5556          * If the inode's csum_bytes is the same as the original
5557          * csum_bytes then we know we haven't raced with any free()ers,
5558          * so we can just reduce our inode's csum bytes and carry on.
5559          */
5560         if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
5561                 calc_csum_metadata_size(inode, num_bytes, 0);
5562         } else {
5563                 u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
5564                 u64 bytes;
5565
5566                 /*
5567                  * This is tricky, but first we need to figure out how much we
5568                  * free'd from any free-ers that occurred during this
5569                  * reservation, so we reset ->csum_bytes to the csum_bytes
5570                  * before we dropped our lock, and then call the free for the
5571                  * number of bytes that were freed while we were trying our
5572                  * reservation.
5573                  */
5574                 bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
5575                 BTRFS_I(inode)->csum_bytes = csum_bytes;
5576                 to_free = calc_csum_metadata_size(inode, bytes, 0);
5577
5579                 /*
5580                  * Now we need to see how much we would have freed had we not
5581                  * been making this reservation and our ->csum_bytes were not
5582                  * artificially inflated.
5583                  */
5584                 BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
5585                 bytes = csum_bytes - orig_csum_bytes;
5586                 bytes = calc_csum_metadata_size(inode, bytes, 0);
5587
5588                 /*
5589                  * Now reset ->csum_bytes to what it should be.  If bytes is
5590                  * more than to_free then we would have free'd more space had we
5591                  * not had an artificially high ->csum_bytes, so we need to free
5592                  * the remainder.  If bytes is the same or less then we don't
5593                  * need to do anything, the other free-ers did the correct
5594                  * thing.
5595                  */
5596                 BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
5597                 if (bytes > to_free)
5598                         to_free = bytes - to_free;
5599                 else
5600                         to_free = 0;
5601         }
5602         spin_unlock(&BTRFS_I(inode)->lock);
5603         if (dropped)
5604                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5605
5606         if (to_free) {
5607                 btrfs_block_rsv_release(root, block_rsv, to_free);
5608                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5609                                               btrfs_ino(inode), to_free, 0);
5610         }
5611         if (delalloc_lock)
5612                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5613         return ret;
5614 }
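/*
 * Illustrative sketch, not kernel code, of the out_fail csum math
 * above.  Say num_bytes is 2M, ->csum_bytes was 10M before our reserve
 * (so csum_bytes was sampled as 12M), and racing free()ers dropped
 * ->csum_bytes to 9M before we re-took the lock:
 *
 *     bytes   = 12M - 9M = 3M            (freed by the racers)
 *     to_free = cost of going 12M -> 9M  (with our inflated 2M)
 *     bytes   = cost of going 10M -> 7M  (without our 2M)
 *     final ->csum_bytes = 9M - 2M = 7M
 *
 * If the uninflated cost is larger, we free the difference; otherwise
 * the racers already released enough on our behalf.
 */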
5615
5616 /**
5617  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
5618  * @inode: the inode to release the reservation for
5619  * @num_bytes: the number of bytes we're releasing
5620  *
5621  * This will release the metadata reservation for an inode.  This can be called
5622  * once we complete IO for a given set of bytes to release their metadata
5623  * reservations.
5624  */
5625 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
5626 {
5627         struct btrfs_root *root = BTRFS_I(inode)->root;
5628         u64 to_free = 0;
5629         unsigned dropped;
5630
5631         num_bytes = ALIGN(num_bytes, root->sectorsize);
5632         spin_lock(&BTRFS_I(inode)->lock);
5633         dropped = drop_outstanding_extent(inode, num_bytes);
5634
5635         if (num_bytes)
5636                 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
5637         spin_unlock(&BTRFS_I(inode)->lock);
5638         if (dropped > 0)
5639                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5640
5641         if (btrfs_test_is_dummy_root(root))
5642                 return;
5643
5644         trace_btrfs_space_reservation(root->fs_info, "delalloc",
5645                                       btrfs_ino(inode), to_free, 0);
5646
5647         btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
5648                                 to_free);
5649 }
5650
5651 /**
5652  * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
5653  * @inode: inode we're writing to
5654  * @num_bytes: the number of bytes we want to allocate
5655  *
5656  * This will do the following things
5657  *
5658  * o reserve space in the data space info for num_bytes
5659  * o reserve space in the metadata space info based on number of outstanding
5660  *   extents and how much csums will be needed
5661  * o add to the inode's ->delalloc_bytes
5662  * o add it to the fs_info's delalloc inodes list.
5663  *
5664  * This will return 0 for success and -ENOSPC if there is no space left.
5665  */
5666 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
5667 {
5668         int ret;
5669
5670         ret = btrfs_check_data_free_space(inode, num_bytes, num_bytes);
5671         if (ret)
5672                 return ret;
5673
5674         ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
5675         if (ret) {
5676                 btrfs_free_reserved_data_space(inode, num_bytes);
5677                 return ret;
5678         }
5679
5680         return 0;
5681 }
5682
5683 /**
5684  * btrfs_delalloc_release_space - release data and metadata space for delalloc
5685  * @inode: inode we're releasing space for
5686  * @num_bytes: the number of bytes we want to free up
5687  *
5688  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
5689  * called in the case that we don't need the metadata AND data reservations
5690  * anymore, for example if there is an error or we insert an inline extent.
5691  *
5692  * This function will release the metadata space that was not used and will
5693  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
5694  * list if there are no delalloc bytes left.
5695  */
5696 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
5697 {
5698         btrfs_delalloc_release_metadata(inode, num_bytes);
5699         btrfs_free_reserved_data_space(inode, num_bytes);
5700 }
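/*
 * Illustrative sketch, not kernel code, of how a write path pairs these
 * two helpers (do_the_write() is a hypothetical stand-in):
 *
 *     ret = btrfs_delalloc_reserve_space(inode, len);
 *     if (ret)
 *             return ret;
 *     ret = do_the_write(inode, len);
 *     if (ret)
 *             btrfs_delalloc_release_space(inode, len);
 *
 * On success the data reservation is consumed by the write, and the
 * metadata half is released later, once the IO completes, via
 * btrfs_delalloc_release_metadata().
 */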
5701
5702 static int update_block_group(struct btrfs_trans_handle *trans,
5703                               struct btrfs_root *root, u64 bytenr,
5704                               u64 num_bytes, int alloc)
5705 {
5706         struct btrfs_block_group_cache *cache = NULL;
5707         struct btrfs_fs_info *info = root->fs_info;
5708         u64 total = num_bytes;
5709         u64 old_val;
5710         u64 byte_in_group;
5711         int factor;
5712
5713         /* block accounting for super block */
5714         spin_lock(&info->delalloc_root_lock);
5715         old_val = btrfs_super_bytes_used(info->super_copy);
5716         if (alloc)
5717                 old_val += num_bytes;
5718         else
5719                 old_val -= num_bytes;
5720         btrfs_set_super_bytes_used(info->super_copy, old_val);
5721         spin_unlock(&info->delalloc_root_lock);
5722
5723         while (total) {
5724                 cache = btrfs_lookup_block_group(info, bytenr);
5725                 if (!cache)
5726                         return -ENOENT;
5727                 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
5728                                     BTRFS_BLOCK_GROUP_RAID1 |
5729                                     BTRFS_BLOCK_GROUP_RAID10))
5730                         factor = 2;
5731                 else
5732                         factor = 1;
5733                 /*
5734                  * If this block group has free space cache written out, we
5735                  * need to make sure to load it if we are removing space.  This
5736                  * is because we need the unpinning stage to actually add the
5737                  * space back to the block group, otherwise we will leak space.
5738                  */
5739                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
5740                         cache_block_group(cache, 1);
5741
5742                 byte_in_group = bytenr - cache->key.objectid;
5743                 WARN_ON(byte_in_group > cache->key.offset);
5744
5745                 spin_lock(&cache->space_info->lock);
5746                 spin_lock(&cache->lock);
5747
5748                 if (btrfs_test_opt(root, SPACE_CACHE) &&
5749                     cache->disk_cache_state < BTRFS_DC_CLEAR)
5750                         cache->disk_cache_state = BTRFS_DC_CLEAR;
5751
5752                 old_val = btrfs_block_group_used(&cache->item);
5753                 num_bytes = min(total, cache->key.offset - byte_in_group);
5754                 if (alloc) {
5755                         old_val += num_bytes;
5756                         btrfs_set_block_group_used(&cache->item, old_val);
5757                         cache->reserved -= num_bytes;
5758                         cache->space_info->bytes_reserved -= num_bytes;
5759                         cache->space_info->bytes_used += num_bytes;
5760                         cache->space_info->disk_used += num_bytes * factor;
5761                         spin_unlock(&cache->lock);
5762                         spin_unlock(&cache->space_info->lock);
5763                 } else {
5764                         old_val -= num_bytes;
5765                         btrfs_set_block_group_used(&cache->item, old_val);
5766                         cache->pinned += num_bytes;
5767                         cache->space_info->bytes_pinned += num_bytes;
5768                         cache->space_info->bytes_used -= num_bytes;
5769                         cache->space_info->disk_used -= num_bytes * factor;
5770                         spin_unlock(&cache->lock);
5771                         spin_unlock(&cache->space_info->lock);
5772
5773                         set_extent_dirty(info->pinned_extents,
5774                                          bytenr, bytenr + num_bytes - 1,
5775                                          GFP_NOFS | __GFP_NOFAIL);
5776                         /*
5777                          * No longer have used bytes in this block group, queue
5778                          * it for deletion.
5779                          */
5780                         if (old_val == 0) {
5781                                 spin_lock(&info->unused_bgs_lock);
5782                                 if (list_empty(&cache->bg_list)) {
5783                                         btrfs_get_block_group(cache);
5784                                         list_add_tail(&cache->bg_list,
5785                                                       &info->unused_bgs);
5786                                 }
5787                                 spin_unlock(&info->unused_bgs_lock);
5788                         }
5789                 }
5790
5791                 spin_lock(&trans->transaction->dirty_bgs_lock);
5792                 if (list_empty(&cache->dirty_list)) {
5793                         list_add_tail(&cache->dirty_list,
5794                                       &trans->transaction->dirty_bgs);
5795                 trans->transaction->num_dirty_bgs++;
5796                 btrfs_get_block_group(cache);
5797                 }
5798                 spin_unlock(&trans->transaction->dirty_bgs_lock);
5799
5800                 btrfs_put_block_group(cache);
5801                 total -= num_bytes;
5802                 bytenr += num_bytes;
5803         }
5804         return 0;
5805 }
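/*
 * Illustrative sketch, not kernel code: the factor above doubles the
 * on-disk accounting for profiles that keep two copies.  Allocating 1M
 * out of a RAID1 block group moves the space_info counters as:
 *
 *     bytes_used += 1M;
 *     disk_used  += 2M;    (factor == 2 for DUP/RAID1/RAID10)
 *
 * while single, RAID0 and RAID5/6 groups use factor == 1, so bytes_used
 * and disk_used move in lockstep.
 */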
5806
5807 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
5808 {
5809         struct btrfs_block_group_cache *cache;
5810         u64 bytenr;
5811
5812         spin_lock(&root->fs_info->block_group_cache_lock);
5813         bytenr = root->fs_info->first_logical_byte;
5814         spin_unlock(&root->fs_info->block_group_cache_lock);
5815
5816         if (bytenr < (u64)-1)
5817                 return bytenr;
5818
5819         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
5820         if (!cache)
5821                 return 0;
5822
5823         bytenr = cache->key.objectid;
5824         btrfs_put_block_group(cache);
5825
5826         return bytenr;
5827 }
5828
5829 static int pin_down_extent(struct btrfs_root *root,
5830                            struct btrfs_block_group_cache *cache,
5831                            u64 bytenr, u64 num_bytes, int reserved)
5832 {
5833         spin_lock(&cache->space_info->lock);
5834         spin_lock(&cache->lock);
5835         cache->pinned += num_bytes;
5836         cache->space_info->bytes_pinned += num_bytes;
5837         if (reserved) {
5838                 cache->reserved -= num_bytes;
5839                 cache->space_info->bytes_reserved -= num_bytes;
5840         }
5841         spin_unlock(&cache->lock);
5842         spin_unlock(&cache->space_info->lock);
5843
5844         set_extent_dirty(root->fs_info->pinned_extents, bytenr,
5845                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
5846         if (reserved)
5847                 trace_btrfs_reserved_extent_free(root, bytenr, num_bytes);
5848         return 0;
5849 }
5850
5851 /*
5852  * this function must be called within a transaction
5853  */
5854 int btrfs_pin_extent(struct btrfs_root *root,
5855                      u64 bytenr, u64 num_bytes, int reserved)
5856 {
5857         struct btrfs_block_group_cache *cache;
5858
5859         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5860         BUG_ON(!cache); /* Logic error */
5861
5862         pin_down_extent(root, cache, bytenr, num_bytes, reserved);
5863
5864         btrfs_put_block_group(cache);
5865         return 0;
5866 }
5867
5868 /*
5869  * this function must be called within transaction
5870  * this function must be called within a transaction
5871 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
5872                                     u64 bytenr, u64 num_bytes)
5873 {
5874         struct btrfs_block_group_cache *cache;
5875         int ret;
5876
5877         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5878         if (!cache)
5879                 return -EINVAL;
5880
5881         /*
5882          * pull in the free space cache (if any) so that our pin
5883          * removes the free space from the cache.  We have load_only set
5884          * to one because the slow code that reads in the free extents
5885          * already checks the pinned extents.
5886          */
5887         cache_block_group(cache, 1);
5888
5889         pin_down_extent(root, cache, bytenr, num_bytes, 0);
5890
5891         /* remove us from the free space cache (if we're there at all) */
5892         ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
5893         btrfs_put_block_group(cache);
5894         return ret;
5895 }
5896
5897 static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes)
5898 {
5899         int ret;
5900         struct btrfs_block_group_cache *block_group;
5901         struct btrfs_caching_control *caching_ctl;
5902
5903         block_group = btrfs_lookup_block_group(root->fs_info, start);
5904         if (!block_group)
5905                 return -EINVAL;
5906
5907         cache_block_group(block_group, 0);
5908         caching_ctl = get_caching_control(block_group);
5909
5910         if (!caching_ctl) {
5911                 /* Logic error */
5912                 BUG_ON(!block_group_cache_done(block_group));
5913                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
5914         } else {
5915                 mutex_lock(&caching_ctl->mutex);
5916
5917                 if (start >= caching_ctl->progress) {
5918                         ret = add_excluded_extent(root, start, num_bytes);
5919                 } else if (start + num_bytes <= caching_ctl->progress) {
5920                         ret = btrfs_remove_free_space(block_group,
5921                                                       start, num_bytes);
5922                 } else {
5923                         num_bytes = caching_ctl->progress - start;
5924                         ret = btrfs_remove_free_space(block_group,
5925                                                       start, num_bytes);
5926                         if (ret)
5927                                 goto out_lock;
5928
5929                         num_bytes = (start + num_bytes) -
5930                                 caching_ctl->progress;
5931                         start = caching_ctl->progress;
5932                         ret = add_excluded_extent(root, start, num_bytes);
5933                 }
5934 out_lock:
5935                 mutex_unlock(&caching_ctl->mutex);
5936                 put_caching_control(caching_ctl);
5937         }
5938         btrfs_put_block_group(block_group);
5939         return ret;
5940 }
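/*
 * Illustrative sketch, not kernel code: when a logged extent straddles
 * caching_ctl->progress the function above splits it in two.  With
 * progress at 100M and an extent spanning [96M, 104M):
 *
 *     [96M, 100M)  -> btrfs_remove_free_space()  (already cached)
 *     [100M, 104M) -> add_excluded_extent()      (not cached yet)
 *
 * Extents entirely below or entirely above progress take just one of
 * the two branches.
 */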
5941
5942 int btrfs_exclude_logged_extents(struct btrfs_root *log,
5943                                  struct extent_buffer *eb)
5944 {
5945         struct btrfs_file_extent_item *item;
5946         struct btrfs_key key;
5947         int found_type;
5948         int i;
5949
5950         if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS))
5951                 return 0;
5952
5953         for (i = 0; i < btrfs_header_nritems(eb); i++) {
5954                 btrfs_item_key_to_cpu(eb, &key, i);
5955                 if (key.type != BTRFS_EXTENT_DATA_KEY)
5956                         continue;
5957                 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
5958                 found_type = btrfs_file_extent_type(eb, item);
5959                 if (found_type == BTRFS_FILE_EXTENT_INLINE)
5960                         continue;
5961                 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
5962                         continue;
5963                 key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
5964                 key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
5965                 __exclude_logged_extent(log, key.objectid, key.offset);
5966         }
5967
5968         return 0;
5969 }
5970
5971 /**
5972  * btrfs_update_reserved_bytes - update the block_group and space info counters
5973  * @cache:      The cache we are manipulating
5974  * @num_bytes:  The number of bytes in question
5975  * @reserve:    One of the reservation enums
5976  * @delalloc:   The blocks are allocated for the delalloc write
5977  *
5978  * This is called by the allocator when it reserves space, or by somebody who is
5979  * freeing space that was never actually used on disk.  For example if you
5980  * reserve some space for a new leaf in transaction A and before transaction A
5981  * commits you free that leaf, you call this with reserve set to 0 in order to
5982  * clear the reservation.
5983  *
5984  * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
5985  * ENOSPC accounting.  For data we handle the reservation through clearing the
5986  * delalloc bits in the io_tree.  We have to do this since we could end up
5987  * allocating less disk space for the amount of data we have reserved in the
5988  * case of compression.
5989  *
5990  * If this is a reservation and the block group has become read only we cannot
5991  * make the reservation and return -EAGAIN, otherwise this function always
5992  * succeeds.
5993  */
5994 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
5995                                        u64 num_bytes, int reserve, int delalloc)
5996 {
5997         struct btrfs_space_info *space_info = cache->space_info;
5998         int ret = 0;
5999
6000         spin_lock(&space_info->lock);
6001         spin_lock(&cache->lock);
6002         if (reserve != RESERVE_FREE) {
6003                 if (cache->ro) {
6004                         ret = -EAGAIN;
6005                 } else {
6006                         cache->reserved += num_bytes;
6007                         space_info->bytes_reserved += num_bytes;
6008                         if (reserve == RESERVE_ALLOC) {
6009                                 trace_btrfs_space_reservation(cache->fs_info,
6010                                                 "space_info", space_info->flags,
6011                                                 num_bytes, 0);
6012                                 space_info->bytes_may_use -= num_bytes;
6013                         }
6014
6015                         if (delalloc)
6016                                 cache->delalloc_bytes += num_bytes;
6017                 }
6018         } else {
6019                 if (cache->ro)
6020                         space_info->bytes_readonly += num_bytes;
6021                 cache->reserved -= num_bytes;
6022                 space_info->bytes_reserved -= num_bytes;
6023
6024                 if (delalloc)
6025                         cache->delalloc_bytes -= num_bytes;
6026         }
6027         spin_unlock(&cache->lock);
6028         spin_unlock(&space_info->lock);
6029         return ret;
6030 }
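/*
 * Illustrative sketch, not kernel code: reserving 1M with RESERVE_ALLOC
 * against a writable block group moves the counters as:
 *
 *     cache->reserved            += 1M;
 *     space_info->bytes_reserved += 1M;
 *     space_info->bytes_may_use  -= 1M;    (the ENOSPC accounting)
 *
 * RESERVE_ALLOC_NO_ACCOUNT skips only the bytes_may_use line, and
 * RESERVE_FREE reverses the first two.
 */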
6031
6032 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
6033                                 struct btrfs_root *root)
6034 {
6035         struct btrfs_fs_info *fs_info = root->fs_info;
6036         struct btrfs_caching_control *next;
6037         struct btrfs_caching_control *caching_ctl;
6038         struct btrfs_block_group_cache *cache;
6039
6040         down_write(&fs_info->commit_root_sem);
6041
6042         list_for_each_entry_safe(caching_ctl, next,
6043                                  &fs_info->caching_block_groups, list) {
6044                 cache = caching_ctl->block_group;
6045                 if (block_group_cache_done(cache)) {
6046                         cache->last_byte_to_unpin = (u64)-1;
6047                         list_del_init(&caching_ctl->list);
6048                         put_caching_control(caching_ctl);
6049                 } else {
6050                         cache->last_byte_to_unpin = caching_ctl->progress;
6051                 }
6052         }
6053
6054         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
6055                 fs_info->pinned_extents = &fs_info->freed_extents[1];
6056         else
6057                 fs_info->pinned_extents = &fs_info->freed_extents[0];
6058
6059         up_write(&fs_info->commit_root_sem);
6060
6061         update_global_block_rsv(fs_info);
6062 }
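/*
 * Illustrative sketch, not kernel code: pinned_extents double-buffers
 * between freed_extents[0] and freed_extents[1].  The flip above means
 * that while btrfs_finish_extent_commit() unpins the committing
 * transaction's extents from one tree, new pins land in the other, and
 * the roles swap again at the next commit.
 */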
6063
6064 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
6065                               const bool return_free_space)
6066 {
6067         struct btrfs_fs_info *fs_info = root->fs_info;
6068         struct btrfs_block_group_cache *cache = NULL;
6069         struct btrfs_space_info *space_info;
6070         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
6071         u64 len;
6072         bool readonly;
6073
6074         while (start <= end) {
6075                 readonly = false;
6076                 if (!cache ||
6077                     start >= cache->key.objectid + cache->key.offset) {
6078                         if (cache)
6079                                 btrfs_put_block_group(cache);
6080                         cache = btrfs_lookup_block_group(fs_info, start);
6081                         BUG_ON(!cache); /* Logic error */
6082                 }
6083
6084                 len = cache->key.objectid + cache->key.offset - start;
6085                 len = min(len, end + 1 - start);
6086
6087                 if (start < cache->last_byte_to_unpin) {
6088                         len = min(len, cache->last_byte_to_unpin - start);
6089                         if (return_free_space)
6090                                 btrfs_add_free_space(cache, start, len);
6091                 }
6092
6093                 start += len;
6094                 space_info = cache->space_info;
6095
6096                 spin_lock(&space_info->lock);
6097                 spin_lock(&cache->lock);
6098                 cache->pinned -= len;
6099                 space_info->bytes_pinned -= len;
6100                 percpu_counter_add(&space_info->total_bytes_pinned, -len);
6101                 if (cache->ro) {
6102                         space_info->bytes_readonly += len;
6103                         readonly = true;
6104                 }
6105                 spin_unlock(&cache->lock);
6106                 if (!readonly && global_rsv->space_info == space_info) {
6107                         spin_lock(&global_rsv->lock);
6108                         if (!global_rsv->full) {
6109                                 len = min(len, global_rsv->size -
6110                                           global_rsv->reserved);
6111                                 global_rsv->reserved += len;
6112                                 space_info->bytes_may_use += len;
6113                                 if (global_rsv->reserved >= global_rsv->size)
6114                                         global_rsv->full = 1;
6115                         }
6116                         spin_unlock(&global_rsv->lock);
6117                 }
6118                 spin_unlock(&space_info->lock);
6119         }
6120
6121         if (cache)
6122                 btrfs_put_block_group(cache);
6123         return 0;
6124 }
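/*
 * Illustrative sketch, not kernel code: the global rsv refill above is
 * clamped so the reserve never overfills.  With size == 10M,
 * reserved == 8M and an unpinned len of 5M:
 *
 *     len = min(5M, 10M - 8M) = 2M;
 *     global_rsv->reserved = 10M;        (now full)
 *     space_info->bytes_may_use += 2M;
 *
 * The other 3M are still unpinned, they just aren't re-reserved.
 */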
6125
6126 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
6127                                struct btrfs_root *root)
6128 {
6129         struct btrfs_fs_info *fs_info = root->fs_info;
6130         struct btrfs_block_group_cache *block_group, *tmp;
6131         struct list_head *deleted_bgs;
6132         struct extent_io_tree *unpin;
6133         u64 start;
6134         u64 end;
6135         int ret;
6136
6137         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
6138                 unpin = &fs_info->freed_extents[1];
6139         else
6140                 unpin = &fs_info->freed_extents[0];
6141
6142         while (!trans->aborted) {
6143                 mutex_lock(&fs_info->unused_bg_unpin_mutex);
6144                 ret = find_first_extent_bit(unpin, 0, &start, &end,
6145                                             EXTENT_DIRTY, NULL);
6146                 if (ret) {
6147                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6148                         break;
6149                 }
6150
6151                 if (btrfs_test_opt(root, DISCARD))
6152                         ret = btrfs_discard_extent(root, start,
6153                                                    end + 1 - start, NULL);
6154
6155                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
6156                 unpin_extent_range(root, start, end, true);
6157                 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6158                 cond_resched();
6159         }
6160
6161         /*
6162          * Transaction is finished.  We don't need the lock anymore.  We
6163          * do need to clean up the block groups in case of a transaction
6164          * abort.
6165          */
6166         deleted_bgs = &trans->transaction->deleted_bgs;
6167         list_for_each_entry_safe(block_group, tmp, deleted_bgs, bg_list) {
6168                 u64 trimmed = 0;
6169
6170                 ret = -EROFS;
6171                 if (!trans->aborted)
6172                         ret = btrfs_discard_extent(root,
6173                                                    block_group->key.objectid,
6174                                                    block_group->key.offset,
6175                                                    &trimmed);
6176
6177                 list_del_init(&block_group->bg_list);
6178                 btrfs_put_block_group_trimming(block_group);
6179                 btrfs_put_block_group(block_group);
6180
6181                 if (ret) {
6182                         const char *errstr = btrfs_decode_error(ret);
6183                         btrfs_warn(fs_info,
6184                                    "Discard failed while removing blockgroup: errno=%d %s",
6185                                    ret, errstr);
6186                 }
6187         }
6188
6189         return 0;
6190 }
6191
6192 static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
6193                              u64 owner, u64 root_objectid)
6194 {
6195         struct btrfs_space_info *space_info;
6196         u64 flags;
6197
6198         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6199                 if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
6200                         flags = BTRFS_BLOCK_GROUP_SYSTEM;
6201                 else
6202                         flags = BTRFS_BLOCK_GROUP_METADATA;
6203         } else {
6204                 flags = BTRFS_BLOCK_GROUP_DATA;
6205         }
6206
6207         space_info = __find_space_info(fs_info, flags);
6208         BUG_ON(!space_info); /* Logic bug */
6209         percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
6210 }
6211
6213 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
6214                                 struct btrfs_root *root,
6215                                 struct btrfs_delayed_ref_node *node, u64 parent,
6216                                 u64 root_objectid, u64 owner_objectid,
6217                                 u64 owner_offset, int refs_to_drop,
6218                                 struct btrfs_delayed_extent_op *extent_op)
6219 {
6220         struct btrfs_key key;
6221         struct btrfs_path *path;
6222         struct btrfs_fs_info *info = root->fs_info;
6223         struct btrfs_root *extent_root = info->extent_root;
6224         struct extent_buffer *leaf;
6225         struct btrfs_extent_item *ei;
6226         struct btrfs_extent_inline_ref *iref;
6227         int ret;
6228         int is_data;
6229         int extent_slot = 0;
6230         int found_extent = 0;
6231         int num_to_del = 1;
6232         int no_quota = node->no_quota;
6233         u32 item_size;
6234         u64 refs;
6235         u64 bytenr = node->bytenr;
6236         u64 num_bytes = node->num_bytes;
6237         int last_ref = 0;
6238         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
6239                                                  SKINNY_METADATA);
6240
6241         if (!info->quota_enabled || !is_fstree(root_objectid))
6242                 no_quota = 1;
6243
6244         path = btrfs_alloc_path();
6245         if (!path)
6246                 return -ENOMEM;
6247
6248         path->reada = 1;
6249         path->leave_spinning = 1;
6250
6251         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
6252         BUG_ON(!is_data && refs_to_drop != 1);
6253
6254         if (is_data)
6255                 skinny_metadata = 0;
6256
6257         ret = lookup_extent_backref(trans, extent_root, path, &iref,
6258                                     bytenr, num_bytes, parent,
6259                                     root_objectid, owner_objectid,
6260                                     owner_offset);
6261         if (ret == 0) {
6262                 extent_slot = path->slots[0];
6263                 while (extent_slot >= 0) {
6264                         btrfs_item_key_to_cpu(path->nodes[0], &key,
6265                                               extent_slot);
6266                         if (key.objectid != bytenr)
6267                                 break;
6268                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
6269                             key.offset == num_bytes) {
6270                                 found_extent = 1;
6271                                 break;
6272                         }
6273                         if (key.type == BTRFS_METADATA_ITEM_KEY &&
6274                             key.offset == owner_objectid) {
6275                                 found_extent = 1;
6276                                 break;
6277                         }
6278                         if (path->slots[0] - extent_slot > 5)
6279                                 break;
6280                         extent_slot--;
6281                 }
6282 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
6283                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
6284                 if (found_extent && item_size < sizeof(*ei))
6285                         found_extent = 0;
6286 #endif
6287                 if (!found_extent) {
6288                         BUG_ON(iref);
6289                         ret = remove_extent_backref(trans, extent_root, path,
6290                                                     NULL, refs_to_drop,
6291                                                     is_data, &last_ref);
6292                         if (ret) {
6293                                 btrfs_abort_transaction(trans, extent_root, ret);
6294                                 goto out;
6295                         }
6296                         btrfs_release_path(path);
6297                         path->leave_spinning = 1;
6298
6299                         key.objectid = bytenr;
6300                         key.type = BTRFS_EXTENT_ITEM_KEY;
6301                         key.offset = num_bytes;
6302
6303                         if (!is_data && skinny_metadata) {
6304                                 key.type = BTRFS_METADATA_ITEM_KEY;
6305                                 key.offset = owner_objectid;
6306                         }
6307
6308                         ret = btrfs_search_slot(trans, extent_root,
6309                                                 &key, path, -1, 1);
6310                         if (ret > 0 && skinny_metadata && path->slots[0]) {
6311                                 /*
6312                                  * Couldn't find our skinny metadata item,
6313                                  * see if we have ye olde extent item.
6314                                  */
6315                                 path->slots[0]--;
6316                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
6317                                                       path->slots[0]);
6318                                 if (key.objectid == bytenr &&
6319                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
6320                                     key.offset == num_bytes)
6321                                         ret = 0;
6322                         }
6323
6324                         if (ret > 0 && skinny_metadata) {
6325                                 skinny_metadata = false;
6326                                 key.objectid = bytenr;
6327                                 key.type = BTRFS_EXTENT_ITEM_KEY;
6328                                 key.offset = num_bytes;
6329                                 btrfs_release_path(path);
6330                                 ret = btrfs_search_slot(trans, extent_root,
6331                                                         &key, path, -1, 1);
6332                         }
6333
6334                         if (ret) {
6335                                 btrfs_err(info, "umm, got %d back from search, was looking for %llu",
6336                                         ret, bytenr);
6337                                 if (ret > 0)
6338                                         btrfs_print_leaf(extent_root,
6339                                                          path->nodes[0]);
6340                         }
6341                         if (ret < 0) {
6342                                 btrfs_abort_transaction(trans, extent_root, ret);
6343                                 goto out;
6344                         }
6345                         extent_slot = path->slots[0];
6346                 }
6347         } else if (WARN_ON(ret == -ENOENT)) {
6348                 btrfs_print_leaf(extent_root, path->nodes[0]);
6349                 btrfs_err(info,
6350                         "unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu",
6351                         bytenr, parent, root_objectid, owner_objectid,
6352                         owner_offset);
6353                 btrfs_abort_transaction(trans, extent_root, ret);
6354                 goto out;
6355         } else {
6356                 btrfs_abort_transaction(trans, extent_root, ret);
6357                 goto out;
6358         }
6359
6360         leaf = path->nodes[0];
6361         item_size = btrfs_item_size_nr(leaf, extent_slot);
6362 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
6363         if (item_size < sizeof(*ei)) {
6364                 BUG_ON(found_extent || extent_slot != path->slots[0]);
6365                 ret = convert_extent_item_v0(trans, extent_root, path,
6366                                              owner_objectid, 0);
6367                 if (ret < 0) {
6368                         btrfs_abort_transaction(trans, extent_root, ret);
6369                         goto out;
6370                 }
6371
6372                 btrfs_release_path(path);
6373                 path->leave_spinning = 1;
6374
6375                 key.objectid = bytenr;
6376                 key.type = BTRFS_EXTENT_ITEM_KEY;
6377                 key.offset = num_bytes;
6378
6379                 ret = btrfs_search_slot(trans, extent_root, &key, path,
6380                                         -1, 1);
6381                 if (ret) {
6382                         btrfs_err(info, "umm, got %d back from search, was looking for %llu",
6383                                 ret, bytenr);
6384                         btrfs_print_leaf(extent_root, path->nodes[0]);
6385                 }
6386                 if (ret < 0) {
6387                         btrfs_abort_transaction(trans, extent_root, ret);
6388                         goto out;
6389                 }
6390
6391                 extent_slot = path->slots[0];
6392                 leaf = path->nodes[0];
6393                 item_size = btrfs_item_size_nr(leaf, extent_slot);
6394         }
6395 #endif
6396         BUG_ON(item_size < sizeof(*ei));
6397         ei = btrfs_item_ptr(leaf, extent_slot,
6398                             struct btrfs_extent_item);
6399         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
6400             key.type == BTRFS_EXTENT_ITEM_KEY) {
6401                 struct btrfs_tree_block_info *bi;
6402                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
6403                 bi = (struct btrfs_tree_block_info *)(ei + 1);
6404                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
6405         }
6406
6407         refs = btrfs_extent_refs(leaf, ei);
6408         if (refs < refs_to_drop) {
6409                 btrfs_err(info, "trying to drop %d refs but we only have %Lu "
6410                           "for bytenr %Lu", refs_to_drop, refs, bytenr);
6411                 ret = -EINVAL;
6412                 btrfs_abort_transaction(trans, extent_root, ret);
6413                 goto out;
6414         }
6415         refs -= refs_to_drop;
6416
6417         if (refs > 0) {
6418                 if (extent_op)
6419                         __run_delayed_extent_op(extent_op, leaf, ei);
6420                 /*
6421                  * In the case of an inline back ref, the reference count will
6422                  * be updated by remove_extent_backref
6423                  */
6424                 if (iref) {
6425                         BUG_ON(!found_extent);
6426                 } else {
6427                         btrfs_set_extent_refs(leaf, ei, refs);
6428                         btrfs_mark_buffer_dirty(leaf);
6429                 }
6430                 if (found_extent) {
6431                         ret = remove_extent_backref(trans, extent_root, path,
6432                                                     iref, refs_to_drop,
6433                                                     is_data, &last_ref);
6434                         if (ret) {
6435                                 btrfs_abort_transaction(trans, extent_root, ret);
6436                                 goto out;
6437                         }
6438                 }
6439                 add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
6440                                  root_objectid);
6441         } else {
6442                 if (found_extent) {
6443                         BUG_ON(is_data && refs_to_drop !=
6444                                extent_data_ref_count(path, iref));
6445                         if (iref) {
6446                                 BUG_ON(path->slots[0] != extent_slot);
6447                         } else {
6448                                 BUG_ON(path->slots[0] != extent_slot + 1);
6449                                 path->slots[0] = extent_slot;
6450                                 num_to_del = 2;
6451                         }
6452                 }
6453
6454                 last_ref = 1;
6455                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
6456                                       num_to_del);
6457                 if (ret) {
6458                         btrfs_abort_transaction(trans, extent_root, ret);
6459                         goto out;
6460                 }
6461                 btrfs_release_path(path);
6462
6463                 if (is_data) {
6464                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
6465                         if (ret) {
6466                                 btrfs_abort_transaction(trans, extent_root, ret);
6467                                 goto out;
6468                         }
6469                 }
6470
6471                 ret = update_block_group(trans, root, bytenr, num_bytes, 0);
6472                 if (ret) {
6473                         btrfs_abort_transaction(trans, extent_root, ret);
6474                         goto out;
6475                 }
6476         }
6477         btrfs_release_path(path);
6478
6479 out:
6480         btrfs_free_path(path);
6481         return ret;
6482 }
6483
6484 /*
6485  * when we free a block, it is possible (and likely) that we free the last
6486  * delayed ref for that extent as well.  This searches the delayed ref tree for
6487  * a given extent, and if there are no other delayed refs to be processed, it
6488  * removes it from the tree.
6489  */
6490 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
6491                                       struct btrfs_root *root, u64 bytenr)
6492 {
6493         struct btrfs_delayed_ref_head *head;
6494         struct btrfs_delayed_ref_root *delayed_refs;
6495         int ret = 0;
6496
6497         delayed_refs = &trans->transaction->delayed_refs;
6498         spin_lock(&delayed_refs->lock);
6499         head = btrfs_find_delayed_ref_head(trans, bytenr);
6500         if (!head)
6501                 goto out_delayed_unlock;
6502
6503         spin_lock(&head->lock);
6504         if (!list_empty(&head->ref_list))
6505                 goto out;
6506
6507         if (head->extent_op) {
6508                 if (!head->must_insert_reserved)
6509                         goto out;
6510                 btrfs_free_delayed_extent_op(head->extent_op);
6511                 head->extent_op = NULL;
6512         }
6513
6514         /*
6515          * waiting for the lock here would deadlock.  If someone else has it
6516          * locked, they are already in the process of dropping it anyway
6517          */
6518         if (!mutex_trylock(&head->mutex))
6519                 goto out;
6520
6521         /*
6522          * at this point we have a head with no other entries.  Go
6523          * ahead and process it.
6524          */
6525         head->node.in_tree = 0;
6526         rb_erase(&head->href_node, &delayed_refs->href_root);
6527
6528         atomic_dec(&delayed_refs->num_entries);
6529
6530         /*
6531          * we don't take a ref on the node because we're removing it from the
6532          * tree, so we just steal the ref the tree was holding.
6533          */
6534         delayed_refs->num_heads--;
6535         if (head->processing == 0)
6536                 delayed_refs->num_heads_ready--;
6537         head->processing = 0;
6538         spin_unlock(&head->lock);
6539         spin_unlock(&delayed_refs->lock);
6540
6541         BUG_ON(head->extent_op);
6542         if (head->must_insert_reserved)
6543                 ret = 1;
6544
6545         mutex_unlock(&head->mutex);
6546         btrfs_put_delayed_ref(&head->node);
6547         return ret;
6548 out:
6549         spin_unlock(&head->lock);
6550
6551 out_delayed_unlock:
6552         spin_unlock(&delayed_refs->lock);
6553         return 0;
6554 }
6555
6556 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
6557                            struct btrfs_root *root,
6558                            struct extent_buffer *buf,
6559                            u64 parent, int last_ref)
6560 {
6561         int pin = 1;
6562         int ret;
6563
6564         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6565                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6566                                         buf->start, buf->len,
6567                                         parent, root->root_key.objectid,
6568                                         btrfs_header_level(buf),
6569                                         BTRFS_DROP_DELAYED_REF, NULL, 0);
6570                 BUG_ON(ret); /* -ENOMEM */
6571         }
6572
6573         if (!last_ref)
6574                 return;
6575
6576         if (btrfs_header_generation(buf) == trans->transid) {
6577                 struct btrfs_block_group_cache *cache;
6578
6579                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6580                         ret = check_ref_cleanup(trans, root, buf->start);
6581                         if (!ret)
6582                                 goto out;
6583                 }
6584
6585                 cache = btrfs_lookup_block_group(root->fs_info, buf->start);
6586
6587                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
6588                         pin_down_extent(root, cache, buf->start, buf->len, 1);
6589                         btrfs_put_block_group(cache);
6590                         goto out;
6591                 }
6592
6593                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
6594
6595                 btrfs_add_free_space(cache, buf->start, buf->len);
6596                 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE, 0);
6597                 btrfs_put_block_group(cache);
6598                 trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
6599                 pin = 0;
6600         }
6601 out:
6602         if (pin)
6603                 add_pinned_bytes(root->fs_info, buf->len,
6604                                  btrfs_header_level(buf),
6605                                  root->root_key.objectid);
6606
6607         /*
6608          * Deleting the buffer, clear the corrupt flag since it doesn't matter
6609          * anymore.
6610          */
6611         clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
6612 }
6613
6614 /* Can return -ENOMEM */
6615 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
6616                       u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
6617                       u64 owner, u64 offset, int no_quota)
6618 {
6619         int ret;
6620         struct btrfs_fs_info *fs_info = root->fs_info;
6621
6622         if (btrfs_test_is_dummy_root(root))
6623                 return 0;
6624
6625         add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);
6626
6627         /*
6628          * tree log blocks never actually go into the extent allocation
6629          * tree, just update pinning info and exit early.
6630          */
6631         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
6632                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
6633                 /* unlocks the pinned mutex */
6634                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
6635                 ret = 0;
6636         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6637                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
6638                                         num_bytes,
6639                                         parent, root_objectid, (int)owner,
6640                                         BTRFS_DROP_DELAYED_REF, NULL, no_quota);
6641         } else {
6642                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
6643                                                 num_bytes,
6644                                                 parent, root_objectid, owner,
6645                                                 offset, BTRFS_DROP_DELAYED_REF,
6646                                                 NULL, no_quota);
6647         }
6648         return ret;
6649 }
6650
6651 /*
6652  * when we wait for progress in the block group caching, it's because
6653  * our allocation attempt failed at least once.  So, we must sleep
6654  * and let some progress happen before we try again.
6655  *
6656  * This function will sleep at least once waiting for new free space to
6657  * show up, and then it will check the block group free space numbers
6658  * for our min num_bytes.  Another option is to have it go ahead
6659  * and look in the rbtree for a free extent of a given size, but this
6660  * is a good start.
6661  *
6662  * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
6663  * any of the information in this block group.
6664  */
6665 static noinline void
6666 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
6667                                 u64 num_bytes)
6668 {
6669         struct btrfs_caching_control *caching_ctl;
6670
6671         caching_ctl = get_caching_control(cache);
6672         if (!caching_ctl)
6673                 return;
6674
6675         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
6676                    (cache->free_space_ctl->free_space >= num_bytes));
6677
6678         put_caching_control(caching_ctl);
6679 }
6680
6681 static noinline int
6682 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
6683 {
6684         struct btrfs_caching_control *caching_ctl;
6685         int ret = 0;
6686
6687         caching_ctl = get_caching_control(cache);
6688         if (!caching_ctl)
6689                 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
6690
6691         wait_event(caching_ctl->wait, block_group_cache_done(cache));
6692         if (cache->cached == BTRFS_CACHE_ERROR)
6693                 ret = -EIO;
6694         put_caching_control(caching_ctl);
6695         return ret;
6696 }
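/*
 * Minimal usage sketch (an assumption, not code from this file):
 * callers that need a fully populated free space cache wait for the
 * caching kthread and then check for failure:
 *
 *	ret = wait_block_group_cache_done(cache);
 *	if (ret)	// -EIO: the caching kthread hit an error
 *		return ret;
 *	// cache->free_space_ctl now covers the whole block group
 */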
6697
6698 int __get_raid_index(u64 flags)
6699 {
6700         if (flags & BTRFS_BLOCK_GROUP_RAID10)
6701                 return BTRFS_RAID_RAID10;
6702         else if (flags & BTRFS_BLOCK_GROUP_RAID1)
6703                 return BTRFS_RAID_RAID1;
6704         else if (flags & BTRFS_BLOCK_GROUP_DUP)
6705                 return BTRFS_RAID_DUP;
6706         else if (flags & BTRFS_BLOCK_GROUP_RAID0)
6707                 return BTRFS_RAID_RAID0;
6708         else if (flags & BTRFS_BLOCK_GROUP_RAID5)
6709                 return BTRFS_RAID_RAID5;
6710         else if (flags & BTRFS_BLOCK_GROUP_RAID6)
6711                 return BTRFS_RAID_RAID6;
6712
6713         return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
6714 }
6715
6716 int get_block_group_index(struct btrfs_block_group_cache *cache)
6717 {
6718         return __get_raid_index(cache->flags);
6719 }
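/*
 * The mapping above is plain bit testing, checked roughly in decreasing
 * order of redundancy.  For example (illustrative only):
 *
 *	__get_raid_index(BTRFS_BLOCK_GROUP_RAID10) == BTRFS_RAID_RAID10
 *	__get_raid_index(BTRFS_BLOCK_GROUP_DATA)   == BTRFS_RAID_SINGLE
 *
 * A profile with no RAID bit set (plain data/metadata/system) falls
 * through to BTRFS_RAID_SINGLE.
 */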
6720
6721 static const char *btrfs_raid_type_names[BTRFS_NR_RAID_TYPES] = {
6722         [BTRFS_RAID_RAID10]     = "raid10",
6723         [BTRFS_RAID_RAID1]      = "raid1",
6724         [BTRFS_RAID_DUP]        = "dup",
6725         [BTRFS_RAID_RAID0]      = "raid0",
6726         [BTRFS_RAID_SINGLE]     = "single",
6727         [BTRFS_RAID_RAID5]      = "raid5",
6728         [BTRFS_RAID_RAID6]      = "raid6",
6729 };
6730
6731 static const char *get_raid_name(enum btrfs_raid_types type)
6732 {
6733         if (type >= BTRFS_NR_RAID_TYPES)
6734                 return NULL;
6735
6736         return btrfs_raid_type_names[type];
6737 }
6738
6739 enum btrfs_loop_type {
6740         LOOP_CACHING_NOWAIT = 0,
6741         LOOP_CACHING_WAIT = 1,
6742         LOOP_ALLOC_CHUNK = 2,
6743         LOOP_NO_EMPTY_SIZE = 3,
6744 };
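/*
 * find_free_extent() escalates through these stages in order each time
 * a full pass over the block groups comes up empty:
 *
 *	LOOP_CACHING_NOWAIT -> LOOP_CACHING_WAIT -> LOOP_ALLOC_CHUNK
 *		-> LOOP_NO_EMPTY_SIZE
 *
 * Each stage is more expensive than the last; only the final stage
 * drops the empty_size/empty_cluster padding and takes any free space
 * it can get.
 */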
6745
6746 static inline void
6747 btrfs_lock_block_group(struct btrfs_block_group_cache *cache,
6748                        int delalloc)
6749 {
6750         if (delalloc)
6751                 down_read(&cache->data_rwsem);
6752 }
6753
6754 static inline void
6755 btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
6756                        int delalloc)
6757 {
6758         btrfs_get_block_group(cache);
6759         if (delalloc)
6760                 down_read(&cache->data_rwsem);
6761 }
6762
6763 static struct btrfs_block_group_cache *
6764 btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
6765                    struct btrfs_free_cluster *cluster,
6766                    int delalloc)
6767 {
6768         struct btrfs_block_group_cache *used_bg;
6769         bool locked = false;
6770 again:
6771         spin_lock(&cluster->refill_lock);
6772         if (locked) {
6773                 if (used_bg == cluster->block_group)
6774                         return used_bg;
6775
6776                 up_read(&used_bg->data_rwsem);
6777                 btrfs_put_block_group(used_bg);
6778         }
6779
6780         used_bg = cluster->block_group;
6781         if (!used_bg)
6782                 return NULL;
6783
6784         if (used_bg == block_group)
6785                 return used_bg;
6786
6787         btrfs_get_block_group(used_bg);
6788
6789         if (!delalloc)
6790                 return used_bg;
6791
6792         if (down_read_trylock(&used_bg->data_rwsem))
6793                 return used_bg;
6794
6795         spin_unlock(&cluster->refill_lock);
6796         down_read(&used_bg->data_rwsem);
6797         locked = true;
6798         goto again;
6799 }
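/*
 * Note on the retry loop above: the lock order is refill_lock first,
 * then data_rwsem.  When the trylock of data_rwsem fails we must not
 * sleep while still holding refill_lock, so the cluster lock is
 * dropped, the rwsem is taken the slow way, and we re-check that the
 * cluster still points at the same block group before trusting it.
 */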
6800
6801 static inline void
6802 btrfs_release_block_group(struct btrfs_block_group_cache *cache,
6803                          int delalloc)
6804 {
6805         if (delalloc)
6806                 up_read(&cache->data_rwsem);
6807         btrfs_put_block_group(cache);
6808 }
6809
6810 /*
6811  * walks the btree of allocated extents and finds a hole of a given size.
6812  * The key ins is changed to record the hole:
6813  * ins->objectid == start position
6814  * ins->type == BTRFS_EXTENT_ITEM_KEY
6815  * ins->offset == the size of the hole.
6816  * Any available blocks before search_start are skipped.
6817  *
6818  * If there is no suitable free space, we record the max size of the
6819  * largest free space extent we saw in ins->offset.
6820  */
6821 static noinline int find_free_extent(struct btrfs_root *orig_root,
6822                                      u64 num_bytes, u64 empty_size,
6823                                      u64 hint_byte, struct btrfs_key *ins,
6824                                      u64 flags, int delalloc)
6825 {
6826         int ret = 0;
6827         struct btrfs_root *root = orig_root->fs_info->extent_root;
6828         struct btrfs_free_cluster *last_ptr = NULL;
6829         struct btrfs_block_group_cache *block_group = NULL;
6830         u64 search_start = 0;
6831         u64 max_extent_size = 0;
6832         int empty_cluster = 2 * 1024 * 1024;
6833         struct btrfs_space_info *space_info;
6834         int loop = 0;
6835         int index = __get_raid_index(flags);
6836         int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
6837                 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
6838         bool failed_cluster_refill = false;
6839         bool failed_alloc = false;
6840         bool use_cluster = true;
6841         bool have_caching_bg = false;
6842
6843         WARN_ON(num_bytes < root->sectorsize);
6844         ins->type = BTRFS_EXTENT_ITEM_KEY;
6845         ins->objectid = 0;
6846         ins->offset = 0;
6847
6848         trace_find_free_extent(orig_root, num_bytes, empty_size, flags);
6849
6850         space_info = __find_space_info(root->fs_info, flags);
6851         if (!space_info) {
6852                 btrfs_err(root->fs_info, "No space info for %llu", flags);
6853                 return -ENOSPC;
6854         }
6855
6856         /*
6857          * If the space info is for both data and metadata it means we have a
6858          * small filesystem and we can't use the clustering stuff.
6859          */
6860         if (btrfs_mixed_space_info(space_info))
6861                 use_cluster = false;
6862
6863         if (flags & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
6864                 last_ptr = &root->fs_info->meta_alloc_cluster;
6865                 if (!btrfs_test_opt(root, SSD))
6866                         empty_cluster = 64 * 1024;
6867         }
6868
6869         if ((flags & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
6870             btrfs_test_opt(root, SSD)) {
6871                 last_ptr = &root->fs_info->data_alloc_cluster;
6872         }
6873
6874         if (last_ptr) {
6875                 spin_lock(&last_ptr->lock);
6876                 if (last_ptr->block_group)
6877                         hint_byte = last_ptr->window_start;
6878                 spin_unlock(&last_ptr->lock);
6879         }
6880
6881         search_start = max(search_start, first_logical_byte(root, 0));
6882         search_start = max(search_start, hint_byte);
6883
6884         if (!last_ptr)
6885                 empty_cluster = 0;
6886
6887         if (search_start == hint_byte) {
6888                 block_group = btrfs_lookup_block_group(root->fs_info,
6889                                                        search_start);
6890                 /*
6891                  * we don't want to use the block group if it doesn't match our
6892                  * allocation bits, or if it's not cached.
6893                  *
6894                  * However if we are re-searching with an ideal block group
6895                  * picked out then we don't care that the block group is cached.
6896                  */
6897                 if (block_group && block_group_bits(block_group, flags) &&
6898                     block_group->cached != BTRFS_CACHE_NO) {
6899                         down_read(&space_info->groups_sem);
6900                         if (list_empty(&block_group->list) ||
6901                             block_group->ro) {
6902                                 /*
6903                                  * someone is removing this block group,
6904                                  * we can't jump into the have_block_group
6905                                  * target because our list pointers are not
6906                                  * valid
6907                                  */
6908                                 btrfs_put_block_group(block_group);
6909                                 up_read(&space_info->groups_sem);
6910                         } else {
6911                                 index = get_block_group_index(block_group);
6912                                 btrfs_lock_block_group(block_group, delalloc);
6913                                 goto have_block_group;
6914                         }
6915                 } else if (block_group) {
6916                         btrfs_put_block_group(block_group);
6917                 }
6918         }
6919 search:
6920         have_caching_bg = false;
6921         down_read(&space_info->groups_sem);
6922         list_for_each_entry(block_group, &space_info->block_groups[index],
6923                             list) {
6924                 u64 offset;
6925                 int cached;
6926
6927                 btrfs_grab_block_group(block_group, delalloc);
6928                 search_start = block_group->key.objectid;
6929
6930                 /*
6931                  * this can happen if we end up cycling through all the
6932                  * raid types, but we want to make sure we only allocate
6933                  * for the proper type.
6934                  */
6935                 if (!block_group_bits(block_group, flags)) {
6936                         u64 extra = BTRFS_BLOCK_GROUP_DUP |
6937                                     BTRFS_BLOCK_GROUP_RAID1 |
6938                                     BTRFS_BLOCK_GROUP_RAID5 |
6939                                     BTRFS_BLOCK_GROUP_RAID6 |
6940                                     BTRFS_BLOCK_GROUP_RAID10;
6941
6942                         /*
6943                          * if they asked for extra copies and this block group
6944                          * doesn't provide them, bail.  This does allow us to
6945                          * fill raid0 from raid1.
6946                          */
6947                         if ((flags & extra) && !(block_group->flags & extra))
6948                                 goto loop;
6949                 }
6950
6951 have_block_group:
6952                 cached = block_group_cache_done(block_group);
6953                 if (unlikely(!cached)) {
6954                         ret = cache_block_group(block_group, 0);
6955                         BUG_ON(ret < 0);
6956                         ret = 0;
6957                 }
6958
6959                 if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
6960                         goto loop;
6961                 if (unlikely(block_group->ro))
6962                         goto loop;
6963
6964                 /*
6965                  * OK, we want to try to use the cluster allocator, so
6966                  * let's look there
6967                  */
6968                 if (last_ptr) {
6969                         struct btrfs_block_group_cache *used_block_group;
6970                         unsigned long aligned_cluster;
6971                         /*
6972                          * the refill lock keeps out other
6973                          * people trying to start a new cluster
6974                          */
6975                         used_block_group = btrfs_lock_cluster(block_group,
6976                                                               last_ptr,
6977                                                               delalloc);
6978                         if (!used_block_group)
6979                                 goto refill_cluster;
6980
6981                         if (used_block_group != block_group &&
6982                             (used_block_group->ro ||
6983                              !block_group_bits(used_block_group, flags)))
6984                                 goto release_cluster;
6985
6986                         offset = btrfs_alloc_from_cluster(used_block_group,
6987                                                 last_ptr,
6988                                                 num_bytes,
6989                                                 used_block_group->key.objectid,
6990                                                 &max_extent_size);
6991                         if (offset) {
6992                                 /* we have a block, we're done */
6993                                 spin_unlock(&last_ptr->refill_lock);
6994                                 trace_btrfs_reserve_extent_cluster(root,
6995                                                 used_block_group,
6996                                                 search_start, num_bytes);
6997                                 if (used_block_group != block_group) {
6998                                         btrfs_release_block_group(block_group,
6999                                                                   delalloc);
7000                                         block_group = used_block_group;
7001                                 }
7002                                 goto checks;
7003                         }
7004
7005                         WARN_ON(last_ptr->block_group != used_block_group);
7006 release_cluster:
7007                         /* If we are on LOOP_NO_EMPTY_SIZE, we can't
7008                          * set up a new cluster, so let's just skip it
7009                          * and let the allocator find whatever block
7010                          * it can find.  If we reach this point, we
7011                          * will have tried the cluster allocator
7012                          * plenty of times and not have found
7013                          * anything, so we are likely way too
7014                          * fragmented for the clustering stuff to find
7015                          * anything.
7016                          *
7017                          * However, if the cluster is taken from the
7018                          * current block group, release the cluster
7019                          * first, so that we stand a better chance of
7020                          * succeeding in the unclustered
7021                          * allocation.  */
7022                         if (loop >= LOOP_NO_EMPTY_SIZE &&
7023                             used_block_group != block_group) {
7024                                 spin_unlock(&last_ptr->refill_lock);
7025                                 btrfs_release_block_group(used_block_group,
7026                                                           delalloc);
7027                                 goto unclustered_alloc;
7028                         }
7029
7030                         /*
7031                          * this cluster didn't work out, free it and
7032                          * start over
7033                          */
7034                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
7035
7036                         if (used_block_group != block_group)
7037                                 btrfs_release_block_group(used_block_group,
7038                                                           delalloc);
7039 refill_cluster:
7040                         if (loop >= LOOP_NO_EMPTY_SIZE) {
7041                                 spin_unlock(&last_ptr->refill_lock);
7042                                 goto unclustered_alloc;
7043                         }
7044
7045                         aligned_cluster = max_t(unsigned long,
7046                                                 empty_cluster + empty_size,
7047                                               block_group->full_stripe_len);
7048
7049                         /* allocate a cluster in this block group */
7050                         ret = btrfs_find_space_cluster(root, block_group,
7051                                                        last_ptr, search_start,
7052                                                        num_bytes,
7053                                                        aligned_cluster);
7054                         if (ret == 0) {
7055                                 /*
7056                                  * now pull our allocation out of this
7057                                  * cluster
7058                                  */
7059                                 offset = btrfs_alloc_from_cluster(block_group,
7060                                                         last_ptr,
7061                                                         num_bytes,
7062                                                         search_start,
7063                                                         &max_extent_size);
7064                                 if (offset) {
7065                                         /* we found one, proceed */
7066                                         spin_unlock(&last_ptr->refill_lock);
7067                                         trace_btrfs_reserve_extent_cluster(root,
7068                                                 block_group, search_start,
7069                                                 num_bytes);
7070                                         goto checks;
7071                                 }
7072                         } else if (!cached && loop > LOOP_CACHING_NOWAIT
7073                                    && !failed_cluster_refill) {
7074                                 spin_unlock(&last_ptr->refill_lock);
7075
7076                                 failed_cluster_refill = true;
7077                                 wait_block_group_cache_progress(block_group,
7078                                        num_bytes + empty_cluster + empty_size);
7079                                 goto have_block_group;
7080                         }
7081
7082                         /*
7083                          * at this point we either didn't find a cluster
7084                          * or we weren't able to allocate a block from our
7085                          * cluster.  Free the cluster we've been trying
7086                          * to use, and go to the next block group
7087                          */
7088                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
7089                         spin_unlock(&last_ptr->refill_lock);
7090                         goto loop;
7091                 }
7092
7093 unclustered_alloc:
7094                 spin_lock(&block_group->free_space_ctl->tree_lock);
7095                 if (cached &&
7096                     block_group->free_space_ctl->free_space <
7097                     num_bytes + empty_cluster + empty_size) {
7098                         if (block_group->free_space_ctl->free_space >
7099                             max_extent_size)
7100                                 max_extent_size =
7101                                         block_group->free_space_ctl->free_space;
7102                         spin_unlock(&block_group->free_space_ctl->tree_lock);
7103                         goto loop;
7104                 }
7105                 spin_unlock(&block_group->free_space_ctl->tree_lock);
7106
7107                 offset = btrfs_find_space_for_alloc(block_group, search_start,
7108                                                     num_bytes, empty_size,
7109                                                     &max_extent_size);
7110                 /*
7111                  * If we didn't find a chunk, and we haven't failed on this
7112                  * block group before, and this block group is in the middle of
7113                  * caching and we are ok with waiting, then go ahead and wait
7114                  * for progress to be made, and set failed_alloc to true.
7115                  *
7116                  * If failed_alloc is true then we've already waited on this
7117                  * block group once and should move on to the next block group.
7118                  */
7119                 if (!offset && !failed_alloc && !cached &&
7120                     loop > LOOP_CACHING_NOWAIT) {
7121                         wait_block_group_cache_progress(block_group,
7122                                                 num_bytes + empty_size);
7123                         failed_alloc = true;
7124                         goto have_block_group;
7125                 } else if (!offset) {
7126                         if (!cached)
7127                                 have_caching_bg = true;
7128                         goto loop;
7129                 }
7130 checks:
7131                 search_start = ALIGN(offset, root->stripesize);
7132
7133                 /* move on to the next group */
7134                 if (search_start + num_bytes >
7135                     block_group->key.objectid + block_group->key.offset) {
7136                         btrfs_add_free_space(block_group, offset, num_bytes);
7137                         goto loop;
7138                 }
7139
7140                 if (offset < search_start)
7141                         btrfs_add_free_space(block_group, offset,
7142                                              search_start - offset);
7143                 BUG_ON(offset > search_start);
7144
7145                 ret = btrfs_update_reserved_bytes(block_group, num_bytes,
7146                                                   alloc_type, delalloc);
7147                 if (ret == -EAGAIN) {
7148                         btrfs_add_free_space(block_group, offset, num_bytes);
7149                         goto loop;
7150                 }
7151
7152                 /* we are all good, let's return */
7153                 ins->objectid = search_start;
7154                 ins->offset = num_bytes;
7155
7156                 trace_btrfs_reserve_extent(orig_root, block_group,
7157                                            search_start, num_bytes);
7158                 btrfs_release_block_group(block_group, delalloc);
7159                 break;
7160 loop:
7161                 failed_cluster_refill = false;
7162                 failed_alloc = false;
7163                 BUG_ON(index != get_block_group_index(block_group));
7164                 btrfs_release_block_group(block_group, delalloc);
7165         }
7166         up_read(&space_info->groups_sem);
7167
7168         if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
7169                 goto search;
7170
7171         if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
7172                 goto search;
7173
7174         /*
7175          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
7176          *                      caching kthreads as we move along
7177          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
7178          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
7179          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
7180          *                      again
7181          */
7182         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
7183                 index = 0;
7184                 loop++;
7185                 if (loop == LOOP_ALLOC_CHUNK) {
7186                         struct btrfs_trans_handle *trans;
7187                         int exist = 0;
7188
7189                         trans = current->journal_info;
7190                         if (trans)
7191                                 exist = 1;
7192                         else
7193                                 trans = btrfs_join_transaction(root);
7194
7195                         if (IS_ERR(trans)) {
7196                                 ret = PTR_ERR(trans);
7197                                 goto out;
7198                         }
7199
7200                         ret = do_chunk_alloc(trans, root, flags,
7201                                              CHUNK_ALLOC_FORCE);
7202                         /*
7203                          * Do not bail out on ENOSPC since we
7204                          * can do more things.
7205                          */
7206                         if (ret < 0 && ret != -ENOSPC)
7207                                 btrfs_abort_transaction(trans,
7208                                                         root, ret);
7209                         else
7210                                 ret = 0;
7211                         if (!exist)
7212                                 btrfs_end_transaction(trans, root);
7213                         if (ret)
7214                                 goto out;
7215                 }
7216
7217                 if (loop == LOOP_NO_EMPTY_SIZE) {
7218                         empty_size = 0;
7219                         empty_cluster = 0;
7220                 }
7221
7222                 goto search;
7223         } else if (!ins->objectid) {
7224                 ret = -ENOSPC;
7225         } else {
7226                 ret = 0;
7227         }
7228 out:
7229         if (ret == -ENOSPC)
7230                 ins->offset = max_extent_size;
7231         return ret;
7232 }
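/*
 * Orientation sketch (hypothetical call, not from this file): the
 * allocator is normally reached via btrfs_reserve_extent() below
 * rather than called directly, e.g.:
 *
 *	struct btrfs_key ins;
 *	ret = btrfs_reserve_extent(root, 1024 * 1024, root->sectorsize,
 *				   0, 0, &ins, 1, 0); // data, no delalloc lock
 *	// on success: ins.objectid is the start bytenr,
 *	// ins.offset is the reserved length
 */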
7233
7234 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
7235                             int dump_block_groups)
7236 {
7237         struct btrfs_block_group_cache *cache;
7238         int index = 0;
7239
7240         spin_lock(&info->lock);
7241         printk(KERN_INFO "BTRFS: space_info %llu has %llu free, is %sfull\n",
7242                info->flags,
7243                info->total_bytes - info->bytes_used - info->bytes_pinned -
7244                info->bytes_reserved - info->bytes_readonly,
7245                (info->full) ? "" : "not ");
7246         printk(KERN_INFO "BTRFS: space_info total=%llu, used=%llu, pinned=%llu, "
7247                "reserved=%llu, may_use=%llu, readonly=%llu\n",
7248                info->total_bytes, info->bytes_used, info->bytes_pinned,
7249                info->bytes_reserved, info->bytes_may_use,
7250                info->bytes_readonly);
7251         spin_unlock(&info->lock);
7252
7253         if (!dump_block_groups)
7254                 return;
7255
7256         down_read(&info->groups_sem);
7257 again:
7258         list_for_each_entry(cache, &info->block_groups[index], list) {
7259                 spin_lock(&cache->lock);
7260                 printk(KERN_INFO "BTRFS: "
7261                            "block group %llu has %llu bytes, "
7262                            "%llu used %llu pinned %llu reserved %s\n",
7263                        cache->key.objectid, cache->key.offset,
7264                        btrfs_block_group_used(&cache->item), cache->pinned,
7265                        cache->reserved, cache->ro ? "[readonly]" : "");
7266                 btrfs_dump_free_space(cache, bytes);
7267                 spin_unlock(&cache->lock);
7268         }
7269         if (++index < BTRFS_NR_RAID_TYPES)
7270                 goto again;
7271         up_read(&info->groups_sem);
7272 }
7273
7274 int btrfs_reserve_extent(struct btrfs_root *root,
7275                          u64 num_bytes, u64 min_alloc_size,
7276                          u64 empty_size, u64 hint_byte,
7277                          struct btrfs_key *ins, int is_data, int delalloc)
7278 {
7279         bool final_tried = false;
7280         u64 flags;
7281         int ret;
7282
7283         flags = btrfs_get_alloc_profile(root, is_data);
7284 again:
7285         WARN_ON(num_bytes < root->sectorsize);
7286         ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins,
7287                                flags, delalloc);
7288
7289         if (ret == -ENOSPC) {
7290                 if (!final_tried && ins->offset) {
7291                         num_bytes = min(num_bytes >> 1, ins->offset);
7292                         num_bytes = round_down(num_bytes, root->sectorsize);
7293                         num_bytes = max(num_bytes, min_alloc_size);
7294                         if (num_bytes == min_alloc_size)
7295                                 final_tried = true;
7296                         goto again;
7297                 } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
7298                         struct btrfs_space_info *sinfo;
7299
7300                         sinfo = __find_space_info(root->fs_info, flags);
7301                         btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
7302                                 flags, num_bytes);
7303                         if (sinfo)
7304                                 dump_space_info(sinfo, num_bytes, 1);
7305                 }
7306         }
7307
7308         return ret;
7309 }
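/*
 * Retry sizing example (illustrative numbers): with num_bytes = 8M,
 * min_alloc_size = 64K and find_free_extent() reporting a largest free
 * extent of 1M via ins->offset, the -ENOSPC path retries with
 *
 *	min(8M >> 1, 1M) = 1M, then 512K, 256K, 128K, 64K
 *
 * rounding down to the sectorsize at every step; once num_bytes reaches
 * min_alloc_size, final_tried is set and the next -ENOSPC is returned
 * to the caller.
 */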
7310
7311 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
7312                                         u64 start, u64 len,
7313                                         int pin, int delalloc)
7314 {
7315         struct btrfs_block_group_cache *cache;
7316         int ret = 0;
7317
7318         cache = btrfs_lookup_block_group(root->fs_info, start);
7319         if (!cache) {
7320                 btrfs_err(root->fs_info, "Unable to find block group for %llu",
7321                         start);
7322                 return -ENOSPC;
7323         }
7324
7325         if (pin) {
7326                 pin_down_extent(root, cache, start, len, 1);
7327         } else {
7328                 if (btrfs_test_opt(root, DISCARD))
7329                         ret = btrfs_discard_extent(root, start, len, NULL);
7330                 btrfs_add_free_space(cache, start, len);
7331                 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc);
7332         }
7333
7334         btrfs_put_block_group(cache);
7335
7336         trace_btrfs_reserved_extent_free(root, start, len);
7337
7338         return ret;
7339 }
7340
7341 int btrfs_free_reserved_extent(struct btrfs_root *root,
7342                                u64 start, u64 len, int delalloc)
7343 {
7344         return __btrfs_free_reserved_extent(root, start, len, 0, delalloc);
7345 }
7346
7347 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
7348                                        u64 start, u64 len)
7349 {
7350         return __btrfs_free_reserved_extent(root, start, len, 1, 0);
7351 }
7352
7353 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
7354                                       struct btrfs_root *root,
7355                                       u64 parent, u64 root_objectid,
7356                                       u64 flags, u64 owner, u64 offset,
7357                                       struct btrfs_key *ins, int ref_mod)
7358 {
7359         int ret;
7360         struct btrfs_fs_info *fs_info = root->fs_info;
7361         struct btrfs_extent_item *extent_item;
7362         struct btrfs_extent_inline_ref *iref;
7363         struct btrfs_path *path;
7364         struct extent_buffer *leaf;
7365         int type;
7366         u32 size;
7367
7368         if (parent > 0)
7369                 type = BTRFS_SHARED_DATA_REF_KEY;
7370         else
7371                 type = BTRFS_EXTENT_DATA_REF_KEY;
7372
7373         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
7374
7375         path = btrfs_alloc_path();
7376         if (!path)
7377                 return -ENOMEM;
7378
7379         path->leave_spinning = 1;
7380         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7381                                       ins, size);
7382         if (ret) {
7383                 btrfs_free_path(path);
7384                 return ret;
7385         }
7386
7387         leaf = path->nodes[0];
7388         extent_item = btrfs_item_ptr(leaf, path->slots[0],
7389                                      struct btrfs_extent_item);
7390         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
7391         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7392         btrfs_set_extent_flags(leaf, extent_item,
7393                                flags | BTRFS_EXTENT_FLAG_DATA);
7394
7395         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7396         btrfs_set_extent_inline_ref_type(leaf, iref, type);
7397         if (parent > 0) {
7398                 struct btrfs_shared_data_ref *ref;
7399                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
7400                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7401                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
7402         } else {
7403                 struct btrfs_extent_data_ref *ref;
7404                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
7405                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
7406                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
7407                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
7408                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
7409         }
7410
7411         btrfs_mark_buffer_dirty(path->nodes[0]);
7412         btrfs_free_path(path);
7413
7414         ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
7415         if (ret) { /* -ENOENT, logic error */
7416                 btrfs_err(fs_info, "update block group failed for %llu %llu",
7417                         ins->objectid, ins->offset);
7418                 BUG();
7419         }
7420         trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
7421         return ret;
7422 }
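/*
 * Resulting item layout (sketch of what the code above builds inside a
 * single extent tree item):
 *
 *	struct btrfs_extent_item	refs / generation / flags|DATA
 *	struct btrfs_extent_inline_ref	type = SHARED_DATA_REF_KEY or
 *					       EXTENT_DATA_REF_KEY
 *	struct btrfs_shared_data_ref or struct btrfs_extent_data_ref
 *
 * which is exactly what the pointer arithmetic on (extent_item + 1),
 * (iref + 1) and &iref->offset constructs.
 */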
7423
7424 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
7425                                      struct btrfs_root *root,
7426                                      u64 parent, u64 root_objectid,
7427                                      u64 flags, struct btrfs_disk_key *key,
7428                                      int level, struct btrfs_key *ins,
7429                                      int no_quota)
7430 {
7431         int ret;
7432         struct btrfs_fs_info *fs_info = root->fs_info;
7433         struct btrfs_extent_item *extent_item;
7434         struct btrfs_tree_block_info *block_info;
7435         struct btrfs_extent_inline_ref *iref;
7436         struct btrfs_path *path;
7437         struct extent_buffer *leaf;
7438         u32 size = sizeof(*extent_item) + sizeof(*iref);
7439         u64 num_bytes = ins->offset;
7440         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7441                                                  SKINNY_METADATA);
7442
7443         if (!skinny_metadata)
7444                 size += sizeof(*block_info);
7445
7446         path = btrfs_alloc_path();
7447         if (!path) {
7448                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7449                                                    root->nodesize);
7450                 return -ENOMEM;
7451         }
7452
7453         path->leave_spinning = 1;
7454         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7455                                       ins, size);
7456         if (ret) {
7457                 btrfs_free_path(path);
7458                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7459                                                    root->nodesize);
7460                 return ret;
7461         }
7462
7463         leaf = path->nodes[0];
7464         extent_item = btrfs_item_ptr(leaf, path->slots[0],
7465                                      struct btrfs_extent_item);
7466         btrfs_set_extent_refs(leaf, extent_item, 1);
7467         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7468         btrfs_set_extent_flags(leaf, extent_item,
7469                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
7470
7471         if (skinny_metadata) {
7472                 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7473                 num_bytes = root->nodesize;
7474         } else {
7475                 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
7476                 btrfs_set_tree_block_key(leaf, block_info, key);
7477                 btrfs_set_tree_block_level(leaf, block_info, level);
7478                 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
7479         }
7480
7481         if (parent > 0) {
7482                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
7483                 btrfs_set_extent_inline_ref_type(leaf, iref,
7484                                                  BTRFS_SHARED_BLOCK_REF_KEY);
7485                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7486         } else {
7487                 btrfs_set_extent_inline_ref_type(leaf, iref,
7488                                                  BTRFS_TREE_BLOCK_REF_KEY);
7489                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
7490         }
7491
7492         btrfs_mark_buffer_dirty(leaf);
7493         btrfs_free_path(path);
7494
7495         ret = update_block_group(trans, root, ins->objectid, root->nodesize,
7496                                  1);
7497         if (ret) { /* -ENOENT, logic error */
7498                 btrfs_err(fs_info, "update block group failed for %llu %llu",
7499                         ins->objectid, ins->offset);
7500                 BUG();
7501         }
7502
7503         trace_btrfs_reserved_extent_alloc(root, ins->objectid, root->nodesize);
7504         return ret;
7505 }
7506
7507 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
7508                                      struct btrfs_root *root,
7509                                      u64 root_objectid, u64 owner,
7510                                      u64 offset, struct btrfs_key *ins)
7511 {
7512         int ret;
7513
7514         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
7515
7516         ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
7517                                          ins->offset, 0,
7518                                          root_objectid, owner, offset,
7519                                          BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
7520         return ret;
7521 }
7522
7523 /*
7524  * this is used by the tree logging recovery code.  It records that
7525  * an extent has been allocated and makes sure to clear the free
7526  * space cache bits as well
7527  */
7528 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
7529                                    struct btrfs_root *root,
7530                                    u64 root_objectid, u64 owner, u64 offset,
7531                                    struct btrfs_key *ins)
7532 {
7533         int ret;
7534         struct btrfs_block_group_cache *block_group;
7535
7536         /*
7537          * Mixed block groups exclude extents before processing the log, so
7538          * we only need to do the exclude dance if this fs isn't mixed.
7539          */
7540         if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
7541                 ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
7542                 if (ret)
7543                         return ret;
7544         }
7545
7546         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
7547         if (!block_group)
7548                 return -EINVAL;
7549
7550         ret = btrfs_update_reserved_bytes(block_group, ins->offset,
7551                                           RESERVE_ALLOC_NO_ACCOUNT, 0);
7552         BUG_ON(ret); /* logic error */
7553         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
7554                                          0, owner, offset, ins, 1);
7555         btrfs_put_block_group(block_group);
7556         return ret;
7557 }
7558
7559 static struct extent_buffer *
7560 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
7561                       u64 bytenr, int level)
7562 {
7563         struct extent_buffer *buf;
7564
7565         buf = btrfs_find_create_tree_block(root, bytenr);
7566         if (!buf)
7567                 return ERR_PTR(-ENOMEM);
7568         btrfs_set_header_generation(buf, trans->transid);
7569         btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
7570         btrfs_tree_lock(buf);
7571         clean_tree_block(trans, root->fs_info, buf);
7572         clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
7573
7574         btrfs_set_lock_blocking(buf);
7575         btrfs_set_buffer_uptodate(buf);
7576
7577         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
7578                 buf->log_index = root->log_transid % 2;
7579                 /*
7580                  * we allow two log transactions at a time, use different
7581                  * EXTENT bits to differentiate dirty pages.
7582                  */
7583                 if (buf->log_index == 0)
7584                         set_extent_dirty(&root->dirty_log_pages, buf->start,
7585                                         buf->start + buf->len - 1, GFP_NOFS);
7586                 else
7587                         set_extent_new(&root->dirty_log_pages, buf->start,
7588                                         buf->start + buf->len - 1, GFP_NOFS);
7589         } else {
7590                 buf->log_index = -1;
7591                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
7592                          buf->start + buf->len - 1, GFP_NOFS);
7593         }
7594         trans->blocks_used++;
7595         /* this returns a buffer locked for blocking */
7596         return buf;
7597 }
7598
7599 static struct btrfs_block_rsv *
7600 use_block_rsv(struct btrfs_trans_handle *trans,
7601               struct btrfs_root *root, u32 blocksize)
7602 {
7603         struct btrfs_block_rsv *block_rsv;
7604         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
7605         int ret;
7606         bool global_updated = false;
7607
7608         block_rsv = get_block_rsv(trans, root);
7609
7610         if (unlikely(block_rsv->size == 0))
7611                 goto try_reserve;
7612 again:
7613         ret = block_rsv_use_bytes(block_rsv, blocksize);
7614         if (!ret)
7615                 return block_rsv;
7616
7617         if (block_rsv->failfast)
7618                 return ERR_PTR(ret);
7619
7620         if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
7621                 global_updated = true;
7622                 update_global_block_rsv(root->fs_info);
7623                 goto again;
7624         }
7625
7626         if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
7627                 static DEFINE_RATELIMIT_STATE(_rs,
7628                                 DEFAULT_RATELIMIT_INTERVAL * 10,
7629                                 /*DEFAULT_RATELIMIT_BURST*/ 1);
7630                 if (__ratelimit(&_rs))
7631                         WARN(1, KERN_DEBUG
7632                                 "BTRFS: block rsv returned %d\n", ret);
7633         }
7634 try_reserve:
7635         ret = reserve_metadata_bytes(root, block_rsv, blocksize,
7636                                      BTRFS_RESERVE_NO_FLUSH);
7637         if (!ret)
7638                 return block_rsv;
7639         /*
7640          * If we couldn't reserve metadata bytes, try to use some from
7641          * the global reserve, provided its space_info is the same as that
7642          * of the reservation that failed.
7643          */
7644         if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
7645             block_rsv->space_info == global_rsv->space_info) {
7646                 ret = block_rsv_use_bytes(global_rsv, blocksize);
7647                 if (!ret)
7648                         return global_rsv;
7649         }
7650         return ERR_PTR(ret);
7651 }
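/*
 * Fallback order implemented above, in summary: (1) consume from the
 * transaction's block rsv; (2) if that is the global rsv, refresh its
 * size once and retry; (3) reserve fresh metadata bytes with
 * BTRFS_RESERVE_NO_FLUSH; (4) as a last resort, when the failing rsv is
 * not itself the global one but shares its space_info, take bytes from
 * the global reserve.  Only after all four fail is the error returned.
 */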
7652
7653 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
7654                             struct btrfs_block_rsv *block_rsv, u32 blocksize)
7655 {
7656         block_rsv_add_bytes(block_rsv, blocksize, 0);
7657         block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
7658 }
7659
7660 /*
7661  * finds a free extent and does all the dirty work required for allocation;
7662  * returns the tree buffer or an ERR_PTR on error.
7663  */
7664 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
7665                                         struct btrfs_root *root,
7666                                         u64 parent, u64 root_objectid,
7667                                         struct btrfs_disk_key *key, int level,
7668                                         u64 hint, u64 empty_size)
7669 {
7670         struct btrfs_key ins;
7671         struct btrfs_block_rsv *block_rsv;
7672         struct extent_buffer *buf;
7673         struct btrfs_delayed_extent_op *extent_op;
7674         u64 flags = 0;
7675         int ret;
7676         u32 blocksize = root->nodesize;
7677         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7678                                                  SKINNY_METADATA);
7679
7680         if (btrfs_test_is_dummy_root(root)) {
7681                 buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
7682                                             level);
7683                 if (!IS_ERR(buf))
7684                         root->alloc_bytenr += blocksize;
7685                 return buf;
7686         }
7687
7688         block_rsv = use_block_rsv(trans, root, blocksize);
7689         if (IS_ERR(block_rsv))
7690                 return ERR_CAST(block_rsv);
7691
7692         ret = btrfs_reserve_extent(root, blocksize, blocksize,
7693                                    empty_size, hint, &ins, 0, 0);
7694         if (ret)
7695                 goto out_unuse;
7696
7697         buf = btrfs_init_new_buffer(trans, root, ins.objectid, level);
7698         if (IS_ERR(buf)) {
7699                 ret = PTR_ERR(buf);
7700                 goto out_free_reserved;
7701         }
7702
7703         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
7704                 if (parent == 0)
7705                         parent = ins.objectid;
7706                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
7707         } else
7708                 BUG_ON(parent > 0);
7709
7710         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
7711                 extent_op = btrfs_alloc_delayed_extent_op();
7712                 if (!extent_op) {
7713                         ret = -ENOMEM;
7714                         goto out_free_buf;
7715                 }
7716                 if (key)
7717                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
7718                 else
7719                         memset(&extent_op->key, 0, sizeof(extent_op->key));
7720                 extent_op->flags_to_set = flags;
7721                 if (skinny_metadata)
7722                         extent_op->update_key = 0;
7723                 else
7724                         extent_op->update_key = 1;
7725                 extent_op->update_flags = 1;
7726                 extent_op->is_data = 0;
7727                 extent_op->level = level;
7728
7729                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
7730                                                  ins.objectid, ins.offset,
7731                                                  parent, root_objectid, level,
7732                                                  BTRFS_ADD_DELAYED_EXTENT,
7733                                                  extent_op, 0);
7734                 if (ret)
7735                         goto out_free_delayed;
7736         }
7737         return buf;
7738
7739 out_free_delayed:
7740         btrfs_free_delayed_extent_op(extent_op);
7741 out_free_buf:
7742         free_extent_buffer(buf);
7743 out_free_reserved:
7744         btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 0);
7745 out_unuse:
7746         unuse_block_rsv(root->fs_info, block_rsv, blocksize);
7747         return ERR_PTR(ret);
7748 }
7749
7750 struct walk_control {
7751         u64 refs[BTRFS_MAX_LEVEL];
7752         u64 flags[BTRFS_MAX_LEVEL];
7753         struct btrfs_key update_progress;
7754         int stage;
7755         int level;
7756         int shared_level;
7757         int update_ref;
7758         int keep_locks;
7759         int reada_slot;
7760         int reada_count;
7761         int for_reloc;
7762 };
7763
7764 #define DROP_REFERENCE  1
7765 #define UPDATE_BACKREF  2
7766
7767 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
7768                                      struct btrfs_root *root,
7769                                      struct walk_control *wc,
7770                                      struct btrfs_path *path)
7771 {
7772         u64 bytenr;
7773         u64 generation;
7774         u64 refs;
7775         u64 flags;
7776         u32 nritems;
7777         u32 blocksize;
7778         struct btrfs_key key;
7779         struct extent_buffer *eb;
7780         int ret;
7781         int slot;
7782         int nread = 0;
7783
7784         if (path->slots[wc->level] < wc->reada_slot) {
7785                 wc->reada_count = wc->reada_count * 2 / 3;
7786                 wc->reada_count = max(wc->reada_count, 2);
7787         } else {
7788                 wc->reada_count = wc->reada_count * 3 / 2;
7789                 wc->reada_count = min_t(int, wc->reada_count,
7790                                         BTRFS_NODEPTRS_PER_BLOCK(root));
7791         }
7792
7793         eb = path->nodes[wc->level];
7794         nritems = btrfs_header_nritems(eb);
7795         blocksize = root->nodesize;
7796
7797         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
7798                 if (nread >= wc->reada_count)
7799                         break;
7800
7801                 cond_resched();
7802                 bytenr = btrfs_node_blockptr(eb, slot);
7803                 generation = btrfs_node_ptr_generation(eb, slot);
7804
7805                 if (slot == path->slots[wc->level])
7806                         goto reada;
7807
7808                 if (wc->stage == UPDATE_BACKREF &&
7809                     generation <= root->root_key.offset)
7810                         continue;
7811
7812                 /* We don't lock the tree block, it's OK to be racy here */
7813                 ret = btrfs_lookup_extent_info(trans, root, bytenr,
7814                                                wc->level - 1, 1, &refs,
7815                                                &flags);
7816                 /* We don't care about errors in readahead. */
7817                 if (ret < 0)
7818                         continue;
7819                 BUG_ON(refs == 0);
7820
7821                 if (wc->stage == DROP_REFERENCE) {
7822                         if (refs == 1)
7823                                 goto reada;
7824
7825                         if (wc->level == 1 &&
7826                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7827                                 continue;
7828                         if (!wc->update_ref ||
7829                             generation <= root->root_key.offset)
7830                                 continue;
7831                         btrfs_node_key_to_cpu(eb, &key, slot);
7832                         ret = btrfs_comp_cpu_keys(&key,
7833                                                   &wc->update_progress);
7834                         if (ret < 0)
7835                                 continue;
7836                 } else {
7837                         if (wc->level == 1 &&
7838                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7839                                 continue;
7840                 }
7841 reada:
7842                 readahead_tree_block(root, bytenr);
7843                 nread++;
7844         }
7845         wc->reada_slot = slot;
7846 }
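/*
 * The readahead window adapts multiplicatively (illustrative numbers):
 * starting from wc->reada_count = 2, consecutive forward walks grow it
 * 2 -> 3 -> 4 -> 6 -> 9 -> ... (times 3/2, capped at the node fanout),
 * while restarting behind reada_slot shrinks it by 2/3 back towards the
 * floor of 2.
 */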
7847
7848 /*
7849  * TODO: Modify the related functions to add the relevant node/leaf to
7850  * dirty_extent_root for later qgroup accounting.
7851  *
7852  * Currently, this function does nothing.
7853  */
7854 static int account_leaf_items(struct btrfs_trans_handle *trans,
7855                               struct btrfs_root *root,
7856                               struct extent_buffer *eb)
7857 {
7858         int nr = btrfs_header_nritems(eb);
7859         int i, extent_type;
7860         struct btrfs_key key;
7861         struct btrfs_file_extent_item *fi;
7862         u64 bytenr, num_bytes;
7863
7864         for (i = 0; i < nr; i++) {
7865                 btrfs_item_key_to_cpu(eb, &key, i);
7866
7867                 if (key.type != BTRFS_EXTENT_DATA_KEY)
7868                         continue;
7869
7870                 fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
7871                 /* filter out non-qgroup-accountable extents */
7872                 extent_type = btrfs_file_extent_type(eb, fi);
7873
7874                 if (extent_type == BTRFS_FILE_EXTENT_INLINE)
7875                         continue;
7876
7877                 bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
7878                 if (!bytenr)
7879                         continue;
7880
7881                 num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
7882         }
7883         return 0;
7884 }
7885
7886 /*
7887  * Walk up the tree from the bottom, freeing leaves and any interior
7888  * nodes which have had all slots visited. If a node (leaf or
7889  * interior) is freed, the node above it will have its slot
7890  * incremented. The root node will never be freed.
7891  *
7892  * At the end of this function, we should have a path which has all
7893  * slots incremented to the next position for a search. If we need to
7894  * read a new node it will be NULL and the node above it will have the
7895  * correct slot selected for a later read.
7896  *
7897  * If we increment the root node's slot counter past the number of
7898  * elements, 1 is returned to signal completion of the search.
7899  */
7900 static int adjust_slots_upwards(struct btrfs_root *root,
7901                                 struct btrfs_path *path, int root_level)
7902 {
7903         int level = 0;
7904         int nr, slot;
7905         struct extent_buffer *eb;
7906
7907         if (root_level == 0)
7908                 return 1;
7909
7910         while (level <= root_level) {
7911                 eb = path->nodes[level];
7912                 nr = btrfs_header_nritems(eb);
7913                 path->slots[level]++;
7914                 slot = path->slots[level];
7915                 if (slot >= nr || level == 0) {
7916                         /*
7917                          * Don't free the root - we will detect this
7918                          * condition after our loop and return a
7919                          * positive value for caller to stop walking the tree.
7920                          */
7921                         if (level != root_level) {
7922                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7923                                 path->locks[level] = 0;
7924
7925                                 free_extent_buffer(eb);
7926                                 path->nodes[level] = NULL;
7927                                 path->slots[level] = 0;
7928                         }
7929                 } else {
7930                         /*
7931                          * We have a valid slot to walk back down
7932                          * from. Stop here so caller can process these
7933                          * new nodes.
7934                          */
7935                         break;
7936                 }
7937
7938                 level++;
7939         }
7940
7941         eb = path->nodes[root_level];
7942         if (path->slots[root_level] >= btrfs_header_nritems(eb))
7943                 return 1;
7944
7945         return 0;
7946 }
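/*
 * A minimal sketch (not new API) of how a caller drives this helper,
 * mirroring the loop in account_shared_subtree() below:
 *
 *	ret = adjust_slots_upwards(root, path, root_level);
 *	if (ret)
 *		break;		/* whole subtree visited, search is done */
 *	goto walk_down;		/* descend again through the new slots */
 */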
7947
7948 /*
7949  * root_eb is the subtree root and is locked before this function is called.
7950  * TODO: Modify this function to mark all tree blocks (including completely
7951  * shared nodes) dirty in dirty_extent_root, so they get accounted in qgroup.
7952  */
7953 static int account_shared_subtree(struct btrfs_trans_handle *trans,
7954                                   struct btrfs_root *root,
7955                                   struct extent_buffer *root_eb,
7956                                   u64 root_gen,
7957                                   int root_level)
7958 {
7959         int ret = 0;
7960         int level;
7961         struct extent_buffer *eb = root_eb;
7962         struct btrfs_path *path = NULL;
7963
7964         BUG_ON(root_level < 0 || root_level > BTRFS_MAX_LEVEL);
7965         BUG_ON(root_eb == NULL);
7966
7967         if (!root->fs_info->quota_enabled)
7968                 return 0;
7969
7970         if (!extent_buffer_uptodate(root_eb)) {
7971                 ret = btrfs_read_buffer(root_eb, root_gen);
7972                 if (ret)
7973                         goto out;
7974         }
7975
7976         if (root_level == 0) {
7977                 ret = account_leaf_items(trans, root, root_eb);
7978                 goto out;
7979         }
7980
7981         path = btrfs_alloc_path();
7982         if (!path)
7983                 return -ENOMEM;
7984
7985         /*
7986          * Walk down the tree.  Missing extent blocks are filled in as
7987          * we go. Metadata is accounted every time we read a new
7988          * extent block.
7989          *
7990          * When we reach a leaf, we account for file extent items in it,
7991          * walk back up the tree (adjusting slot pointers as we go)
7992          * and restart the search process.
7993          */
7994         extent_buffer_get(root_eb); /* For path */
7995         path->nodes[root_level] = root_eb;
7996         path->slots[root_level] = 0;
7997         path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
7998 walk_down:
7999         level = root_level;
8000         while (level >= 0) {
8001                 if (path->nodes[level] == NULL) {
8002                         int parent_slot;
8003                         u64 child_gen;
8004                         u64 child_bytenr;
8005
8006                         /* We need to get child blockptr/gen from
8007                          * parent before we can read it. */
8008                         eb = path->nodes[level + 1];
8009                         parent_slot = path->slots[level + 1];
8010                         child_bytenr = btrfs_node_blockptr(eb, parent_slot);
8011                         child_gen = btrfs_node_ptr_generation(eb, parent_slot);
8012
8013                         eb = read_tree_block(root, child_bytenr, child_gen);
8014                         if (IS_ERR(eb)) {
8015                                 ret = PTR_ERR(eb);
8016                                 goto out;
8017                         } else if (!extent_buffer_uptodate(eb)) {
8018                                 free_extent_buffer(eb);
8019                                 ret = -EIO;
8020                                 goto out;
8021                         }
8022
8023                         path->nodes[level] = eb;
8024                         path->slots[level] = 0;
8025
8026                         btrfs_tree_read_lock(eb);
8027                         btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
8028                         path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
8029                 }
8030
8031                 if (level == 0) {
8032                         ret = account_leaf_items(trans, root, path->nodes[level]);
8033                         if (ret)
8034                                 goto out;
8035
8036                         /* Nonzero return here means we completed our search */
8037                         ret = adjust_slots_upwards(root, path, root_level);
8038                         if (ret)
8039                                 break;
8040
8041                         /* Restart search with new slots */
8042                         goto walk_down;
8043                 }
8044
8045                 level--;
8046         }
8047
8048         ret = 0;
8049 out:
8050         btrfs_free_path(path);
8051
8052         return ret;
8053 }
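/*
 * For illustration, the expected call site matches the one in
 * do_walk_down() below, which accounts a still-shared subtree before
 * dropping our reference on it:
 *
 *	ret = account_shared_subtree(trans, root, next, generation,
 *				     level - 1);
 *	if (ret)
 *		printk_ratelimited(...);  /* qgroups stale, rescan needed */
 */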
8054
8055 /*
8056  * helper to process tree block while walking down the tree.
8057  *
8058  * when wc->stage == UPDATE_BACKREF, this function updates
8059  * back refs for pointers in the block.
8060  *
8061  * NOTE: return value 1 means we should stop walking down.
8062  */
8063 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
8064                                    struct btrfs_root *root,
8065                                    struct btrfs_path *path,
8066                                    struct walk_control *wc, int lookup_info)
8067 {
8068         int level = wc->level;
8069         struct extent_buffer *eb = path->nodes[level];
8070         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
8071         int ret;
8072
8073         if (wc->stage == UPDATE_BACKREF &&
8074             btrfs_header_owner(eb) != root->root_key.objectid)
8075                 return 1;
8076
8077         /*
8078          * when the reference count of a tree block is 1, it won't increase
8079          * again. Once the full backref flag is set, we never clear it.
8080          */
8081         if (lookup_info &&
8082             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
8083              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
8084                 BUG_ON(!path->locks[level]);
8085                 ret = btrfs_lookup_extent_info(trans, root,
8086                                                eb->start, level, 1,
8087                                                &wc->refs[level],
8088                                                &wc->flags[level]);
8089                 BUG_ON(ret == -ENOMEM);
8090                 if (ret)
8091                         return ret;
8092                 BUG_ON(wc->refs[level] == 0);
8093         }
8094
8095         if (wc->stage == DROP_REFERENCE) {
8096                 if (wc->refs[level] > 1)
8097                         return 1;
8098
8099                 if (path->locks[level] && !wc->keep_locks) {
8100                         btrfs_tree_unlock_rw(eb, path->locks[level]);
8101                         path->locks[level] = 0;
8102                 }
8103                 return 0;
8104         }
8105
8106         /* wc->stage == UPDATE_BACKREF */
8107         if (!(wc->flags[level] & flag)) {
8108                 BUG_ON(!path->locks[level]);
8109                 ret = btrfs_inc_ref(trans, root, eb, 1);
8110                 BUG_ON(ret); /* -ENOMEM */
8111                 ret = btrfs_dec_ref(trans, root, eb, 0);
8112                 BUG_ON(ret); /* -ENOMEM */
8113                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
8114                                                   eb->len, flag,
8115                                                   btrfs_header_level(eb), 0);
8116                 BUG_ON(ret); /* -ENOMEM */
8117                 wc->flags[level] |= flag;
8118         }
8119
8120         /*
8121          * the block is shared by multiple trees, so it's not good to
8122          * keep the tree lock
8123          */
8124         if (path->locks[level] && level > 0) {
8125                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8126                 path->locks[level] = 0;
8127         }
8128         return 0;
8129 }
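/*
 * Informational summary of walk_down_proc() return values:
 *	 1  - stop descending: the block is owned by another tree
 *	      (UPDATE_BACKREF) or is still shared (DROP_REFERENCE)
 *	 0  - keep walking down this subtree
 *	<0  - error from btrfs_lookup_extent_info()
 */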
8130
8131 /*
8132  * helper to process tree block pointer.
8133  *
8134  * when wc->stage == DROP_REFERENCE, this function checks the
8135  * reference count of the block pointed to. If the block
8136  * is shared and we need to update back refs for the subtree
8137  * rooted at the block, this function changes wc->stage to
8138  * UPDATE_BACKREF. If the block is shared and there is no
8139  * need to update back refs, this function drops the reference
8140  * to the block.
8141  *
8142  * NOTE: return value 1 means we should stop walking down.
8143  */
8144 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
8145                                  struct btrfs_root *root,
8146                                  struct btrfs_path *path,
8147                                  struct walk_control *wc, int *lookup_info)
8148 {
8149         u64 bytenr;
8150         u64 generation;
8151         u64 parent;
8152         u32 blocksize;
8153         struct btrfs_key key;
8154         struct extent_buffer *next;
8155         int level = wc->level;
8156         int reada = 0;
8157         int ret = 0;
8158         bool need_account = false;
8159
8160         generation = btrfs_node_ptr_generation(path->nodes[level],
8161                                                path->slots[level]);
8162         /*
8163          * if the lower level block was created before the snapshot
8164          * was created, we know there is no need to update back refs
8165          * for the subtree
8166          */
8167         if (wc->stage == UPDATE_BACKREF &&
8168             generation <= root->root_key.offset) {
8169                 *lookup_info = 1;
8170                 return 1;
8171         }
8172
8173         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
8174         blocksize = root->nodesize;
8175
8176         next = btrfs_find_tree_block(root->fs_info, bytenr);
8177         if (!next) {
8178                 next = btrfs_find_create_tree_block(root, bytenr);
8179                 if (!next)
8180                         return -ENOMEM;
8181                 btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
8182                                                level - 1);
8183                 reada = 1;
8184         }
8185         btrfs_tree_lock(next);
8186         btrfs_set_lock_blocking(next);
8187
8188         ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
8189                                        &wc->refs[level - 1],
8190                                        &wc->flags[level - 1]);
8191         if (ret < 0) {
8192                 btrfs_tree_unlock(next);
8193                 return ret;
8194         }
8195
8196         if (unlikely(wc->refs[level - 1] == 0)) {
8197                 btrfs_err(root->fs_info, "Missing references.");
8198                 BUG();
8199         }
8200         *lookup_info = 0;
8201
8202         if (wc->stage == DROP_REFERENCE) {
8203                 if (wc->refs[level - 1] > 1) {
8204                         need_account = true;
8205                         if (level == 1 &&
8206                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8207                                 goto skip;
8208
8209                         if (!wc->update_ref ||
8210                             generation <= root->root_key.offset)
8211                                 goto skip;
8212
8213                         btrfs_node_key_to_cpu(path->nodes[level], &key,
8214                                               path->slots[level]);
8215                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
8216                         if (ret < 0)
8217                                 goto skip;
8218
8219                         wc->stage = UPDATE_BACKREF;
8220                         wc->shared_level = level - 1;
8221                 }
8222         } else {
8223                 if (level == 1 &&
8224                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8225                         goto skip;
8226         }
8227
8228         if (!btrfs_buffer_uptodate(next, generation, 0)) {
8229                 btrfs_tree_unlock(next);
8230                 free_extent_buffer(next);
8231                 next = NULL;
8232                 *lookup_info = 1;
8233         }
8234
8235         if (!next) {
8236                 if (reada && level == 1)
8237                         reada_walk_down(trans, root, wc, path);
8238                 next = read_tree_block(root, bytenr, generation);
8239                 if (IS_ERR(next)) {
8240                         return PTR_ERR(next);
8241                 } else if (!extent_buffer_uptodate(next)) {
8242                         free_extent_buffer(next);
8243                         return -EIO;
8244                 }
8245                 btrfs_tree_lock(next);
8246                 btrfs_set_lock_blocking(next);
8247         }
8248
8249         level--;
8250         BUG_ON(level != btrfs_header_level(next));
8251         path->nodes[level] = next;
8252         path->slots[level] = 0;
8253         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8254         wc->level = level;
8255         if (wc->level == 1)
8256                 wc->reada_slot = 0;
8257         return 0;
8258 skip:
8259         wc->refs[level - 1] = 0;
8260         wc->flags[level - 1] = 0;
8261         if (wc->stage == DROP_REFERENCE) {
8262                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
8263                         parent = path->nodes[level]->start;
8264                 } else {
8265                         BUG_ON(root->root_key.objectid !=
8266                                btrfs_header_owner(path->nodes[level]));
8267                         parent = 0;
8268                 }
8269
8270                 if (need_account) {
8271                         ret = account_shared_subtree(trans, root, next,
8272                                                      generation, level - 1);
8273                         if (ret) {
8274                                 printk_ratelimited(KERN_ERR "BTRFS: %s Error "
8275                                         "%d accounting shared subtree. Quota "
8276                                         "is out of sync, rescan required.\n",
8277                                         root->fs_info->sb->s_id, ret);
8278                         }
8279                 }
8280                 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
8281                                 root->root_key.objectid, level - 1, 0, 0);
8282                 BUG_ON(ret); /* -ENOMEM */
8283         }
8284         btrfs_tree_unlock(next);
8285         free_extent_buffer(next);
8286         *lookup_info = 1;
8287         return 1;
8288 }
8289
8290 /*
8291  * helper to process tree block while walking up the tree.
8292  *
8293  * when wc->stage == DROP_REFERENCE, this function drops
8294  * reference count on the block.
8295  *
8296  * when wc->stage == UPDATE_BACKREF, this function changes
8297  * wc->stage back to DROP_REFERENCE if we changed wc->stage
8298  * to UPDATE_BACKREF previously while processing the block.
8299  *
8300  * NOTE: return value 1 means we should stop walking up.
8301  */
8302 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
8303                                  struct btrfs_root *root,
8304                                  struct btrfs_path *path,
8305                                  struct walk_control *wc)
8306 {
8307         int ret;
8308         int level = wc->level;
8309         struct extent_buffer *eb = path->nodes[level];
8310         u64 parent = 0;
8311
8312         if (wc->stage == UPDATE_BACKREF) {
8313                 BUG_ON(wc->shared_level < level);
8314                 if (level < wc->shared_level)
8315                         goto out;
8316
8317                 ret = find_next_key(path, level + 1, &wc->update_progress);
8318                 if (ret > 0)
8319                         wc->update_ref = 0;
8320
8321                 wc->stage = DROP_REFERENCE;
8322                 wc->shared_level = -1;
8323                 path->slots[level] = 0;
8324
8325                 /*
8326                  * check reference count again if the block isn't locked.
8327                  * we should start walking down the tree again if reference
8328                  * count is one.
8329                  */
8330                 if (!path->locks[level]) {
8331                         BUG_ON(level == 0);
8332                         btrfs_tree_lock(eb);
8333                         btrfs_set_lock_blocking(eb);
8334                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8335
8336                         ret = btrfs_lookup_extent_info(trans, root,
8337                                                        eb->start, level, 1,
8338                                                        &wc->refs[level],
8339                                                        &wc->flags[level]);
8340                         if (ret < 0) {
8341                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8342                                 path->locks[level] = 0;
8343                                 return ret;
8344                         }
8345                         BUG_ON(wc->refs[level] == 0);
8346                         if (wc->refs[level] == 1) {
8347                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8348                                 path->locks[level] = 0;
8349                                 return 1;
8350                         }
8351                 }
8352         }
8353
8354         /* wc->stage == DROP_REFERENCE */
8355         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
8356
8357         if (wc->refs[level] == 1) {
8358                 if (level == 0) {
8359                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8360                                 ret = btrfs_dec_ref(trans, root, eb, 1);
8361                         else
8362                                 ret = btrfs_dec_ref(trans, root, eb, 0);
8363                         BUG_ON(ret); /* -ENOMEM */
8364                         ret = account_leaf_items(trans, root, eb);
8365                         if (ret) {
8366                                 printk_ratelimited(KERN_ERR "BTRFS: %s Error "
8367                                         "%d accounting leaf items. Quota "
8368                                         "is out of sync, rescan required.\n",
8369                                         root->fs_info->sb->s_id, ret);
8370                         }
8371                 }
8372                 /* make block locked assertion in clean_tree_block happy */
8373                 if (!path->locks[level] &&
8374                     btrfs_header_generation(eb) == trans->transid) {
8375                         btrfs_tree_lock(eb);
8376                         btrfs_set_lock_blocking(eb);
8377                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8378                 }
8379                 clean_tree_block(trans, root->fs_info, eb);
8380         }
8381
8382         if (eb == root->node) {
8383                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8384                         parent = eb->start;
8385                 else
8386                         BUG_ON(root->root_key.objectid !=
8387                                btrfs_header_owner(eb));
8388         } else {
8389                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8390                         parent = path->nodes[level + 1]->start;
8391                 else
8392                         BUG_ON(root->root_key.objectid !=
8393                                btrfs_header_owner(path->nodes[level + 1]));
8394         }
8395
8396         btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
8397 out:
8398         wc->refs[level] = 0;
8399         wc->flags[level] = 0;
8400         return 0;
8401 }
8402
8403 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
8404                                    struct btrfs_root *root,
8405                                    struct btrfs_path *path,
8406                                    struct walk_control *wc)
8407 {
8408         int level = wc->level;
8409         int lookup_info = 1;
8410         int ret;
8411
8412         while (level >= 0) {
8413                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
8414                 if (ret > 0)
8415                         break;
8416
8417                 if (level == 0)
8418                         break;
8419
8420                 if (path->slots[level] >=
8421                     btrfs_header_nritems(path->nodes[level]))
8422                         break;
8423
8424                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
8425                 if (ret > 0) {
8426                         path->slots[level]++;
8427                         continue;
8428                 } else if (ret < 0)
8429                         return ret;
8430                 level = wc->level;
8431         }
8432         return 0;
8433 }
8434
8435 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
8436                                  struct btrfs_root *root,
8437                                  struct btrfs_path *path,
8438                                  struct walk_control *wc, int max_level)
8439 {
8440         int level = wc->level;
8441         int ret;
8442
8443         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
8444         while (level < max_level && path->nodes[level]) {
8445                 wc->level = level;
8446                 if (path->slots[level] + 1 <
8447                     btrfs_header_nritems(path->nodes[level])) {
8448                         path->slots[level]++;
8449                         return 0;
8450                 } else {
8451                         ret = walk_up_proc(trans, root, path, wc);
8452                         if (ret > 0)
8453                                 return 0;
8454
8455                         if (path->locks[level]) {
8456                                 btrfs_tree_unlock_rw(path->nodes[level],
8457                                                      path->locks[level]);
8458                                 path->locks[level] = 0;
8459                         }
8460                         free_extent_buffer(path->nodes[level]);
8461                         path->nodes[level] = NULL;
8462                         level++;
8463                 }
8464         }
8465         return 1;
8466 }
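/*
 * walk_down_tree() and walk_up_tree() are meant to be driven as a pair;
 * a minimal sketch of the loop, matching the core of
 * btrfs_drop_snapshot() below:
 *
 *	while (1) {
 *		ret = walk_down_tree(trans, root, path, wc);
 *		if (ret < 0)
 *			break;
 *		ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
 *		if (ret != 0)
 *			break;	/* > 0 means the whole tree was dropped */
 *	}
 */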
8467
8468 /*
8469  * drop a subvolume tree.
8470  *
8471  * this function traverses the tree freeing any blocks that are only
8472  * referenced by the tree.
8473  *
8474  * when a shared tree block is found, this function decreases its
8475  * reference count by one. If update_ref is true, this function
8476  * also makes sure backrefs for the shared block and all lower level
8477  * blocks are properly updated.
8478  *
8479  * If called with for_reloc == 0, this function may exit early with -EAGAIN.
8480  */
8481 int btrfs_drop_snapshot(struct btrfs_root *root,
8482                          struct btrfs_block_rsv *block_rsv, int update_ref,
8483                          int for_reloc)
8484 {
8485         struct btrfs_path *path;
8486         struct btrfs_trans_handle *trans;
8487         struct btrfs_root *tree_root = root->fs_info->tree_root;
8488         struct btrfs_root_item *root_item = &root->root_item;
8489         struct walk_control *wc;
8490         struct btrfs_key key;
8491         int err = 0;
8492         int ret;
8493         int level;
8494         bool root_dropped = false;
8495
8496         btrfs_debug(root->fs_info, "Drop subvolume %llu", root->objectid);
8497
8498         path = btrfs_alloc_path();
8499         if (!path) {
8500                 err = -ENOMEM;
8501                 goto out;
8502         }
8503
8504         wc = kzalloc(sizeof(*wc), GFP_NOFS);
8505         if (!wc) {
8506                 btrfs_free_path(path);
8507                 err = -ENOMEM;
8508                 goto out;
8509         }
8510
8511         trans = btrfs_start_transaction(tree_root, 0);
8512         if (IS_ERR(trans)) {
8513                 err = PTR_ERR(trans);
8514                 goto out_free;
8515         }
8516
8517         if (block_rsv)
8518                 trans->block_rsv = block_rsv;
8519
8520         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
8521                 level = btrfs_header_level(root->node);
8522                 path->nodes[level] = btrfs_lock_root_node(root);
8523                 btrfs_set_lock_blocking(path->nodes[level]);
8524                 path->slots[level] = 0;
8525                 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8526                 memset(&wc->update_progress, 0,
8527                        sizeof(wc->update_progress));
8528         } else {
8529                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
8530                 memcpy(&wc->update_progress, &key,
8531                        sizeof(wc->update_progress));
8532
8533                 level = root_item->drop_level;
8534                 BUG_ON(level == 0);
8535                 path->lowest_level = level;
8536                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
8537                 path->lowest_level = 0;
8538                 if (ret < 0) {
8539                         err = ret;
8540                         goto out_end_trans;
8541                 }
8542                 WARN_ON(ret > 0);
8543
8544                 /*
8545                  * unlock our path, this is safe because only this
8546                  * function is allowed to delete this snapshot
8547                  */
8548                 btrfs_unlock_up_safe(path, 0);
8549
8550                 level = btrfs_header_level(root->node);
8551                 while (1) {
8552                         btrfs_tree_lock(path->nodes[level]);
8553                         btrfs_set_lock_blocking(path->nodes[level]);
8554                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8555
8556                         ret = btrfs_lookup_extent_info(trans, root,
8557                                                 path->nodes[level]->start,
8558                                                 level, 1, &wc->refs[level],
8559                                                 &wc->flags[level]);
8560                         if (ret < 0) {
8561                                 err = ret;
8562                                 goto out_end_trans;
8563                         }
8564                         BUG_ON(wc->refs[level] == 0);
8565
8566                         if (level == root_item->drop_level)
8567                                 break;
8568
8569                         btrfs_tree_unlock(path->nodes[level]);
8570                         path->locks[level] = 0;
8571                         WARN_ON(wc->refs[level] != 1);
8572                         level--;
8573                 }
8574         }
8575
8576         wc->level = level;
8577         wc->shared_level = -1;
8578         wc->stage = DROP_REFERENCE;
8579         wc->update_ref = update_ref;
8580         wc->keep_locks = 0;
8581         wc->for_reloc = for_reloc;
8582         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
8583
8584         while (1) {
8586                 ret = walk_down_tree(trans, root, path, wc);
8587                 if (ret < 0) {
8588                         err = ret;
8589                         break;
8590                 }
8591
8592                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
8593                 if (ret < 0) {
8594                         err = ret;
8595                         break;
8596                 }
8597
8598                 if (ret > 0) {
8599                         BUG_ON(wc->stage != DROP_REFERENCE);
8600                         break;
8601                 }
8602
8603                 if (wc->stage == DROP_REFERENCE) {
8604                         level = wc->level;
8605                         btrfs_node_key(path->nodes[level],
8606                                        &root_item->drop_progress,
8607                                        path->slots[level]);
8608                         root_item->drop_level = level;
8609                 }
8610
8611                 BUG_ON(wc->level == 0);
8612                 if (btrfs_should_end_transaction(trans, tree_root) ||
8613                     (!for_reloc && btrfs_need_cleaner_sleep(root))) {
8614                         ret = btrfs_update_root(trans, tree_root,
8615                                                 &root->root_key,
8616                                                 root_item);
8617                         if (ret) {
8618                                 btrfs_abort_transaction(trans, tree_root, ret);
8619                                 err = ret;
8620                                 goto out_end_trans;
8621                         }
8622
8623                         btrfs_end_transaction_throttle(trans, tree_root);
8624                         if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
8625                                 pr_debug("BTRFS: drop snapshot early exit\n");
8626                                 err = -EAGAIN;
8627                                 goto out_free;
8628                         }
8629
8630                         trans = btrfs_start_transaction(tree_root, 0);
8631                         if (IS_ERR(trans)) {
8632                                 err = PTR_ERR(trans);
8633                                 goto out_free;
8634                         }
8635                         if (block_rsv)
8636                                 trans->block_rsv = block_rsv;
8637                 }
8638         }
8639         btrfs_release_path(path);
8640         if (err)
8641                 goto out_end_trans;
8642
8643         ret = btrfs_del_root(trans, tree_root, &root->root_key);
8644         if (ret) {
8645                 btrfs_abort_transaction(trans, tree_root, ret);
8646                 goto out_end_trans;
8647         }
8648
8649         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
8650                 ret = btrfs_find_root(tree_root, &root->root_key, path,
8651                                       NULL, NULL);
8652                 if (ret < 0) {
8653                         btrfs_abort_transaction(trans, tree_root, ret);
8654                         err = ret;
8655                         goto out_end_trans;
8656                 } else if (ret > 0) {
8657                         /* if we fail to delete the orphan item this time
8658                          * around, it'll get picked up the next time.
8659                          *
8660                          * The most common failure here is just -ENOENT.
8661                          */
8662                         btrfs_del_orphan_item(trans, tree_root,
8663                                               root->root_key.objectid);
8664                 }
8665         }
8666
8667         if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) {
8668                 btrfs_add_dropped_root(trans, root);
8669         } else {
8670                 free_extent_buffer(root->node);
8671                 free_extent_buffer(root->commit_root);
8672                 btrfs_put_fs_root(root);
8673         }
8674         root_dropped = true;
8675 out_end_trans:
8676         btrfs_end_transaction_throttle(trans, tree_root);
8677 out_free:
8678         kfree(wc);
8679         btrfs_free_path(path);
8680 out:
8681         /*
8682          * So if we need to stop dropping the snapshot for whatever reason, we
8683          * need to make sure to add it back to the dead root list so that we
8684          * keep trying to do the work later.  This also cleans up roots if we
8685          * don't have it in the radix (like when we recover after a power fail
8686          * or unmount) so we don't leak memory.
8687          */
8688         if (!for_reloc && root_dropped == false)
8689                 btrfs_add_dead_root(root);
8690         if (err && err != -EAGAIN)
8691                 btrfs_std_error(root->fs_info, err);
8692         return err;
8693 }
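/*
 * For reference, the typical caller is the cleaner thread; see
 * btrfs_clean_one_deleted_snapshot() in transaction.c, which does
 * roughly:
 *
 *	if (btrfs_header_backref_rev(root->node) < BTRFS_MIXED_BACKREF_REV)
 *		ret = btrfs_drop_snapshot(root, NULL, 0, 0);
 *	else
 *		ret = btrfs_drop_snapshot(root, NULL, 1, 0);
 *
 * An -EAGAIN return just means "retry later"; the root was put back on
 * the dead roots list above.
 */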
8694
8695 /*
8696  * drop subtree rooted at tree block 'node'.
8697  *
8698  * NOTE: this function will unlock and release tree block 'node'.
8699  * Only used by the relocation code.
8700  */
8701 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
8702                         struct btrfs_root *root,
8703                         struct extent_buffer *node,
8704                         struct extent_buffer *parent)
8705 {
8706         struct btrfs_path *path;
8707         struct walk_control *wc;
8708         int level;
8709         int parent_level;
8710         int ret = 0;
8711         int wret;
8712
8713         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
8714
8715         path = btrfs_alloc_path();
8716         if (!path)
8717                 return -ENOMEM;
8718
8719         wc = kzalloc(sizeof(*wc), GFP_NOFS);
8720         if (!wc) {
8721                 btrfs_free_path(path);
8722                 return -ENOMEM;
8723         }
8724
8725         btrfs_assert_tree_locked(parent);
8726         parent_level = btrfs_header_level(parent);
8727         extent_buffer_get(parent);
8728         path->nodes[parent_level] = parent;
8729         path->slots[parent_level] = btrfs_header_nritems(parent);
8730
8731         btrfs_assert_tree_locked(node);
8732         level = btrfs_header_level(node);
8733         path->nodes[level] = node;
8734         path->slots[level] = 0;
8735         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8736
8737         wc->refs[parent_level] = 1;
8738         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
8739         wc->level = level;
8740         wc->shared_level = -1;
8741         wc->stage = DROP_REFERENCE;
8742         wc->update_ref = 0;
8743         wc->keep_locks = 1;
8744         wc->for_reloc = 1;
8745         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
8746
8747         while (1) {
8748                 wret = walk_down_tree(trans, root, path, wc);
8749                 if (wret < 0) {
8750                         ret = wret;
8751                         break;
8752                 }
8753
8754                 wret = walk_up_tree(trans, root, path, wc, parent_level);
8755                 if (wret < 0)
8756                         ret = wret;
8757                 if (wret != 0)
8758                         break;
8759         }
8760
8761         kfree(wc);
8762         btrfs_free_path(path);
8763         return ret;
8764 }
8765
8766 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
8767 {
8768         u64 num_devices;
8769         u64 stripped;
8770
8771         /*
8772          * if restripe for this chunk_type is on, pick the target profile
8773          * and return it; otherwise do the usual balance
8774          */
8775         stripped = get_restripe_target(root->fs_info, flags);
8776         if (stripped)
8777                 return extended_to_chunk(stripped);
8778
8779         num_devices = root->fs_info->fs_devices->rw_devices;
8780
8781         stripped = BTRFS_BLOCK_GROUP_RAID0 |
8782                 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
8783                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
8784
8785         if (num_devices == 1) {
8786                 stripped |= BTRFS_BLOCK_GROUP_DUP;
8787                 stripped = flags & ~stripped;
8788
8789                 /* turn raid0 into single device chunks */
8790                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
8791                         return stripped;
8792
8793                 /* turn mirroring into duplication */
8794                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
8795                              BTRFS_BLOCK_GROUP_RAID10))
8796                         return stripped | BTRFS_BLOCK_GROUP_DUP;
8797         } else {
8798                 /* they already had raid on here, just return */
8799                 if (flags & stripped)
8800                         return flags;
8801
8802                 stripped |= BTRFS_BLOCK_GROUP_DUP;
8803                 stripped = flags & ~stripped;
8804
8805                 /* switch duplicated blocks with raid1 */
8806                 if (flags & BTRFS_BLOCK_GROUP_DUP)
8807                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
8808
8809                 /* this is drive concat, leave it alone */
8810         }
8811
8812         return flags;
8813 }
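/*
 * Worked example (assuming no restripe target is set): with a single
 * rw device,
 *
 *	update_block_group_flags(root, BTRFS_BLOCK_GROUP_METADATA |
 *				       BTRFS_BLOCK_GROUP_RAID1)
 *
 * returns BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DUP, since
 * mirroring across devices degrades to duplication on one device.
 */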
8814
8815 static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
8816 {
8817         struct btrfs_space_info *sinfo = cache->space_info;
8818         u64 num_bytes;
8819         u64 min_allocable_bytes;
8820         int ret = -ENOSPC;
8821
8822         /*
8823          * We need some metadata space and system metadata space for
8824          * allocating chunks in some corner cases, unless we're forced
8825          * to set the block group read-only.
8826          */
8827         if ((sinfo->flags &
8828              (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
8829             !force)
8830                 min_allocable_bytes = 1 * 1024 * 1024;
8831         else
8832                 min_allocable_bytes = 0;
8833
8834         spin_lock(&sinfo->lock);
8835         spin_lock(&cache->lock);
8836
8837         if (cache->ro) {
8838                 cache->ro++;
8839                 ret = 0;
8840                 goto out;
8841         }
8842
8843         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
8844                     cache->bytes_super - btrfs_block_group_used(&cache->item);
8845
8846         if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
8847             sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
8848             min_allocable_bytes <= sinfo->total_bytes) {
8849                 sinfo->bytes_readonly += num_bytes;
8850                 cache->ro++;
8851                 list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
8852                 ret = 0;
8853         }
8854 out:
8855         spin_unlock(&cache->lock);
8856         spin_unlock(&sinfo->lock);
8857         return ret;
8858 }
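/*
 * The space check above boils down to:
 *
 *	used + reserved + pinned + may_use + readonly +
 *		<this group's free bytes> + headroom <= total
 *
 * i.e. the rest of the space_info must be able to absorb this block
 * group's unused space before we may take it out of circulation.
 */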
8859
8860 int btrfs_inc_block_group_ro(struct btrfs_root *root,
8861                              struct btrfs_block_group_cache *cache)
8862
8863 {
8864         struct btrfs_trans_handle *trans;
8865         u64 alloc_flags;
8866         int ret;
8867
8868 again:
8869         trans = btrfs_join_transaction(root);
8870         if (IS_ERR(trans))
8871                 return PTR_ERR(trans);
8872
8873         /*
8874          * we're not allowed to set block groups readonly after the dirty
8875          * block groups cache has started writing.  If it already started,
8876          * back off and let this transaction commit
8877          */
8878         mutex_lock(&root->fs_info->ro_block_group_mutex);
8879         if (trans->transaction->dirty_bg_run) {
8880                 u64 transid = trans->transid;
8881
8882                 mutex_unlock(&root->fs_info->ro_block_group_mutex);
8883                 btrfs_end_transaction(trans, root);
8884
8885                 ret = btrfs_wait_for_commit(root, transid);
8886                 if (ret)
8887                         return ret;
8888                 goto again;
8889         }
8890
8891         /*
8892          * if we are changing raid levels, try to allocate a corresponding
8893          * block group with the new raid level.
8894          */
8895         alloc_flags = update_block_group_flags(root, cache->flags);
8896         if (alloc_flags != cache->flags) {
8897                 ret = do_chunk_alloc(trans, root, alloc_flags,
8898                                      CHUNK_ALLOC_FORCE);
8899                 /*
8900                  * ENOSPC is allowed here, we may have enough space
8901                  * already allocated at the new raid level to
8902                  * carry on
8903                  */
8904                 if (ret == -ENOSPC)
8905                         ret = 0;
8906                 if (ret < 0)
8907                         goto out;
8908         }
8909
8910         ret = inc_block_group_ro(cache, 0);
8911         if (!ret)
8912                 goto out;
8913         alloc_flags = get_alloc_profile(root, cache->space_info->flags);
8914         ret = do_chunk_alloc(trans, root, alloc_flags,
8915                              CHUNK_ALLOC_FORCE);
8916         if (ret < 0)
8917                 goto out;
8918         ret = inc_block_group_ro(cache, 0);
8919 out:
8920         if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
8921                 alloc_flags = update_block_group_flags(root, cache->flags);
8922                 lock_chunks(root->fs_info->chunk_root);
8923                 check_system_chunk(trans, root, alloc_flags);
8924                 unlock_chunks(root->fs_info->chunk_root);
8925         }
8926         mutex_unlock(&root->fs_info->ro_block_group_mutex);
8927
8928         btrfs_end_transaction(trans, root);
8929         return ret;
8930 }
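/*
 * Pairs with btrfs_dec_block_group_ro() below: every successful call
 * here bumps cache->ro and must eventually be undone, e.g. (sketch):
 *
 *	ret = btrfs_inc_block_group_ro(root, cache);
 *	if (!ret) {
 *		... operate on the read-only block group ...
 *		btrfs_dec_block_group_ro(root, cache);
 *	}
 */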
8931
8932 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
8933                             struct btrfs_root *root, u64 type)
8934 {
8935         u64 alloc_flags = get_alloc_profile(root, type);
8936         return do_chunk_alloc(trans, root, alloc_flags,
8937                               CHUNK_ALLOC_FORCE);
8938 }
8939
8940 /*
8941  * helper to account the unused space of all the read-only block groups in
8942  * the space_info. Takes mirrors into account.
8943  */
8944 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
8945 {
8946         struct btrfs_block_group_cache *block_group;
8947         u64 free_bytes = 0;
8948         int factor;
8949
8950         /* It's df, we don't care if it's racy */
8951         if (list_empty(&sinfo->ro_bgs))
8952                 return 0;
8953
8954         spin_lock(&sinfo->lock);
8955         list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
8956                 spin_lock(&block_group->lock);
8957
8958                 if (!block_group->ro) {
8959                         spin_unlock(&block_group->lock);
8960                         continue;
8961                 }
8962
8963                 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
8964                                           BTRFS_BLOCK_GROUP_RAID10 |
8965                                           BTRFS_BLOCK_GROUP_DUP))
8966                         factor = 2;
8967                 else
8968                         factor = 1;
8969
8970                 free_bytes += (block_group->key.offset -
8971                                btrfs_block_group_used(&block_group->item)) *
8972                                factor;
8973
8974                 spin_unlock(&block_group->lock);
8975         }
8976         spin_unlock(&sinfo->lock);
8977
8978         return free_bytes;
8979 }
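/*
 * Example of the factor math: a 1GiB RAID1 (or DUP) block group with
 * 600MiB used contributes (1024MiB - 600MiB) * 2 = 848MiB of raw free
 * bytes, since every logical byte occupies two bytes on disk.
 */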
8980
8981 void btrfs_dec_block_group_ro(struct btrfs_root *root,
8982                               struct btrfs_block_group_cache *cache)
8983 {
8984         struct btrfs_space_info *sinfo = cache->space_info;
8985         u64 num_bytes;
8986
8987         BUG_ON(!cache->ro);
8988
8989         spin_lock(&sinfo->lock);
8990         spin_lock(&cache->lock);
8991         if (!--cache->ro) {
8992                 num_bytes = cache->key.offset - cache->reserved -
8993                             cache->pinned - cache->bytes_super -
8994                             btrfs_block_group_used(&cache->item);
8995                 sinfo->bytes_readonly -= num_bytes;
8996                 list_del_init(&cache->ro_list);
8997         }
8998         spin_unlock(&cache->lock);
8999         spin_unlock(&sinfo->lock);
9000 }
9001
9002 /*
9003  * checks to see if it's even possible to relocate this block group.
9004  *
9005  * @return - -1 if it's not a good idea to relocate this block group, 0 if
9006  * it's ok to go ahead and try.
9007  */
9008 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
9009 {
9010         struct btrfs_block_group_cache *block_group;
9011         struct btrfs_space_info *space_info;
9012         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
9013         struct btrfs_device *device;
9014         struct btrfs_trans_handle *trans;
9015         u64 min_free;
9016         u64 dev_min = 1;
9017         u64 dev_nr = 0;
9018         u64 target;
9019         int index;
9020         int full = 0;
9021         int ret = 0;
9022
9023         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
9024
9025         /* odd, couldn't find the block group, leave it alone */
9026         if (!block_group)
9027                 return -1;
9028
9029         min_free = btrfs_block_group_used(&block_group->item);
9030
9031         /* no bytes used, we're good */
9032         if (!min_free)
9033                 goto out;
9034
9035         space_info = block_group->space_info;
9036         spin_lock(&space_info->lock);
9037
9038         full = space_info->full;
9039
9040         /*
9041          * if this is the last block group we have in this space, we can't
9042          * relocate it unless we're able to allocate a new chunk below.
9043          *
9044          * Otherwise, we need to make sure we have room in the space to handle
9045          * all of the extents from this block group.  If we can, we're good
9046          */
9047         if ((space_info->total_bytes != block_group->key.offset) &&
9048             (space_info->bytes_used + space_info->bytes_reserved +
9049              space_info->bytes_pinned + space_info->bytes_readonly +
9050              min_free < space_info->total_bytes)) {
9051                 spin_unlock(&space_info->lock);
9052                 goto out;
9053         }
9054         spin_unlock(&space_info->lock);
9055
9056         /*
9057          * ok we don't have enough space, but maybe we have free space on our
9058          * devices to allocate new chunks for relocation, so loop through our
9059          * alloc devices and guess if we have enough space.  if this block
9060          * group is going to be restriped, run checks against the target
9061          * profile instead of the current one.
9062          */
9063         ret = -1;
9064
9065         /*
9066          * index:
9067          *      0: raid10
9068          *      1: raid1
9069          *      2: dup
9070          *      3: raid0
9071          *      4: single
9072          */
9073         target = get_restripe_target(root->fs_info, block_group->flags);
9074         if (target) {
9075                 index = __get_raid_index(extended_to_chunk(target));
9076         } else {
9077                 /*
9078                  * this is just a balance, so if we were marked as full
9079                  * we know there is no space for a new chunk
9080                  */
9081                 if (full)
9082                         goto out;
9083
9084                 index = get_block_group_index(block_group);
9085         }
9086
9087         if (index == BTRFS_RAID_RAID10) {
9088                 dev_min = 4;
9089                 /* Divide by 2 */
9090                 min_free >>= 1;
9091         } else if (index == BTRFS_RAID_RAID1) {
9092                 dev_min = 2;
9093         } else if (index == BTRFS_RAID_DUP) {
9094                 /* Multiply by 2 */
9095                 min_free <<= 1;
9096         } else if (index == BTRFS_RAID_RAID0) {
9097                 dev_min = fs_devices->rw_devices;
9098                 min_free = div64_u64(min_free, dev_min);
9099         }
9100
9101         /* We need to do this so that we can look at pending chunks */
9102         trans = btrfs_join_transaction(root);
9103         if (IS_ERR(trans)) {
9104                 ret = PTR_ERR(trans);
9105                 goto out;
9106         }
9107
9108         mutex_lock(&root->fs_info->chunk_mutex);
9109         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
9110                 u64 dev_offset;
9111
9112                 /*
9113                  * check to make sure we can actually find a chunk with enough
9114                  * space to fit our block group in.
9115                  */
9116                 if (device->total_bytes > device->bytes_used + min_free &&
9117                     !device->is_tgtdev_for_dev_replace) {
9118                         ret = find_free_dev_extent(trans, device, min_free,
9119                                                    &dev_offset, NULL);
9120                         if (!ret)
9121                                 dev_nr++;
9122
9123                         if (dev_nr >= dev_min)
9124                                 break;
9125
9126                         ret = -1;
9127                 }
9128         }
9129         mutex_unlock(&root->fs_info->chunk_mutex);
9130         btrfs_end_transaction(trans, root);
9131 out:
9132         btrfs_put_block_group(block_group);
9133         return ret;
9134 }
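/*
 * The per-profile tweaks above in numbers: RAID10 mirrors and stripes,
 * so each of the (at least four) devices needs only min_free / 2 free;
 * DUP keeps both copies on one device, so that device needs
 * 2 * min_free; RAID0 spreads min_free evenly across all rw devices.
 */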
9135
9136 static int find_first_block_group(struct btrfs_root *root,
9137                 struct btrfs_path *path, struct btrfs_key *key)
9138 {
9139         int ret = 0;
9140         struct btrfs_key found_key;
9141         struct extent_buffer *leaf;
9142         int slot;
9143
9144         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
9145         if (ret < 0)
9146                 goto out;
9147
9148         while (1) {
9149                 slot = path->slots[0];
9150                 leaf = path->nodes[0];
9151                 if (slot >= btrfs_header_nritems(leaf)) {
9152                         ret = btrfs_next_leaf(root, path);
9153                         if (ret == 0)
9154                                 continue;
9155                         if (ret < 0)
9156                                 goto out;
9157                         break;
9158                 }
9159                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
9160
9161                 if (found_key.objectid >= key->objectid &&
9162                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
9163                         ret = 0;
9164                         goto out;
9165                 }
9166                 path->slots[0]++;
9167         }
9168 out:
9169         return ret;
9170 }
9171
9172 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
9173 {
9174         struct btrfs_block_group_cache *block_group;
9175         u64 last = 0;
9176
9177         while (1) {
9178                 struct inode *inode;
9179
9180                 block_group = btrfs_lookup_first_block_group(info, last);
9181                 while (block_group) {
9182                         spin_lock(&block_group->lock);
9183                         if (block_group->iref)
9184                                 break;
9185                         spin_unlock(&block_group->lock);
9186                         block_group = next_block_group(info->tree_root,
9187                                                        block_group);
9188                 }
9189                 if (!block_group) {
9190                         if (last == 0)
9191                                 break;
9192                         last = 0;
9193                         continue;
9194                 }
9195
9196                 inode = block_group->inode;
9197                 block_group->iref = 0;
9198                 block_group->inode = NULL;
9199                 spin_unlock(&block_group->lock);
9200                 iput(inode);
9201                 last = block_group->key.objectid + block_group->key.offset;
9202                 btrfs_put_block_group(block_group);
9203         }
9204 }
9205
9206 int btrfs_free_block_groups(struct btrfs_fs_info *info)
9207 {
9208         struct btrfs_block_group_cache *block_group;
9209         struct btrfs_space_info *space_info;
9210         struct btrfs_caching_control *caching_ctl;
9211         struct rb_node *n;
9212
9213         down_write(&info->commit_root_sem);
9214         while (!list_empty(&info->caching_block_groups)) {
9215                 caching_ctl = list_entry(info->caching_block_groups.next,
9216                                          struct btrfs_caching_control, list);
9217                 list_del(&caching_ctl->list);
9218                 put_caching_control(caching_ctl);
9219         }
9220         up_write(&info->commit_root_sem);
9221
9222         spin_lock(&info->unused_bgs_lock);
9223         while (!list_empty(&info->unused_bgs)) {
9224                 block_group = list_first_entry(&info->unused_bgs,
9225                                                struct btrfs_block_group_cache,
9226                                                bg_list);
9227                 list_del_init(&block_group->bg_list);
9228                 btrfs_put_block_group(block_group);
9229         }
9230         spin_unlock(&info->unused_bgs_lock);
9231
9232         spin_lock(&info->block_group_cache_lock);
9233         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
9234                 block_group = rb_entry(n, struct btrfs_block_group_cache,
9235                                        cache_node);
9236                 rb_erase(&block_group->cache_node,
9237                          &info->block_group_cache_tree);
9238                 RB_CLEAR_NODE(&block_group->cache_node);
9239                 spin_unlock(&info->block_group_cache_lock);
9240
9241                 down_write(&block_group->space_info->groups_sem);
9242                 list_del(&block_group->list);
9243                 up_write(&block_group->space_info->groups_sem);
9244
9245                 if (block_group->cached == BTRFS_CACHE_STARTED)
9246                         wait_block_group_cache_done(block_group);
9247
9248                 /*
9249                  * We haven't cached this block group, which means we could
9250                  * possibly have excluded extents on this block group.
9251                  */
9252                 if (block_group->cached == BTRFS_CACHE_NO ||
9253                     block_group->cached == BTRFS_CACHE_ERROR)
9254                         free_excluded_extents(info->extent_root, block_group);
9255
9256                 btrfs_remove_free_space_cache(block_group);
9257                 btrfs_put_block_group(block_group);
9258
9259                 spin_lock(&info->block_group_cache_lock);
9260         }
9261         spin_unlock(&info->block_group_cache_lock);
9262
9263         /* now that all the block groups are freed, go through and
9264          * free all the space_info structs.  This is only called during
9265          * the final stages of unmount, and so we know nobody is
9266          * using them.  We call synchronize_rcu() once before we start,
9267          * just to be on the safe side.
9268          */
9269         synchronize_rcu();
9270
9271         release_global_block_rsv(info);
9272
9273         while (!list_empty(&info->space_info)) {
9274                 int i;
9275
9276                 space_info = list_entry(info->space_info.next,
9277                                         struct btrfs_space_info,
9278                                         list);
9279                 if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
9280                         if (WARN_ON(space_info->bytes_pinned > 0 ||
9281                             space_info->bytes_reserved > 0 ||
9282                             space_info->bytes_may_use > 0)) {
9283                                 dump_space_info(space_info, 0, 0);
9284                         }
9285                 }
9286                 list_del(&space_info->list);
9287                 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
9288                         struct kobject *kobj;
9289                         kobj = space_info->block_group_kobjs[i];
9290                         space_info->block_group_kobjs[i] = NULL;
9291                         if (kobj) {
9292                                 kobject_del(kobj);
9293                                 kobject_put(kobj);
9294                         }
9295                 }
9296                 kobject_del(&space_info->kobj);
9297                 kobject_put(&space_info->kobj);
9298         }
9299         return 0;
9300 }
9301
9302 static void __link_block_group(struct btrfs_space_info *space_info,
9303                                struct btrfs_block_group_cache *cache)
9304 {
9305         int index = get_block_group_index(cache);
9306         bool first = false;
9307
9308         down_write(&space_info->groups_sem);
9309         if (list_empty(&space_info->block_groups[index]))
9310                 first = true;
9311         list_add_tail(&cache->list, &space_info->block_groups[index]);
9312         up_write(&space_info->groups_sem);
9313
9314         if (first) {
9315                 struct raid_kobject *rkobj;
9316                 int ret;
9317
9318                 rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
9319                 if (!rkobj)
9320                         goto out_err;
9321                 rkobj->raid_type = index;
9322                 kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
9323                 ret = kobject_add(&rkobj->kobj, &space_info->kobj,
9324                                   "%s", get_raid_name(index));
9325                 if (ret) {
9326                         kobject_put(&rkobj->kobj);
9327                         goto out_err;
9328                 }
9329                 space_info->block_group_kobjs[index] = &rkobj->kobj;
9330         }
9331
9332         return;
9333 out_err:
9334         pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
9335 }
9336
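/*
 * Allocate and initialize an in-memory block group descriptor for the chunk
 * at [start, start + size).  The caller is still responsible for filling in
 * the on-disk item, inserting the group into the rbtree and linking it to
 * its space_info.
 */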
9337 static struct btrfs_block_group_cache *
9338 btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
9339 {
9340         struct btrfs_block_group_cache *cache;
9341
9342         cache = kzalloc(sizeof(*cache), GFP_NOFS);
9343         if (!cache)
9344                 return NULL;
9345
9346         cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
9347                                         GFP_NOFS);
9348         if (!cache->free_space_ctl) {
9349                 kfree(cache);
9350                 return NULL;
9351         }
9352
9353         cache->key.objectid = start;
9354         cache->key.offset = size;
9355         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9356
9357         cache->sectorsize = root->sectorsize;
9358         cache->fs_info = root->fs_info;
9359         cache->full_stripe_len = btrfs_full_stripe_len(root,
9360                                                &root->fs_info->mapping_tree,
9361                                                start);
9362         atomic_set(&cache->count, 1);
9363         spin_lock_init(&cache->lock);
9364         init_rwsem(&cache->data_rwsem);
9365         INIT_LIST_HEAD(&cache->list);
9366         INIT_LIST_HEAD(&cache->cluster_list);
9367         INIT_LIST_HEAD(&cache->bg_list);
9368         INIT_LIST_HEAD(&cache->ro_list);
9369         INIT_LIST_HEAD(&cache->dirty_list);
9370         INIT_LIST_HEAD(&cache->io_list);
9371         btrfs_init_free_space_ctl(cache);
9372         atomic_set(&cache->trimming, 0);
9373
9374         return cache;
9375 }
9376
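/*
 * Called at mount time: walk every BLOCK_GROUP_ITEM in the extent tree and
 * build the in-memory block group cache, creating and linking the
 * corresponding space_info structures along the way.
 */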
9377 int btrfs_read_block_groups(struct btrfs_root *root)
9378 {
9379         struct btrfs_path *path;
9380         int ret;
9381         struct btrfs_block_group_cache *cache;
9382         struct btrfs_fs_info *info = root->fs_info;
9383         struct btrfs_space_info *space_info;
9384         struct btrfs_key key;
9385         struct btrfs_key found_key;
9386         struct extent_buffer *leaf;
9387         int need_clear = 0;
9388         u64 cache_gen;
9389
9390         root = info->extent_root;
9391         key.objectid = 0;
9392         key.offset = 0;
9393         key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9394         path = btrfs_alloc_path();
9395         if (!path)
9396                 return -ENOMEM;
9397         path->reada = 1;
9398
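        /*
         * Only trust the on-disk free space cache if its generation matches
         * the superblock generation; a mismatch (or mounting with
         * -o clear_cache) forces the cache to be rebuilt.
         */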
9399         cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
9400         if (btrfs_test_opt(root, SPACE_CACHE) &&
9401             btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
9402                 need_clear = 1;
9403         if (btrfs_test_opt(root, CLEAR_CACHE))
9404                 need_clear = 1;
9405
9406         while (1) {
9407                 ret = find_first_block_group(root, path, &key);
9408                 if (ret > 0)
9409                         break;
9410                 if (ret != 0)
9411                         goto error;
9412
9413                 leaf = path->nodes[0];
9414                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
9415
9416                 cache = btrfs_create_block_group_cache(root, found_key.objectid,
9417                                                        found_key.offset);
9418                 if (!cache) {
9419                         ret = -ENOMEM;
9420                         goto error;
9421                 }
9422
9423                 if (need_clear) {
9424                         /*
9425                          * When we mount with an old space cache, we need to
9426                          * set BTRFS_DC_CLEAR and set the dirty flag.
9427                          *
9428                          * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
9429                          *    truncate the old free space cache inode and
9430                          *    set up a new one.
9431                          * b) Setting the dirty flag makes sure that we flush
9432                          *    the new space cache info onto disk.
9433                          */
9434                         if (btrfs_test_opt(root, SPACE_CACHE))
9435                                 cache->disk_cache_state = BTRFS_DC_CLEAR;
9436                 }
9437
9438                 read_extent_buffer(leaf, &cache->item,
9439                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
9440                                    sizeof(cache->item));
9441                 cache->flags = btrfs_block_group_flags(&cache->item);
9442
9443                 key.objectid = found_key.objectid + found_key.offset;
9444                 btrfs_release_path(path);
9445
9446                 /*
9447                  * We need to exclude the super stripes now so that the space
9448                  * info has super bytes accounted for, otherwise we'll think
9449                  * we have more space than we actually do.
9450                  */
9451                 ret = exclude_super_stripes(root, cache);
9452                 if (ret) {
9453                         /*
9454                          * We may have excluded something, so call this just in
9455                          * case.
9456                          */
9457                         free_excluded_extents(root, cache);
9458                         btrfs_put_block_group(cache);
9459                         goto error;
9460                 }
9461
9462                 /*
9463                  * Check for two cases: either we are full, and therefore
9464                  * don't need to bother with the caching work since we won't
9465                  * find any space; or we are empty, and we can just add all
9466                  * the space in and be done with it.  This saves us a lot of
9467                  * time, particularly in the full case.
9468                  */
9469                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
9470                         cache->last_byte_to_unpin = (u64)-1;
9471                         cache->cached = BTRFS_CACHE_FINISHED;
9472                         free_excluded_extents(root, cache);
9473                 } else if (btrfs_block_group_used(&cache->item) == 0) {
9474                         cache->last_byte_to_unpin = (u64)-1;
9475                         cache->cached = BTRFS_CACHE_FINISHED;
9476                         add_new_free_space(cache, root->fs_info,
9477                                            found_key.objectid,
9478                                            found_key.objectid +
9479                                            found_key.offset);
9480                         free_excluded_extents(root, cache);
9481                 }
9482
9483                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
9484                 if (ret) {
9485                         btrfs_remove_free_space_cache(cache);
9486                         btrfs_put_block_group(cache);
9487                         goto error;
9488                 }
9489
9490                 ret = update_space_info(info, cache->flags, found_key.offset,
9491                                         btrfs_block_group_used(&cache->item),
9492                                         &space_info);
9493                 if (ret) {
9494                         btrfs_remove_free_space_cache(cache);
9495                         spin_lock(&info->block_group_cache_lock);
9496                         rb_erase(&cache->cache_node,
9497                                  &info->block_group_cache_tree);
9498                         RB_CLEAR_NODE(&cache->cache_node);
9499                         spin_unlock(&info->block_group_cache_lock);
9500                         btrfs_put_block_group(cache);
9501                         goto error;
9502                 }
9503
9504                 cache->space_info = space_info;
9505                 spin_lock(&cache->space_info->lock);
9506                 cache->space_info->bytes_readonly += cache->bytes_super;
9507                 spin_unlock(&cache->space_info->lock);
9508
9509                 __link_block_group(space_info, cache);
9510
9511                 set_avail_alloc_bits(root->fs_info, cache->flags);
9512                 if (btrfs_chunk_readonly(root, cache->key.objectid)) {
9513                         inc_block_group_ro(cache, 1);
9514                 } else if (btrfs_block_group_used(&cache->item) == 0) {
9515                         spin_lock(&info->unused_bgs_lock);
9516                         /* Should always be true but just in case. */
9517                         if (list_empty(&cache->bg_list)) {
9518                                 btrfs_get_block_group(cache);
9519                                 list_add_tail(&cache->bg_list,
9520                                               &info->unused_bgs);
9521                         }
9522                         spin_unlock(&info->unused_bgs_lock);
9523                 }
9524         }
9525
9526         list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
9527                 if (!(get_alloc_profile(root, space_info->flags) &
9528                       (BTRFS_BLOCK_GROUP_RAID10 |
9529                        BTRFS_BLOCK_GROUP_RAID1 |
9530                        BTRFS_BLOCK_GROUP_RAID5 |
9531                        BTRFS_BLOCK_GROUP_RAID6 |
9532                        BTRFS_BLOCK_GROUP_DUP)))
9533                         continue;
9534                 /*
9535                  * Avoid allocating from un-mirrored block groups if there are
9536                  * mirrored block groups.
9537                  */
9538                 list_for_each_entry(cache,
9539                                 &space_info->block_groups[BTRFS_RAID_RAID0],
9540                                 list)
9541                         inc_block_group_ro(cache, 1);
9542                 list_for_each_entry(cache,
9543                                 &space_info->block_groups[BTRFS_RAID_SINGLE],
9544                                 list)
9545                         inc_block_group_ro(cache, 1);
9546         }
9547
9548         init_global_block_rsv(info);
9549         ret = 0;
9550 error:
9551         btrfs_free_path(path);
9552         return ret;
9553 }
9554
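/*
 * Insert the block group items for all block groups created during the
 * current transaction (queued on trans->new_bgs by btrfs_make_block_group())
 * into the extent tree, and finish the chunk allocation for each of them.
 */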
9555 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
9556                                        struct btrfs_root *root)
9557 {
9558         struct btrfs_block_group_cache *block_group, *tmp;
9559         struct btrfs_root *extent_root = root->fs_info->extent_root;
9560         struct btrfs_block_group_item item;
9561         struct btrfs_key key;
9562         int ret = 0;
9563
9564         list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
9565                 if (ret)
9566                         goto next;
9567
9568                 spin_lock(&block_group->lock);
9569                 memcpy(&item, &block_group->item, sizeof(item));
9570                 memcpy(&key, &block_group->key, sizeof(key));
9571                 spin_unlock(&block_group->lock);
9572
9573                 ret = btrfs_insert_item(trans, extent_root, &key, &item,
9574                                         sizeof(item));
9575                 if (ret)
9576                         btrfs_abort_transaction(trans, extent_root, ret);
9577                 ret = btrfs_finish_chunk_alloc(trans, extent_root,
9578                                                key.objectid, key.offset);
9579                 if (ret)
9580                         btrfs_abort_transaction(trans, extent_root, ret);
9581 next:
9582                 list_del_init(&block_group->bg_list);
9583         }
9584 }
9585
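/*
 * Create the in-memory block group for a freshly allocated chunk and queue
 * it on trans->new_bgs; the block group item itself is inserted into the
 * extent tree later by btrfs_create_pending_block_groups().
 */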
9586 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
9587                            struct btrfs_root *root, u64 bytes_used,
9588                            u64 type, u64 chunk_objectid, u64 chunk_offset,
9589                            u64 size)
9590 {
9591         int ret;
9592         struct btrfs_root *extent_root;
9593         struct btrfs_block_group_cache *cache;
9594
9595         extent_root = root->fs_info->extent_root;
9596
9597         btrfs_set_log_full_commit(root->fs_info, trans);
9598
9599         cache = btrfs_create_block_group_cache(root, chunk_offset, size);
9600         if (!cache)
9601                 return -ENOMEM;
9602
9603         btrfs_set_block_group_used(&cache->item, bytes_used);
9604         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
9605         btrfs_set_block_group_flags(&cache->item, type);
9606
9607         cache->flags = type;
9608         cache->last_byte_to_unpin = (u64)-1;
9609         cache->cached = BTRFS_CACHE_FINISHED;
9610         ret = exclude_super_stripes(root, cache);
9611         if (ret) {
9612                 /*
9613                  * We may have excluded something, so call this just in
9614                  * case.
9615                  */
9616                 free_excluded_extents(root, cache);
9617                 btrfs_put_block_group(cache);
9618                 return ret;
9619         }
9620
9621         add_new_free_space(cache, root->fs_info, chunk_offset,
9622                            chunk_offset + size);
9623
9624         free_excluded_extents(root, cache);
9625
9626         /*
9627          * Call to ensure the corresponding space_info object is created and
9628          * assigned to our block group, but don't update its counters just yet.
9629          * We want our bg to be added to the rbtree with its ->space_info set.
9630          */
9631         ret = update_space_info(root->fs_info, cache->flags, 0, 0,
9632                                 &cache->space_info);
9633         if (ret) {
9634                 btrfs_remove_free_space_cache(cache);
9635                 btrfs_put_block_group(cache);
9636                 return ret;
9637         }
9638
9639         ret = btrfs_add_block_group_cache(root->fs_info, cache);
9640         if (ret) {
9641                 btrfs_remove_free_space_cache(cache);
9642                 btrfs_put_block_group(cache);
9643                 return ret;
9644         }
9645
9646         /*
9647          * Now that our block group has its ->space_info set and is inserted in
9648          * the rbtree, update the space info's counters.
9649          */
9650         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
9651                                 &cache->space_info);
9652         if (ret) {
9653                 btrfs_remove_free_space_cache(cache);
9654                 spin_lock(&root->fs_info->block_group_cache_lock);
9655                 rb_erase(&cache->cache_node,
9656                          &root->fs_info->block_group_cache_tree);
9657                 RB_CLEAR_NODE(&cache->cache_node);
9658                 spin_unlock(&root->fs_info->block_group_cache_lock);
9659                 btrfs_put_block_group(cache);
9660                 return ret;
9661         }
9662         update_global_block_rsv(root->fs_info);
9663
9664         spin_lock(&cache->space_info->lock);
9665         cache->space_info->bytes_readonly += cache->bytes_super;
9666         spin_unlock(&cache->space_info->lock);
9667
9668         __link_block_group(cache->space_info, cache);
9669
9670         list_add_tail(&cache->bg_list, &trans->new_bgs);
9671
9672         set_avail_alloc_bits(extent_root->fs_info, type);
9673
9674         return 0;
9675 }
9676
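/*
 * Inverse of set_avail_alloc_bits(): drop the extended profile bits of
 * @flags from the avail_*_alloc_bits masks once the last block group of
 * that profile is gone.
 */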
9677 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
9678 {
9679         u64 extra_flags = chunk_to_extended(flags) &
9680                                 BTRFS_EXTENDED_PROFILE_MASK;
9681
9682         write_seqlock(&fs_info->profiles_lock);
9683         if (flags & BTRFS_BLOCK_GROUP_DATA)
9684                 fs_info->avail_data_alloc_bits &= ~extra_flags;
9685         if (flags & BTRFS_BLOCK_GROUP_METADATA)
9686                 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
9687         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
9688                 fs_info->avail_system_alloc_bits &= ~extra_flags;
9689         write_sequnlock(&fs_info->profiles_lock);
9690 }
9691
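/*
 * Remove a (read-only and fully unused) block group: drop its free space
 * cache inode, unlink it from every lookup structure, update the space_info
 * counters and finally delete its item from the extent tree.
 */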
9692 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
9693                              struct btrfs_root *root, u64 group_start,
9694                              struct extent_map *em)
9695 {
9696         struct btrfs_path *path;
9697         struct btrfs_block_group_cache *block_group;
9698         struct btrfs_free_cluster *cluster;
9699         struct btrfs_root *tree_root = root->fs_info->tree_root;
9700         struct btrfs_key key;
9701         struct inode *inode;
9702         struct kobject *kobj = NULL;
9703         int ret;
9704         int index;
9705         int factor;
9706         struct btrfs_caching_control *caching_ctl = NULL;
9707         bool remove_em;
9708
9709         root = root->fs_info->extent_root;
9710
9711         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
9712         BUG_ON(!block_group);
9713         BUG_ON(!block_group->ro);
9714
9715         /*
9716          * Free the reserved super bytes from this block group before
9717          * removing it.
9718          */
9719         free_excluded_extents(root, block_group);
9720
9721         memcpy(&key, &block_group->key, sizeof(key));
9722         index = get_block_group_index(block_group);
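        /*
         * DUP, RAID1 and RAID10 keep two copies of every extent, so they
         * consume twice the logical size on disk.
         */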
9723         if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
9724                                   BTRFS_BLOCK_GROUP_RAID1 |
9725                                   BTRFS_BLOCK_GROUP_RAID10))
9726                 factor = 2;
9727         else
9728                 factor = 1;
9729
9730         /* make sure this block group isn't part of an allocation cluster */
9731         cluster = &root->fs_info->data_alloc_cluster;
9732         spin_lock(&cluster->refill_lock);
9733         btrfs_return_cluster_to_free_space(block_group, cluster);
9734         spin_unlock(&cluster->refill_lock);
9735
9736         /*
9737          * make sure this block group isn't part of a metadata
9738          * allocation cluster
9739          */
9740         cluster = &root->fs_info->meta_alloc_cluster;
9741         spin_lock(&cluster->refill_lock);
9742         btrfs_return_cluster_to_free_space(block_group, cluster);
9743         spin_unlock(&cluster->refill_lock);
9744
9745         path = btrfs_alloc_path();
9746         if (!path) {
9747                 ret = -ENOMEM;
9748                 goto out;
9749         }
9750
9751         /*
9752          * get the inode first so any iput calls done for the io_list
9753          * aren't the final iput (no unlinks allowed now)
9754          */
9755         inode = lookup_free_space_inode(tree_root, block_group, path);
9756
9757         mutex_lock(&trans->transaction->cache_write_mutex);
9758         /*
9759          * Make sure our free space cache IO is done before removing the
9760          * free space inode.
9761          */
9762         spin_lock(&trans->transaction->dirty_bgs_lock);
9763         if (!list_empty(&block_group->io_list)) {
9764                 list_del_init(&block_group->io_list);
9765
9766                 WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);
9767
9768                 spin_unlock(&trans->transaction->dirty_bgs_lock);
9769                 btrfs_wait_cache_io(root, trans, block_group,
9770                                     &block_group->io_ctl, path,
9771                                     block_group->key.objectid);
9772                 btrfs_put_block_group(block_group);
9773                 spin_lock(&trans->transaction->dirty_bgs_lock);
9774         }
9775
9776         if (!list_empty(&block_group->dirty_list)) {
9777                 list_del_init(&block_group->dirty_list);
9778                 btrfs_put_block_group(block_group);
9779         }
9780         spin_unlock(&trans->transaction->dirty_bgs_lock);
9781         mutex_unlock(&trans->transaction->cache_write_mutex);
9782
9783         if (!IS_ERR(inode)) {
9784                 ret = btrfs_orphan_add(trans, inode);
9785                 if (ret) {
9786                         btrfs_add_delayed_iput(inode);
9787                         goto out;
9788                 }
9789                 clear_nlink(inode);
9790                 /* One for the block group's ref */
9791                 spin_lock(&block_group->lock);
9792                 if (block_group->iref) {
9793                         block_group->iref = 0;
9794                         block_group->inode = NULL;
9795                         spin_unlock(&block_group->lock);
9796                         iput(inode);
9797                 } else {
9798                         spin_unlock(&block_group->lock);
9799                 }
9800                 /* One for our lookup ref */
9801                 btrfs_add_delayed_iput(inode);
9802         }
9803
9804         key.objectid = BTRFS_FREE_SPACE_OBJECTID;
9805         key.offset = block_group->key.objectid;
9806         key.type = 0;
9807
9808         ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
9809         if (ret < 0)
9810                 goto out;
9811         if (ret > 0)
9812                 btrfs_release_path(path);
9813         if (ret == 0) {
9814                 ret = btrfs_del_item(trans, tree_root, path);
9815                 if (ret)
9816                         goto out;
9817                 btrfs_release_path(path);
9818         }
9819
9820         spin_lock(&root->fs_info->block_group_cache_lock);
9821         rb_erase(&block_group->cache_node,
9822                  &root->fs_info->block_group_cache_tree);
9823         RB_CLEAR_NODE(&block_group->cache_node);
9824
9825         if (root->fs_info->first_logical_byte == block_group->key.objectid)
9826                 root->fs_info->first_logical_byte = (u64)-1;
9827         spin_unlock(&root->fs_info->block_group_cache_lock);
9828
9829         down_write(&block_group->space_info->groups_sem);
9830         /*
9831          * we must use list_del_init so people can check to see if they
9832          * are still on the list after taking the semaphore
9833          */
9834         list_del_init(&block_group->list);
9835         if (list_empty(&block_group->space_info->block_groups[index])) {
9836                 kobj = block_group->space_info->block_group_kobjs[index];
9837                 block_group->space_info->block_group_kobjs[index] = NULL;
9838                 clear_avail_alloc_bits(root->fs_info, block_group->flags);
9839         }
9840         up_write(&block_group->space_info->groups_sem);
9841         if (kobj) {
9842                 kobject_del(kobj);
9843                 kobject_put(kobj);
9844         }
9845
9846         if (block_group->has_caching_ctl)
9847                 caching_ctl = get_caching_control(block_group);
9848         if (block_group->cached == BTRFS_CACHE_STARTED)
9849                 wait_block_group_cache_done(block_group);
9850         if (block_group->has_caching_ctl) {
9851                 down_write(&root->fs_info->commit_root_sem);
9852                 if (!caching_ctl) {
9853                         struct btrfs_caching_control *ctl;
9854
9855                         list_for_each_entry(ctl,
9856                                     &root->fs_info->caching_block_groups, list)
9857                                 if (ctl->block_group == block_group) {
9858                                         caching_ctl = ctl;
9859                                         atomic_inc(&caching_ctl->count);
9860                                         break;
9861                                 }
9862                 }
9863                 if (caching_ctl)
9864                         list_del_init(&caching_ctl->list);
9865                 up_write(&root->fs_info->commit_root_sem);
9866                 if (caching_ctl) {
9867                         /* Once for the caching bgs list and once for us. */
9868                         put_caching_control(caching_ctl);
9869                         put_caching_control(caching_ctl);
9870                 }
9871         }
9872
9873         spin_lock(&trans->transaction->dirty_bgs_lock);
9874         WARN_ON(!list_empty(&block_group->dirty_list));
9875         WARN_ON(!list_empty(&block_group->io_list));
9880         spin_unlock(&trans->transaction->dirty_bgs_lock);
9881         btrfs_remove_free_space_cache(block_group);
9882
9883         spin_lock(&block_group->space_info->lock);
9884         list_del_init(&block_group->ro_list);
9885
9886         if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
9887                 WARN_ON(block_group->space_info->total_bytes
9888                         < block_group->key.offset);
9889                 WARN_ON(block_group->space_info->bytes_readonly
9890                         < block_group->key.offset);
9891                 WARN_ON(block_group->space_info->disk_total
9892                         < block_group->key.offset * factor);
9893         }
9894         block_group->space_info->total_bytes -= block_group->key.offset;
9895         block_group->space_info->bytes_readonly -= block_group->key.offset;
9896         block_group->space_info->disk_total -= block_group->key.offset * factor;
9897
9898         spin_unlock(&block_group->space_info->lock);
9899
9900         memcpy(&key, &block_group->key, sizeof(key));
9901
9902         lock_chunks(root);
9903         if (!list_empty(&em->list)) {
9904                 /* We're in the transaction->pending_chunks list. */
9905                 free_extent_map(em);
9906         }
9907         spin_lock(&block_group->lock);
9908         block_group->removed = 1;
9909         /*
9910          * At this point trimming can't start on this block group, because we
9911          * removed the block group from fs_info->block_group_cache_tree, so no
9912          * one can find it anymore; and even if someone already got this block
9913          * group before we removed it from the rbtree, they have already
9914          * incremented block_group->trimming - if they didn't, they won't find
9915          * any free space entries because we already removed them all when we
9916          * called btrfs_remove_free_space_cache().
9917          *
9918          * And we must not remove the extent map from the fs_info->mapping_tree
9919          * to prevent the same logical address range and physical device space
9920          * ranges from being reused for a new block group. This is because our
9921          * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
9922          * completely transactionless, so while it is trimming a range the
9923          * currently running transaction might finish and a new one start,
9924          * allowing for new block groups to be created that can reuse the same
9925          * physical device locations unless we take this special care.
9926          *
9927          * There may also be an implicit trim operation if the file system
9928          * is mounted with -odiscard. The same protections must remain
9929          * in place until the extents have been discarded completely when
9930          * the transaction commit has completed.
9931          */
9932         remove_em = (atomic_read(&block_group->trimming) == 0);
9933         /*
9934          * Make sure a trimmer task always sees the em in the pinned_chunks list
9935          * if it sees block_group->removed == 1 (needs to lock block_group->lock
9936          * before checking block_group->removed).
9937          */
9938         if (!remove_em) {
9939                 /*
9940                  * Our em might be in trans->transaction->pending_chunks which
9941                  * is protected by fs_info->chunk_mutex ([lock|unlock]_chunks),
9942                  * and so is the fs_info->pinned_chunks list.
9943                  *
9944                  * So at this point we must be holding the chunk_mutex to avoid
9945                  * any races with chunk allocation (more specifically at
9946                  * volumes.c:contains_pending_extent()), to ensure it always
9947                  * sees the em, either in the pending_chunks list or in the
9948                  * pinned_chunks list.
9949                  */
9950                 list_move_tail(&em->list, &root->fs_info->pinned_chunks);
9951         }
9952         spin_unlock(&block_group->lock);
9953
9954         if (remove_em) {
9955                 struct extent_map_tree *em_tree;
9956
9957                 em_tree = &root->fs_info->mapping_tree.map_tree;
9958                 write_lock(&em_tree->lock);
9959                 /*
9960                  * The em might be in the pending_chunks list, so make sure the
9961                  * chunk mutex is locked, since remove_extent_mapping() will
9962                  * delete us from that list.
9963                  */
9964                 remove_extent_mapping(em_tree, em);
9965                 write_unlock(&em_tree->lock);
9966                 /* once for the tree */
9967                 free_extent_map(em);
9968         }
9969
9970         unlock_chunks(root);
9971
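        /*
         * Drop two references: one for the lookup done at the top of this
         * function and one for the reference the rbtree held before we
         * erased the block group from the block group cache tree.
         */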
9972         btrfs_put_block_group(block_group);
9973         btrfs_put_block_group(block_group);
9974
9975         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
9976         if (ret > 0)
9977                 ret = -EIO;
9978         if (ret < 0)
9979                 goto out;
9980
9981         ret = btrfs_del_item(trans, root, path);
9982 out:
9983         btrfs_free_path(path);
9984         return ret;
9985 }
9986
9987 /*
9988  * Process the unused_bgs list and remove any that don't have any allocated
9989  * space inside of them.
9990  */
9991 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
9992 {
9993         struct btrfs_block_group_cache *block_group;
9994         struct btrfs_space_info *space_info;
9995         struct btrfs_root *root = fs_info->extent_root;
9996         struct btrfs_trans_handle *trans;
9997         int ret = 0;
9998
9999         if (!fs_info->open)
10000                 return;
10001
10002         spin_lock(&fs_info->unused_bgs_lock);
10003         while (!list_empty(&fs_info->unused_bgs)) {
10004                 u64 start, end;
10005                 int trimming;
10006
10007                 block_group = list_first_entry(&fs_info->unused_bgs,
10008                                                struct btrfs_block_group_cache,
10009                                                bg_list);
10010                 space_info = block_group->space_info;
10011                 list_del_init(&block_group->bg_list);
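                /*
                 * After a failure in a previous iteration, or for mixed
                 * block groups (which hold both data and metadata), just
                 * drop our reference and move on.
                 */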
10012                 if (ret || btrfs_mixed_space_info(space_info)) {
10013                         btrfs_put_block_group(block_group);
10014                         continue;
10015                 }
10016                 spin_unlock(&fs_info->unused_bgs_lock);
10017
10018                 mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
10019
10020                 /* Don't want to race with allocators so take the groups_sem */
10021                 down_write(&space_info->groups_sem);
10022                 spin_lock(&block_group->lock);
10023                 if (block_group->reserved ||
10024                     btrfs_block_group_used(&block_group->item) ||
10025                     block_group->ro) {
10026                         /*
10027                          * We want to bail if we made new allocations or have
10028                          * outstanding allocations in this block group.  We do
10029                          * the ro check in case balance is currently acting on
10030                          * this block group.
10031                          */
10032                         spin_unlock(&block_group->lock);
10033                         up_write(&space_info->groups_sem);
10034                         goto next;
10035                 }
10036                 spin_unlock(&block_group->lock);
10037
10038                 /* We don't want to force the issue, only flip if it's ok. */
10039                 ret = inc_block_group_ro(block_group, 0);
10040                 up_write(&space_info->groups_sem);
10041                 if (ret < 0) {
10042                         ret = 0;
10043                         goto next;
10044                 }
10045
10046                 /*
10047                  * Want to do this before we do anything else so we can recover
10048                  * properly if we fail to join the transaction.
10049                  */
10050                 /* 1 for btrfs_orphan_reserve_metadata() */
10051                 trans = btrfs_start_transaction(root, 1);
10052                 if (IS_ERR(trans)) {
10053                         btrfs_dec_block_group_ro(root, block_group);
10054                         ret = PTR_ERR(trans);
10055                         goto next;
10056                 }
10057
10058                 /*
10059                  * We could have pending pinned extents for this block group,
10060                  * just delete them, we don't care about them anymore.
10061                  */
10062                 start = block_group->key.objectid;
10063                 end = start + block_group->key.offset - 1;
10064                 /*
10065                  * Hold the unused_bg_unpin_mutex lock to avoid racing with
10066                  * btrfs_finish_extent_commit(). If we are at transaction N,
10067                  * another task might be running finish_extent_commit() for the
10068                  * previous transaction N - 1, and have seen a range belonging
10069                  * to the block group in freed_extents[] before we were able to
10070                  * clear the whole block group range from freed_extents[]. This
10071                  * means that task can look up the block group after we
10072                  * unpinned it from freed_extents[] and removed it, leading to
10073                  * a BUG_ON() at btrfs_unpin_extent_range().
10074                  */
10075                 mutex_lock(&fs_info->unused_bg_unpin_mutex);
10076                 ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
10077                                   EXTENT_DIRTY, GFP_NOFS);
10078                 if (ret) {
10079                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10080                         btrfs_dec_block_group_ro(root, block_group);
10081                         goto end_trans;
10082                 }
10083                 ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
10084                                   EXTENT_DIRTY, GFP_NOFS);
10085                 if (ret) {
10086                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10087                         btrfs_dec_block_group_ro(root, block_group);
10088                         goto end_trans;
10089                 }
10090                 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10091
10092                 /* Reset pinned so btrfs_put_block_group doesn't complain */
10093                 spin_lock(&space_info->lock);
10094                 spin_lock(&block_group->lock);
10095
10096                 space_info->bytes_pinned -= block_group->pinned;
10097                 space_info->bytes_readonly += block_group->pinned;
10098                 percpu_counter_add(&space_info->total_bytes_pinned,
10099                                    -block_group->pinned);
10100                 block_group->pinned = 0;
10101
10102                 spin_unlock(&block_group->lock);
10103                 spin_unlock(&space_info->lock);
10104
10105                 /* DISCARD can flip during remount */
10106                 trimming = btrfs_test_opt(root, DISCARD);
10107
10108                 /* Implicit trim during transaction commit. */
10109                 if (trimming)
10110                         btrfs_get_block_group_trimming(block_group);
10111
10112                 /*
10113                  * btrfs_remove_chunk() will abort the transaction if things go
10114                  * horribly wrong.
10115                  */
10116                 ret = btrfs_remove_chunk(trans, root,
10117                                          block_group->key.objectid);
10118
10119                 if (ret) {
10120                         if (trimming)
10121                                 btrfs_put_block_group_trimming(block_group);
10122                         goto end_trans;
10123                 }
10124
10125                 /*
10126                  * If we're not mounted with -odiscard, we can just forget
10127                  * about this block group. Otherwise we'll need to wait
10128                  * until transaction commit to do the actual discard.
10129                  */
10130                 if (trimming) {
10131                         WARN_ON(!list_empty(&block_group->bg_list));
10132                         spin_lock(&trans->transaction->deleted_bgs_lock);
10133                         list_move(&block_group->bg_list,
10134                                   &trans->transaction->deleted_bgs);
10135                         spin_unlock(&trans->transaction->deleted_bgs_lock);
10136                         btrfs_get_block_group(block_group);
10137                 }
10138 end_trans:
10139                 btrfs_end_transaction(trans, root);
10140 next:
10141                 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
10142                 btrfs_put_block_group(block_group);
10143                 spin_lock(&fs_info->unused_bgs_lock);
10144         }
10145         spin_unlock(&fs_info->unused_bgs_lock);
10146 }
10147
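/*
 * Create the initial empty space_info structures for a freshly made
 * filesystem: SYSTEM, plus either a combined METADATA|DATA space_info or
 * separate METADATA and DATA ones, depending on the MIXED_GROUPS incompat
 * feature.
 */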
10148 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
10149 {
10150         struct btrfs_space_info *space_info;
10151         struct btrfs_super_block *disk_super;
10152         u64 features;
10153         u64 flags;
10154         int mixed = 0;
10155         int ret;
10156
10157         disk_super = fs_info->super_copy;
10158         if (!btrfs_super_root(disk_super))
10159                 return 1;
10160
10161         features = btrfs_super_incompat_flags(disk_super);
10162         if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
10163                 mixed = 1;
10164
10165         flags = BTRFS_BLOCK_GROUP_SYSTEM;
10166         ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10167         if (ret)
10168                 goto out;
10169
10170         if (mixed) {
10171                 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
10172                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10173         } else {
10174                 flags = BTRFS_BLOCK_GROUP_METADATA;
10175                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10176                 if (ret)
10177                         goto out;
10178
10179                 flags = BTRFS_BLOCK_GROUP_DATA;
10180                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10181         }
10182 out:
10183         return ret;
10184 }
10185
10186 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
10187 {
10188         return unpin_extent_range(root, start, end, false);
10189 }
10190
10191 /*
10192  * It used to be that old block groups would be left around forever.
10193  * Iterating over them would be enough to trim unused space.  Since we
10194  * now automatically remove them, we also need to iterate over unallocated
10195  * space.
10196  *
10197  * We don't want a transaction for this since the discard may take a
10198  * substantial amount of time.  We don't require that a transaction be
10199  * running, but we do need to take a running transaction into account
10200  * to ensure that we're not discarding chunks that were released in
10201  * the current transaction.
10202  *
10203  * Holding the chunks lock will prevent other threads from allocating
10204  * or releasing chunks, but it won't prevent a running transaction
10205  * from committing and releasing the memory that the pending chunks
10206  * list head uses.  For that, we need to take a reference to the
10207  * transaction.
10208  */
10209 static int btrfs_trim_free_extents(struct btrfs_device *device,
10210                                    u64 minlen, u64 *trimmed)
10211 {
10212         u64 start = 0, len = 0;
10213         int ret;
10214
10215         *trimmed = 0;
10216
10217         /* Not writeable = nothing to do. */
10218         if (!device->writeable)
10219                 return 0;
10220
10221         /* No free space = nothing to do. */
10222         if (device->total_bytes <= device->bytes_used)
10223                 return 0;
10224
10225         ret = 0;
10226
10227         while (1) {
10228                 struct btrfs_fs_info *fs_info = device->dev_root->fs_info;
10229                 struct btrfs_transaction *trans;
10230                 u64 bytes;
10231
10232                 ret = mutex_lock_interruptible(&fs_info->chunk_mutex);
10233                 if (ret)
10234                         return ret;
10235
10236                 down_read(&fs_info->commit_root_sem);
10237
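                /*
                 * Grab a reference on the running transaction (if any) so it
                 * cannot be freed while find_free_dev_extent_start() looks
                 * at its pending chunks.
                 */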
10238                 spin_lock(&fs_info->trans_lock);
10239                 trans = fs_info->running_transaction;
10240                 if (trans)
10241                         atomic_inc(&trans->use_count);
10242                 spin_unlock(&fs_info->trans_lock);
10243
10244                 ret = find_free_dev_extent_start(trans, device, minlen, start,
10245                                                  &start, &len);
10246                 if (trans)
10247                         btrfs_put_transaction(trans);
10248
10249                 if (ret) {
10250                         up_read(&fs_info->commit_root_sem);
10251                         mutex_unlock(&fs_info->chunk_mutex);
10252                         if (ret == -ENOSPC)
10253                                 ret = 0;
10254                         break;
10255                 }
10256
10257                 ret = btrfs_issue_discard(device->bdev, start, len, &bytes);
10258                 up_read(&fs_info->commit_root_sem);
10259                 mutex_unlock(&fs_info->chunk_mutex);
10260
10261                 if (ret)
10262                         break;
10263
10264                 start += len;
10265                 *trimmed += bytes;
10266
10267                 if (fatal_signal_pending(current)) {
10268                         ret = -ERESTARTSYS;
10269                         break;
10270                 }
10271
10272                 cond_resched();
10273         }
10274
10275         return ret;
10276 }
10277
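/*
 * Trim [range->start, range->start + range->len): discard the free space of
 * every block group intersecting the range, then the unallocated space of
 * every writeable device.  On return range->len holds the number of bytes
 * actually trimmed.  An illustrative (hypothetical) caller, roughly what the
 * FITRIM ioctl path does:
 *
 *      struct fstrim_range range = { .start = 0, .len = (u64)-1 };
 *
 *      ret = btrfs_trim_fs(fs_info->tree_root, &range);
 */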
10278 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
10279 {
10280         struct btrfs_fs_info *fs_info = root->fs_info;
10281         struct btrfs_block_group_cache *cache = NULL;
10282         struct btrfs_device *device;
10283         struct list_head *devices;
10284         u64 group_trimmed;
10285         u64 start;
10286         u64 end;
10287         u64 trimmed = 0;
10288         u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
10289         int ret = 0;
10290
10291         /*
10292          * Try to trim all FS space; our first block group may start at a non-zero offset.
10293          */
10294         if (range->len == total_bytes)
10295                 cache = btrfs_lookup_first_block_group(fs_info, range->start);
10296         else
10297                 cache = btrfs_lookup_block_group(fs_info, range->start);
10298
10299         while (cache) {
10300                 if (cache->key.objectid >= (range->start + range->len)) {
10301                         btrfs_put_block_group(cache);
10302                         break;
10303                 }
10304
10305                 start = max(range->start, cache->key.objectid);
10306                 end = min(range->start + range->len,
10307                                 cache->key.objectid + cache->key.offset);
10308
10309                 if (end - start >= range->minlen) {
10310                         if (!block_group_cache_done(cache)) {
10311                                 ret = cache_block_group(cache, 0);
10312                                 if (ret) {
10313                                         btrfs_put_block_group(cache);
10314                                         break;
10315                                 }
10316                                 ret = wait_block_group_cache_done(cache);
10317                                 if (ret) {
10318                                         btrfs_put_block_group(cache);
10319                                         break;
10320                                 }
10321                         }
10322                         ret = btrfs_trim_block_group(cache,
10323                                                      &group_trimmed,
10324                                                      start,
10325                                                      end,
10326                                                      range->minlen);
10327
10328                         trimmed += group_trimmed;
10329                         if (ret) {
10330                                 btrfs_put_block_group(cache);
10331                                 break;
10332                         }
10333                 }
10334
10335                 cache = next_block_group(fs_info->tree_root, cache);
10336         }
10337
10338         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
10339         devices = &root->fs_info->fs_devices->alloc_list;
10340         list_for_each_entry(device, devices, dev_alloc_list) {
10341                 ret = btrfs_trim_free_extents(device, range->minlen,
10342                                               &group_trimmed);
10343                 if (ret)
10344                         break;
10345
10346                 trimmed += group_trimmed;
10347         }
10348         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
10349
10350         range->len = trimmed;
10351         return ret;
10352 }
10353
10354 /*
10355  * btrfs_{start,end}_write_no_snapshoting() are similar to
10356  * mnt_{want,drop}_write(): they are used to prevent some tasks from writing
10357  * data into the page cache through nocow before the subvolume is snapshotted
10358  * (the data is instead flushed to disk after the snapshot creation), or to
10359  * prevent operations while snapshotting is ongoing that would make the
10360  * snapshot inconsistent (writes followed by expanding truncates, for example).
10361  */
10362 void btrfs_end_write_no_snapshoting(struct btrfs_root *root)
10363 {
10364         percpu_counter_dec(&root->subv_writers->counter);
10365         /*
10366          * Make sure counter is updated before we wake up
10367          * waiters.
10368          */
10369         smp_mb();
10370         if (waitqueue_active(&root->subv_writers->wait))
10371                 wake_up(&root->subv_writers->wait);
10372 }
10373
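/*
 * An illustrative (hypothetical) pairing of the two helpers; a return value
 * of 0 means a snapshot is pending and the write must not go through nocow:
 *
 *      if (!btrfs_start_write_no_snapshoting(root))
 *              return -EAGAIN;
 *      ...do the nocow write...
 *      btrfs_end_write_no_snapshoting(root);
 */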
10374 int btrfs_start_write_no_snapshoting(struct btrfs_root *root)
10375 {
10376         if (atomic_read(&root->will_be_snapshoted))
10377                 return 0;
10378
10379         percpu_counter_inc(&root->subv_writers->counter);
10380         /*
10381          * Make sure counter is updated before we check for snapshot creation.
10382          */
10383         smp_mb();
10384         if (atomic_read(&root->will_be_snapshoted)) {
10385                 btrfs_end_write_no_snapshoting(root);
10386                 return 0;
10387         }
10388         return 1;
10389 }