fs/btrfs/extent-tree.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include "hash.h"
#include "tree-log.h"
#include "disk-io.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "math.h"
#include "sysfs.h"
#include "qgroup.h"

#undef SCRAMBLE_DELAYED_REFS

/*
 * control flags for do_chunk_alloc's force field
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one
 */
enum {
        CHUNK_ALLOC_NO_FORCE = 0,
        CHUNK_ALLOC_LIMITED = 1,
        CHUNK_ALLOC_FORCE = 2,
};
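
/*
 * Illustrative sketch (editorial, not part of the original source): one way
 * a caller might map its situation onto the force levels above.  The helper
 * and its arguments are hypothetical; the real policy lives in
 * should_alloc_chunk() and do_chunk_alloc() later in this file.
 */
#if 0
static int chunk_alloc_force_level(bool enospc_pressure, bool clustering)
{
        if (enospc_pressure)
                return CHUNK_ALLOC_FORCE;      /* must try to allocate */
        if (clustering)
                return CHUNK_ALLOC_LIMITED;    /* keep a pool to cluster in */
        return CHUNK_ALLOC_NO_FORCE;           /* only if really needed */
}
#endif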

/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
        RESERVE_FREE = 0,
        RESERVE_ALLOC = 1,
        RESERVE_ALLOC_NO_ACCOUNT = 2,
};
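
/*
 * Editorial note: these values are what callers pass as the 'reserve'
 * argument of btrfs_update_reserved_bytes() (declared below).  For example,
 * an allocation whose ENOSPC accounting is handled elsewhere would pass
 * RESERVE_ALLOC_NO_ACCOUNT, and the space is later returned with
 * RESERVE_FREE.
 */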

static int update_block_group(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root, u64 bytenr,
                              u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                struct btrfs_delayed_ref_node *node, u64 parent,
                                u64 root_objectid, u64 owner_objectid,
                                u64 owner_offset, int refs_to_drop,
                                struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 parent, u64 root_objectid,
                                     u64 flags, struct btrfs_disk_key *key,
                                     int level, struct btrfs_key *ins,
                                     int no_quota);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 flags,
                          int force);
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
                                       u64 num_bytes, int reserve,
                                       int delalloc);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
                               u64 num_bytes);
int btrfs_pin_extent(struct btrfs_root *root,
                     u64 bytenr, u64 num_bytes, int reserved);

static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
        smp_mb();
        return cache->cached == BTRFS_CACHE_FINISHED ||
                cache->cached == BTRFS_CACHE_ERROR;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}

static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
        atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count)) {
                WARN_ON(cache->pinned > 0);
                WARN_ON(cache->reserved > 0);
                kfree(cache->free_space_ctl);
                kfree(cache);
        }
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
                                struct btrfs_block_group_cache *block_group)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct btrfs_block_group_cache *cache;

        spin_lock(&info->block_group_cache_lock);
        p = &info->block_group_cache_tree.rb_node;

        while (*p) {
                parent = *p;
                cache = rb_entry(parent, struct btrfs_block_group_cache,
                                 cache_node);
                if (block_group->key.objectid < cache->key.objectid) {
                        p = &(*p)->rb_left;
                } else if (block_group->key.objectid > cache->key.objectid) {
                        p = &(*p)->rb_right;
                } else {
                        spin_unlock(&info->block_group_cache_lock);
                        return -EEXIST;
                }
        }

        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);

        if (info->first_logical_byte > block_group->key.objectid)
                info->first_logical_byte = block_group->key.objectid;

        spin_unlock(&info->block_group_cache_lock);

        return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                              int contains)
{
        struct btrfs_block_group_cache *cache, *ret = NULL;
        struct rb_node *n;
        u64 end, start;

        spin_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_node;

        while (n) {
                cache = rb_entry(n, struct btrfs_block_group_cache,
                                 cache_node);
                end = cache->key.objectid + cache->key.offset - 1;
                start = cache->key.objectid;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->key.objectid))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {
                                ret = cache;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        ret = cache;
                        break;
                }
        }
        if (ret) {
                btrfs_get_block_group(ret);
                if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
                        info->first_logical_byte = ret->key.objectid;
        }
        spin_unlock(&info->block_group_cache_lock);

        return ret;
}

static int add_excluded_extent(struct btrfs_root *root,
                               u64 start, u64 num_bytes)
{
        u64 end = start + num_bytes - 1;
        set_extent_bits(&root->fs_info->freed_extents[0],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        set_extent_bits(&root->fs_info->freed_extents[1],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
                                  struct btrfs_block_group_cache *cache)
{
        u64 start, end;

        start = cache->key.objectid;
        end = start + cache->key.offset - 1;

        clear_extent_bits(&root->fs_info->freed_extents[0],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
        clear_extent_bits(&root->fs_info->freed_extents[1],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
}

static int exclude_super_stripes(struct btrfs_root *root,
                                 struct btrfs_block_group_cache *cache)
{
        u64 bytenr;
        u64 *logical;
        int stripe_len;
        int i, nr, ret;

        if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
                stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
                cache->bytes_super += stripe_len;
                ret = add_excluded_extent(root, cache->key.objectid,
                                          stripe_len);
                if (ret)
                        return ret;
        }

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
                                       cache->key.objectid, bytenr,
                                       0, &logical, &nr, &stripe_len);
                if (ret)
                        return ret;

                while (nr--) {
                        u64 start, len;

                        if (logical[nr] > cache->key.objectid +
                            cache->key.offset)
                                continue;

                        if (logical[nr] + stripe_len <= cache->key.objectid)
                                continue;

                        start = logical[nr];
                        if (start < cache->key.objectid) {
                                start = cache->key.objectid;
                                len = (logical[nr] + stripe_len) - start;
                        } else {
                                len = min_t(u64, stripe_len,
                                            cache->key.objectid +
                                            cache->key.offset - start);
                        }

                        cache->bytes_super += len;
                        ret = add_excluded_extent(root, start, len);
                        if (ret) {
                                kfree(logical);
                                return ret;
                        }
                }

                kfree(logical);
        }
        return 0;
}
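
/*
 * Worked example (editorial, hypothetical numbers): for a block group at
 * [4M, 4M + 1G) and a super block stripe of stripe_len 64K starting at
 * logical 4M - 16K, the clamping above sets start to 4M and computes
 * len = (4M - 16K + 64K) - 4M = 48K, so only the part of the stripe that
 * actually falls inside the block group is excluded.
 */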

static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
        struct btrfs_caching_control *ctl;

        spin_lock(&cache->lock);
        if (!cache->caching_ctl) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        ctl = cache->caching_ctl;
        atomic_inc(&ctl->count);
        spin_unlock(&cache->lock);
        return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
        if (atomic_dec_and_test(&ctl->count))
                kfree(ctl);
}

/*
 * This is only called while caching a block group (see caching_thread
 * below).  Since we could have freed extents, we need to check the
 * pinned_extents tree for any extents that can't be used yet, because
 * their free space is not released until the transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_fs_info *info, u64 start, u64 end)
{
        u64 extent_start, extent_end, size, total_added = 0;
        int ret;

        while (start < end) {
                ret = find_first_extent_bit(info->pinned_extents, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY | EXTENT_UPTODATE,
                                            NULL);
                if (ret)
                        break;

                if (extent_start <= start) {
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;
                        total_added += size;
                        ret = btrfs_add_free_space(block_group, start,
                                                   size);
                        BUG_ON(ret); /* -ENOMEM or logic error */
                        start = extent_end + 1;
                } else {
                        break;
                }
        }

        if (start < end) {
                size = end - start;
                total_added += size;
                ret = btrfs_add_free_space(block_group, start, size);
                BUG_ON(ret); /* -ENOMEM or logic error */
        }

        return total_added;
}
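
/*
 * Worked example (editorial, hypothetical numbers): caching [0, 100) with
 * pinned extents [10, 19] and [50, 59] adds free space [0, 10) (start
 * becomes 20), then [20, 50) (start becomes 60), and finally the leftover
 * [60, 100), for a total_added of 80 bytes.
 */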

static noinline void caching_thread(struct btrfs_work *work)
{
        struct btrfs_block_group_cache *block_group;
        struct btrfs_fs_info *fs_info;
        struct btrfs_caching_control *caching_ctl;
        struct btrfs_root *extent_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u64 total_found = 0;
        u64 last = 0;
        u32 nritems;
        int ret = -ENOMEM;

        caching_ctl = container_of(work, struct btrfs_caching_control, work);
        block_group = caching_ctl->block_group;
        fs_info = block_group->fs_info;
        extent_root = fs_info->extent_root;

        path = btrfs_alloc_path();
        if (!path)
                goto out;

        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

        /*
         * We don't want to deadlock with somebody trying to allocate a new
         * extent for the extent root while also trying to search the extent
         * root to add free space.  So we skip locking and search the commit
         * root, since it's read-only.
         */
        path->skip_locking = 1;
        path->search_commit_root = 1;
        path->reada = 1;

        key.objectid = last;
        key.offset = 0;
        key.type = BTRFS_EXTENT_ITEM_KEY;
again:
        mutex_lock(&caching_ctl->mutex);
        /* need to make sure the commit_root doesn't disappear */
        down_read(&fs_info->commit_root_sem);

next:
        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto err;

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);

        while (1) {
                if (btrfs_fs_closing(fs_info) > 1) {
                        last = (u64)-1;
                        break;
                }

                if (path->slots[0] < nritems) {
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                } else {
                        ret = find_next_key(path, 0, &key);
                        if (ret)
                                break;

                        if (need_resched() ||
                            rwsem_is_contended(&fs_info->commit_root_sem)) {
                                caching_ctl->progress = last;
                                btrfs_release_path(path);
                                up_read(&fs_info->commit_root_sem);
                                mutex_unlock(&caching_ctl->mutex);
                                cond_resched();
                                goto again;
                        }

                        ret = btrfs_next_leaf(extent_root, path);
                        if (ret < 0)
                                goto err;
                        if (ret)
                                break;
                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        continue;
                }

                if (key.objectid < last) {
                        key.objectid = last;
                        key.offset = 0;
                        key.type = BTRFS_EXTENT_ITEM_KEY;

                        caching_ctl->progress = last;
                        btrfs_release_path(path);
                        goto next;
                }

                if (key.objectid < block_group->key.objectid) {
                        path->slots[0]++;
                        continue;
                }

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                if (key.type == BTRFS_EXTENT_ITEM_KEY ||
                    key.type == BTRFS_METADATA_ITEM_KEY) {
                        total_found += add_new_free_space(block_group,
                                                          fs_info, last,
                                                          key.objectid);
                        if (key.type == BTRFS_METADATA_ITEM_KEY)
                                last = key.objectid +
                                        fs_info->tree_root->nodesize;
                        else
                                last = key.objectid + key.offset;

                        if (total_found > (1024 * 1024 * 2)) {
                                total_found = 0;
                                wake_up(&caching_ctl->wait);
                        }
                }
                path->slots[0]++;
        }
        ret = 0;

        total_found += add_new_free_space(block_group, fs_info, last,
                                          block_group->key.objectid +
                                          block_group->key.offset);
        caching_ctl->progress = (u64)-1;

        spin_lock(&block_group->lock);
        block_group->caching_ctl = NULL;
        block_group->cached = BTRFS_CACHE_FINISHED;
        spin_unlock(&block_group->lock);

err:
        btrfs_free_path(path);
        up_read(&fs_info->commit_root_sem);

        free_excluded_extents(extent_root, block_group);

        mutex_unlock(&caching_ctl->mutex);
out:
        if (ret) {
                spin_lock(&block_group->lock);
                block_group->caching_ctl = NULL;
                block_group->cached = BTRFS_CACHE_ERROR;
                spin_unlock(&block_group->lock);
        }
        wake_up(&caching_ctl->wait);

        put_caching_control(caching_ctl);
        btrfs_put_block_group(block_group);
}

static int cache_block_group(struct btrfs_block_group_cache *cache,
                             int load_cache_only)
{
        DEFINE_WAIT(wait);
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl;
        int ret = 0;

        caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
        if (!caching_ctl)
                return -ENOMEM;

        INIT_LIST_HEAD(&caching_ctl->list);
        mutex_init(&caching_ctl->mutex);
        init_waitqueue_head(&caching_ctl->wait);
        caching_ctl->block_group = cache;
        caching_ctl->progress = cache->key.objectid;
        atomic_set(&caching_ctl->count, 1);
        btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
                        caching_thread, NULL, NULL);

        spin_lock(&cache->lock);
        /*
         * This should be a rare occasion, but it can happen: one thread
         * starts loading the space cache info, and then some other thread
         * starts a transaction commit which tries to do an allocation while
         * the first thread is still loading.  The previous loop should have
         * kept us from choosing this block group, but if we've moved to the
         * state where we will wait on caching block groups we need to first
         * check if we're doing a fast load here, so we can wait for it to
         * finish; otherwise we could end up allocating from a block group
         * whose cache gets evicted for one reason or another.
         */
        while (cache->cached == BTRFS_CACHE_FAST) {
                struct btrfs_caching_control *ctl;

                ctl = cache->caching_ctl;
                atomic_inc(&ctl->count);
                prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock(&cache->lock);

                schedule();

                finish_wait(&ctl->wait, &wait);
                put_caching_control(ctl);
                spin_lock(&cache->lock);
        }

        if (cache->cached != BTRFS_CACHE_NO) {
                spin_unlock(&cache->lock);
                kfree(caching_ctl);
                return 0;
        }
        WARN_ON(cache->caching_ctl);
        cache->caching_ctl = caching_ctl;
        cache->cached = BTRFS_CACHE_FAST;
        spin_unlock(&cache->lock);

        if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
                mutex_lock(&caching_ctl->mutex);
                ret = load_free_space_cache(fs_info, cache);

                spin_lock(&cache->lock);
                if (ret == 1) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_FINISHED;
                        cache->last_byte_to_unpin = (u64)-1;
                        caching_ctl->progress = (u64)-1;
                } else {
                        if (load_cache_only) {
                                cache->caching_ctl = NULL;
                                cache->cached = BTRFS_CACHE_NO;
                        } else {
                                cache->cached = BTRFS_CACHE_STARTED;
                                cache->has_caching_ctl = 1;
                        }
                }
                spin_unlock(&cache->lock);
                mutex_unlock(&caching_ctl->mutex);

                wake_up(&caching_ctl->wait);
                if (ret == 1) {
                        put_caching_control(caching_ctl);
                        free_excluded_extents(fs_info->extent_root, cache);
                        return 0;
                }
        } else {
                /*
                 * We are not going to do the fast caching, set cached to the
                 * appropriate value and wakeup any waiters.
                 */
                spin_lock(&cache->lock);
                if (load_cache_only) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_NO;
                } else {
                        cache->cached = BTRFS_CACHE_STARTED;
                        cache->has_caching_ctl = 1;
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
        }

        if (load_cache_only) {
                put_caching_control(caching_ctl);
                return 0;
        }

        down_write(&fs_info->commit_root_sem);
        atomic_inc(&caching_ctl->count);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        up_write(&fs_info->commit_root_sem);

        btrfs_get_block_group(cache);

        btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);

        return ret;
}
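
/*
 * Editorial note: load_cache_only == 1 means "try only the on-disk free
 * space cache"; if that fast load fails, the group is left uncached rather
 * than queueing caching_thread().  With load_cache_only == 0 a background
 * caching thread is queued and callers (e.g. the allocator in
 * find_free_extent() later in this file) can wait on caching_ctl->wait
 * for progress.
 */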

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 0);

        return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
                                                 struct btrfs_fs_info *info,
                                                 u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 1);

        return cache;
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
                                                  u64 flags)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags & flags) {
                        rcu_read_unlock();
                        return found;
                }
        }
        rcu_read_unlock();
        return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list)
                found->full = 0;
        rcu_read_unlock();
}

/* simple helper to search for an existing data extent at a given offset */
int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = start;
        key.offset = len;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
                                0, 0);
        btrfs_free_path(path);
        return ret;
}

/*
 * helper function to look up the reference count and flags of a tree block.
 *
 * the delayed ref head node is used to store the sum of all the reference
 * count modifications queued up in the rbtree.  it may also store the extent
 * flags to set.  this way you can check what the reference count and extent
 * flags will be once all of the delayed refs have been processed, without
 * actually processing them.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 bytenr,
                             u64 offset, int metadata, u64 *refs, u64 *flags)
{
        struct btrfs_delayed_ref_head *head;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_path *path;
        struct btrfs_extent_item *ei;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u32 item_size;
        u64 num_refs;
        u64 extent_flags;
        int ret;

        /*
         * If we don't have skinny metadata, don't bother doing anything
         * different
         */
        if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
                offset = root->nodesize;
                metadata = 0;
        }

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        if (!trans) {
                path->skip_locking = 1;
                path->search_commit_root = 1;
        }

search_again:
        key.objectid = bytenr;
        key.offset = offset;
        if (metadata)
                key.type = BTRFS_METADATA_ITEM_KEY;
        else
                key.type = BTRFS_EXTENT_ITEM_KEY;

        ret = btrfs_search_slot(trans, root->fs_info->extent_root,
                                &key, path, 0, 0);
        if (ret < 0)
                goto out_free;

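        /*
         * Editorial note: if the skinny METADATA_ITEM was not found, the
         * tree may still hold an old-style EXTENT_ITEM for the same block
         * (objectid == bytenr, offset == nodesize) in the previous slot, so
         * check for that before concluding the extent has no on-disk item.
         */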
        if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
                if (path->slots[0]) {
                        path->slots[0]--;
                        btrfs_item_key_to_cpu(path->nodes[0], &key,
                                              path->slots[0]);
                        if (key.objectid == bytenr &&
                            key.type == BTRFS_EXTENT_ITEM_KEY &&
                            key.offset == root->nodesize)
                                ret = 0;
                }
        }

        if (ret == 0) {
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
                if (item_size >= sizeof(*ei)) {
                        ei = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_extent_item);
                        num_refs = btrfs_extent_refs(leaf, ei);
                        extent_flags = btrfs_extent_flags(leaf, ei);
                } else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                        struct btrfs_extent_item_v0 *ei0;
                        BUG_ON(item_size != sizeof(*ei0));
                        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_item_v0);
                        num_refs = btrfs_extent_refs_v0(leaf, ei0);
                        /* FIXME: this isn't correct for data */
                        extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
                        BUG();
#endif
                }
                BUG_ON(num_refs == 0);
        } else {
                num_refs = 0;
                extent_flags = 0;
                ret = 0;
        }

        if (!trans)
                goto out;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
        head = btrfs_find_delayed_ref_head(trans, bytenr);
        if (head) {
                if (!mutex_trylock(&head->mutex)) {
                        atomic_inc(&head->node.refs);
                        spin_unlock(&delayed_refs->lock);

                        btrfs_release_path(path);

                        /*
                         * Mutex was contended, block until it's released and
                         * try again
                         */
                        mutex_lock(&head->mutex);
                        mutex_unlock(&head->mutex);
                        btrfs_put_delayed_ref(&head->node);
                        goto search_again;
                }
                spin_lock(&head->lock);
                if (head->extent_op && head->extent_op->update_flags)
                        extent_flags |= head->extent_op->flags_to_set;
                else
                        BUG_ON(num_refs == 0);

                num_refs += head->node.ref_mod;
                spin_unlock(&head->lock);
                mutex_unlock(&head->mutex);
        }
        spin_unlock(&delayed_refs->lock);
out:
        WARN_ON(num_refs == 0);
        if (refs)
                *refs = num_refs;
        if (flags)
                *flags = extent_flags;
out_free:
        btrfs_free_path(path);
        return ret;
}

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  Implicit back refs are optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key.  This information allows us to find the block by
 * b-tree searching.  Full back refs are for pointers in tree blocks not
 * referenced by their owner trees.  The location of the tree block is
 * recorded in the back refs.  The full back ref is actually generic and
 * could be used in all cases where the implicit back ref is used.  The
 * major shortcoming of the full back ref is its overhead: every time a
 * tree block gets COWed, we have to update the back ref entries for all
 * pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it.  So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for pointers
 * in the block.  Remove these full back refs and add implicit back refs
 * for every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * pointers in the block.  Add full back refs for every pointer in the
 * block and increase lower level extents' reference counts.  The original
 * implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer in
 * the new block and increase lower level extents' reference counts.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent, and
 * the key type is used to differentiate between types of back refs.
 * The key offset has different meanings for different types of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * consist of a key only.  The key offset for the implicit back refs is the
 * objectid of the block's owner tree.  The key offset for the full back
 * refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block is required.  This information is stored in the
 * tree block info structure.
 */

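/*
 * Illustrative example (editorial, hypothetical numbers): a data extent at
 * bytenr 13631488, referenced by inode 257 at file offset 0 in subvolume 5,
 * gets the implicit back ref key
 *
 *     (13631488, BTRFS_EXTENT_DATA_REF_KEY, hash_extent_data_ref(5, 257, 0))
 *
 * while a full (shared) back ref from a leaf at bytenr 29360128 gets
 *
 *     (13631488, BTRFS_SHARED_DATA_REF_KEY, 29360128)
 */
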
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct btrfs_path *path,
                                  u64 owner, u32 extra_size)
{
        struct btrfs_extent_item *item;
        struct btrfs_extent_item_v0 *ei0;
        struct btrfs_extent_ref_v0 *ref0;
        struct btrfs_tree_block_info *bi;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        u32 new_size = sizeof(*item);
        u64 refs;
        int ret;

        leaf = path->nodes[0];
        BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                             struct btrfs_extent_item_v0);
        refs = btrfs_extent_refs_v0(leaf, ei0);

        if (owner == (u64)-1) {
                while (1) {
                        if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                                ret = btrfs_next_leaf(root, path);
                                if (ret < 0)
                                        return ret;
                                BUG_ON(ret > 0); /* Corruption */
                                leaf = path->nodes[0];
                        }
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0]);
                        BUG_ON(key.objectid != found_key.objectid);
                        if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
                                path->slots[0]++;
                                continue;
                        }
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                              struct btrfs_extent_ref_v0);
                        owner = btrfs_ref_objectid_v0(leaf, ref0);
                        break;
                }
        }
        btrfs_release_path(path);

        if (owner < BTRFS_FIRST_FREE_OBJECTID)
                new_size += sizeof(*bi);

        new_size -= sizeof(*ei0);
        ret = btrfs_search_slot(trans, root, &key, path,
                                new_size + extra_size, 1);
        if (ret < 0)
                return ret;
        BUG_ON(ret); /* Corruption */

        btrfs_extend_item(root, path, new_size);

        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        btrfs_set_extent_refs(leaf, item, refs);
        /* FIXME: get real generation */
        btrfs_set_extent_generation(leaf, item, 0);
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                btrfs_set_extent_flags(leaf, item,
                                       BTRFS_EXTENT_FLAG_TREE_BLOCK |
                                       BTRFS_BLOCK_FLAG_FULL_BACKREF);
                bi = (struct btrfs_tree_block_info *)(item + 1);
                /* FIXME: get first key of the block */
                memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
                btrfs_set_tree_block_level(leaf, bi, (int)owner);
        } else {
                btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
        }
        btrfs_mark_buffer_dirty(leaf);
        return 0;
}
#endif

static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
        u32 high_crc = ~(u32)0;
        u32 low_crc = ~(u32)0;
        __le64 lenum;

        lenum = cpu_to_le64(root_objectid);
        high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(owner);
        low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(offset);
        low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));

        return ((u64)high_crc << 31) ^ (u64)low_crc;
}
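
/*
 * Editorial note: the '<< 31' above (rather than '<< 32') overlaps one bit
 * of the two CRCs and is widely believed to be a historical accident; it
 * must be kept as-is because the resulting hash is baked into on-disk key
 * offsets.
 */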
1075
1076 static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
1077                                      struct btrfs_extent_data_ref *ref)
1078 {
1079         return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
1080                                     btrfs_extent_data_ref_objectid(leaf, ref),
1081                                     btrfs_extent_data_ref_offset(leaf, ref));
1082 }
1083
1084 static int match_extent_data_ref(struct extent_buffer *leaf,
1085                                  struct btrfs_extent_data_ref *ref,
1086                                  u64 root_objectid, u64 owner, u64 offset)
1087 {
1088         if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
1089             btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
1090             btrfs_extent_data_ref_offset(leaf, ref) != offset)
1091                 return 0;
1092         return 1;
1093 }
1094
1095 static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
1096                                            struct btrfs_root *root,
1097                                            struct btrfs_path *path,
1098                                            u64 bytenr, u64 parent,
1099                                            u64 root_objectid,
1100                                            u64 owner, u64 offset)
1101 {
1102         struct btrfs_key key;
1103         struct btrfs_extent_data_ref *ref;
1104         struct extent_buffer *leaf;
1105         u32 nritems;
1106         int ret;
1107         int recow;
1108         int err = -ENOENT;
1109
1110         key.objectid = bytenr;
1111         if (parent) {
1112                 key.type = BTRFS_SHARED_DATA_REF_KEY;
1113                 key.offset = parent;
1114         } else {
1115                 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1116                 key.offset = hash_extent_data_ref(root_objectid,
1117                                                   owner, offset);
1118         }
1119 again:
1120         recow = 0;
1121         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1122         if (ret < 0) {
1123                 err = ret;
1124                 goto fail;
1125         }
1126
1127         if (parent) {
1128                 if (!ret)
1129                         return 0;
1130 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1131                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1132                 btrfs_release_path(path);
1133                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1134                 if (ret < 0) {
1135                         err = ret;
1136                         goto fail;
1137                 }
1138                 if (!ret)
1139                         return 0;
1140 #endif
1141                 goto fail;
1142         }
1143
1144         leaf = path->nodes[0];
1145         nritems = btrfs_header_nritems(leaf);
1146         while (1) {
1147                 if (path->slots[0] >= nritems) {
1148                         ret = btrfs_next_leaf(root, path);
1149                         if (ret < 0)
1150                                 err = ret;
1151                         if (ret)
1152                                 goto fail;
1153
1154                         leaf = path->nodes[0];
1155                         nritems = btrfs_header_nritems(leaf);
1156                         recow = 1;
1157                 }
1158
1159                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1160                 if (key.objectid != bytenr ||
1161                     key.type != BTRFS_EXTENT_DATA_REF_KEY)
1162                         goto fail;
1163
1164                 ref = btrfs_item_ptr(leaf, path->slots[0],
1165                                      struct btrfs_extent_data_ref);
1166
1167                 if (match_extent_data_ref(leaf, ref, root_objectid,
1168                                           owner, offset)) {
1169                         if (recow) {
1170                                 btrfs_release_path(path);
1171                                 goto again;
1172                         }
1173                         err = 0;
1174                         break;
1175                 }
1176                 path->slots[0]++;
1177         }
1178 fail:
1179         return err;
1180 }
1181
1182 static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
1183                                            struct btrfs_root *root,
1184                                            struct btrfs_path *path,
1185                                            u64 bytenr, u64 parent,
1186                                            u64 root_objectid, u64 owner,
1187                                            u64 offset, int refs_to_add)
1188 {
1189         struct btrfs_key key;
1190         struct extent_buffer *leaf;
1191         u32 size;
1192         u32 num_refs;
1193         int ret;
1194
1195         key.objectid = bytenr;
1196         if (parent) {
1197                 key.type = BTRFS_SHARED_DATA_REF_KEY;
1198                 key.offset = parent;
1199                 size = sizeof(struct btrfs_shared_data_ref);
1200         } else {
1201                 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1202                 key.offset = hash_extent_data_ref(root_objectid,
1203                                                   owner, offset);
1204                 size = sizeof(struct btrfs_extent_data_ref);
1205         }
1206
1207         ret = btrfs_insert_empty_item(trans, root, path, &key, size);
1208         if (ret && ret != -EEXIST)
1209                 goto fail;
1210
1211         leaf = path->nodes[0];
1212         if (parent) {
1213                 struct btrfs_shared_data_ref *ref;
1214                 ref = btrfs_item_ptr(leaf, path->slots[0],
1215                                      struct btrfs_shared_data_ref);
1216                 if (ret == 0) {
1217                         btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
1218                 } else {
1219                         num_refs = btrfs_shared_data_ref_count(leaf, ref);
1220                         num_refs += refs_to_add;
1221                         btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
1222                 }
1223         } else {
1224                 struct btrfs_extent_data_ref *ref;
1225                 while (ret == -EEXIST) {
1226                         ref = btrfs_item_ptr(leaf, path->slots[0],
1227                                              struct btrfs_extent_data_ref);
1228                         if (match_extent_data_ref(leaf, ref, root_objectid,
1229                                                   owner, offset))
1230                                 break;
1231                         btrfs_release_path(path);
1232                         key.offset++;
1233                         ret = btrfs_insert_empty_item(trans, root, path, &key,
1234                                                       size);
1235                         if (ret && ret != -EEXIST)
1236                                 goto fail;
1237
1238                         leaf = path->nodes[0];
1239                 }
1240                 ref = btrfs_item_ptr(leaf, path->slots[0],
1241                                      struct btrfs_extent_data_ref);
1242                 if (ret == 0) {
1243                         btrfs_set_extent_data_ref_root(leaf, ref,
1244                                                        root_objectid);
1245                         btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
1246                         btrfs_set_extent_data_ref_offset(leaf, ref, offset);
1247                         btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
1248                 } else {
1249                         num_refs = btrfs_extent_data_ref_count(leaf, ref);
1250                         num_refs += refs_to_add;
1251                         btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
1252                 }
1253         }
1254         btrfs_mark_buffer_dirty(leaf);
1255         ret = 0;
1256 fail:
1257         btrfs_release_path(path);
1258         return ret;
1259 }
1260
1261 static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
1262                                            struct btrfs_root *root,
1263                                            struct btrfs_path *path,
1264                                            int refs_to_drop, int *last_ref)
1265 {
1266         struct btrfs_key key;
1267         struct btrfs_extent_data_ref *ref1 = NULL;
1268         struct btrfs_shared_data_ref *ref2 = NULL;
1269         struct extent_buffer *leaf;
1270         u32 num_refs = 0;
1271         int ret = 0;
1272
1273         leaf = path->nodes[0];
1274         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1275
1276         if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1277                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1278                                       struct btrfs_extent_data_ref);
1279                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1280         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1281                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1282                                       struct btrfs_shared_data_ref);
1283                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1284 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1285         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1286                 struct btrfs_extent_ref_v0 *ref0;
1287                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1288                                       struct btrfs_extent_ref_v0);
1289                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1290 #endif
1291         } else {
1292                 BUG();
1293         }
1294
1295         BUG_ON(num_refs < refs_to_drop);
1296         num_refs -= refs_to_drop;
1297
1298         if (num_refs == 0) {
1299                 ret = btrfs_del_item(trans, root, path);
1300                 *last_ref = 1;
1301         } else {
1302                 if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
1303                         btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
1304                 else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
1305                         btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
1306 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1307                 else {
1308                         struct btrfs_extent_ref_v0 *ref0;
1309                         ref0 = btrfs_item_ptr(leaf, path->slots[0],
1310                                         struct btrfs_extent_ref_v0);
1311                         btrfs_set_ref_count_v0(leaf, ref0, num_refs);
1312                 }
1313 #endif
1314                 btrfs_mark_buffer_dirty(leaf);
1315         }
1316         return ret;
1317 }
1318
1319 static noinline u32 extent_data_ref_count(struct btrfs_path *path,
1320                                           struct btrfs_extent_inline_ref *iref)
1321 {
1322         struct btrfs_key key;
1323         struct extent_buffer *leaf;
1324         struct btrfs_extent_data_ref *ref1;
1325         struct btrfs_shared_data_ref *ref2;
1326         u32 num_refs = 0;
1327
1328         leaf = path->nodes[0];
1329         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1330         if (iref) {
1331                 if (btrfs_extent_inline_ref_type(leaf, iref) ==
1332                     BTRFS_EXTENT_DATA_REF_KEY) {
1333                         ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1334                         num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1335                 } else {
1336                         ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1337                         num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1338                 }
1339         } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1340                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1341                                       struct btrfs_extent_data_ref);
1342                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1343         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1344                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1345                                       struct btrfs_shared_data_ref);
1346                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1347 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1348         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1349                 struct btrfs_extent_ref_v0 *ref0;
1350                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1351                                       struct btrfs_extent_ref_v0);
1352                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1353 #endif
1354         } else {
1355                 WARN_ON(1);
1356         }
1357         return num_refs;
1358 }
1359
1360 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1361                                           struct btrfs_root *root,
1362                                           struct btrfs_path *path,
1363                                           u64 bytenr, u64 parent,
1364                                           u64 root_objectid)
1365 {
1366         struct btrfs_key key;
1367         int ret;
1368
1369         key.objectid = bytenr;
1370         if (parent) {
1371                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1372                 key.offset = parent;
1373         } else {
1374                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1375                 key.offset = root_objectid;
1376         }
1377
1378         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1379         if (ret > 0)
1380                 ret = -ENOENT;
1381 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1382         if (ret == -ENOENT && parent) {
1383                 btrfs_release_path(path);
1384                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1385                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1386                 if (ret > 0)
1387                         ret = -ENOENT;
1388         }
1389 #endif
1390         return ret;
1391 }
1392
1393 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1394                                           struct btrfs_root *root,
1395                                           struct btrfs_path *path,
1396                                           u64 bytenr, u64 parent,
1397                                           u64 root_objectid)
1398 {
1399         struct btrfs_key key;
1400         int ret;
1401
1402         key.objectid = bytenr;
1403         if (parent) {
1404                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1405                 key.offset = parent;
1406         } else {
1407                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1408                 key.offset = root_objectid;
1409         }
1410
1411         ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1412         btrfs_release_path(path);
1413         return ret;
1414 }
1415
1416 static inline int extent_ref_type(u64 parent, u64 owner)
1417 {
1418         int type;
1419         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1420                 if (parent > 0)
1421                         type = BTRFS_SHARED_BLOCK_REF_KEY;
1422                 else
1423                         type = BTRFS_TREE_BLOCK_REF_KEY;
1424         } else {
1425                 if (parent > 0)
1426                         type = BTRFS_SHARED_DATA_REF_KEY;
1427                 else
1428                         type = BTRFS_EXTENT_DATA_REF_KEY;
1429         }
1430         return type;
1431 }
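
/*
 * The mapping above, spelled out: an owner below BTRFS_FIRST_FREE_OBJECTID
 * marks a tree block, and a non-zero parent marks a shared (full) back ref:
 *
 *	tree block,  parent set:   BTRFS_SHARED_BLOCK_REF_KEY
 *	tree block,  no parent:    BTRFS_TREE_BLOCK_REF_KEY
 *	data extent, parent set:   BTRFS_SHARED_DATA_REF_KEY
 *	data extent, no parent:    BTRFS_EXTENT_DATA_REF_KEY
 */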
1432
1433 static int find_next_key(struct btrfs_path *path, int level,
1434                          struct btrfs_key *key)
1436 {
1437         for (; level < BTRFS_MAX_LEVEL; level++) {
1438                 if (!path->nodes[level])
1439                         break;
1440                 if (path->slots[level] + 1 >=
1441                     btrfs_header_nritems(path->nodes[level]))
1442                         continue;
1443                 if (level == 0)
1444                         btrfs_item_key_to_cpu(path->nodes[level], key,
1445                                               path->slots[level] + 1);
1446                 else
1447                         btrfs_node_key_to_cpu(path->nodes[level], key,
1448                                               path->slots[level] + 1);
1449                 return 0;
1450         }
1451         return 1;
1452 }
1453
1454 /*
1455  * Look for an inline back ref. If the back ref is found, *ref_ret is set
1456  * to the address of the inline back ref, and 0 is returned.
1457  *
1458  * If the back ref isn't found, *ref_ret is set to the address where it
1459  * should be inserted, and -ENOENT is returned.
1460  *
1461  * If insert is true and there are too many inline back refs, the path
1462  * points to the extent item, and -EAGAIN is returned.
1463  *
1464  * NOTE: inline back refs are ordered in the same way that back ref
1465  *       items in the tree are ordered.
1466  */
1467 static noinline_for_stack
1468 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1469                                  struct btrfs_root *root,
1470                                  struct btrfs_path *path,
1471                                  struct btrfs_extent_inline_ref **ref_ret,
1472                                  u64 bytenr, u64 num_bytes,
1473                                  u64 parent, u64 root_objectid,
1474                                  u64 owner, u64 offset, int insert)
1475 {
1476         struct btrfs_key key;
1477         struct extent_buffer *leaf;
1478         struct btrfs_extent_item *ei;
1479         struct btrfs_extent_inline_ref *iref;
1480         u64 flags;
1481         u64 item_size;
1482         unsigned long ptr;
1483         unsigned long end;
1484         int extra_size;
1485         int type;
1486         int want;
1487         int ret;
1488         int err = 0;
1489         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
1490                                                  SKINNY_METADATA);
1491
1492         key.objectid = bytenr;
1493         key.type = BTRFS_EXTENT_ITEM_KEY;
1494         key.offset = num_bytes;
1495
1496         want = extent_ref_type(parent, owner);
1497         if (insert) {
1498                 extra_size = btrfs_extent_inline_ref_size(want);
1499                 path->keep_locks = 1;
1500         } else
1501                 extra_size = -1;
1502
1503         /*
1504          * Owner is our parent level, so we can just add one to get the level
1505          * for the block we are interested in.
1506          */
1507         if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
1508                 key.type = BTRFS_METADATA_ITEM_KEY;
1509                 key.offset = owner;
1510         }
1511
1512 again:
1513         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1514         if (ret < 0) {
1515                 err = ret;
1516                 goto out;
1517         }
1518
1519         /*
1520          * We may be a newly converted file system which still has the old fat
1521          * extent entries for metadata, so try and see if we have one of those.
1522          */
1523         if (ret > 0 && skinny_metadata) {
1524                 skinny_metadata = false;
1525                 if (path->slots[0]) {
1526                         path->slots[0]--;
1527                         btrfs_item_key_to_cpu(path->nodes[0], &key,
1528                                               path->slots[0]);
1529                         if (key.objectid == bytenr &&
1530                             key.type == BTRFS_EXTENT_ITEM_KEY &&
1531                             key.offset == num_bytes)
1532                                 ret = 0;
1533                 }
1534                 if (ret) {
1535                         key.objectid = bytenr;
1536                         key.type = BTRFS_EXTENT_ITEM_KEY;
1537                         key.offset = num_bytes;
1538                         btrfs_release_path(path);
1539                         goto again;
1540                 }
1541         }
1542
1543         if (ret && !insert) {
1544                 err = -ENOENT;
1545                 goto out;
1546         } else if (WARN_ON(ret)) {
1547                 err = -EIO;
1548                 goto out;
1549         }
1550
1551         leaf = path->nodes[0];
1552         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1553 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1554         if (item_size < sizeof(*ei)) {
1555                 if (!insert) {
1556                         err = -ENOENT;
1557                         goto out;
1558                 }
1559                 ret = convert_extent_item_v0(trans, root, path, owner,
1560                                              extra_size);
1561                 if (ret < 0) {
1562                         err = ret;
1563                         goto out;
1564                 }
1565                 leaf = path->nodes[0];
1566                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1567         }
1568 #endif
1569         BUG_ON(item_size < sizeof(*ei));
1570
1571         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1572         flags = btrfs_extent_flags(leaf, ei);
1573
1574         ptr = (unsigned long)(ei + 1);
1575         end = (unsigned long)ei + item_size;
1576
1577         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
1578                 ptr += sizeof(struct btrfs_tree_block_info);
1579                 BUG_ON(ptr > end);
1580         }
1581
1582         err = -ENOENT;
1583         while (1) {
1584                 if (ptr >= end) {
1585                         WARN_ON(ptr > end);
1586                         break;
1587                 }
1588                 iref = (struct btrfs_extent_inline_ref *)ptr;
1589                 type = btrfs_extent_inline_ref_type(leaf, iref);
1590                 if (want < type)
1591                         break;
1592                 if (want > type) {
1593                         ptr += btrfs_extent_inline_ref_size(type);
1594                         continue;
1595                 }
1596
1597                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1598                         struct btrfs_extent_data_ref *dref;
1599                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1600                         if (match_extent_data_ref(leaf, dref, root_objectid,
1601                                                   owner, offset)) {
1602                                 err = 0;
1603                                 break;
1604                         }
1605                         if (hash_extent_data_ref_item(leaf, dref) <
1606                             hash_extent_data_ref(root_objectid, owner, offset))
1607                                 break;
1608                 } else {
1609                         u64 ref_offset;
1610                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1611                         if (parent > 0) {
1612                                 if (parent == ref_offset) {
1613                                         err = 0;
1614                                         break;
1615                                 }
1616                                 if (ref_offset < parent)
1617                                         break;
1618                         } else {
1619                                 if (root_objectid == ref_offset) {
1620                                         err = 0;
1621                                         break;
1622                                 }
1623                                 if (ref_offset < root_objectid)
1624                                         break;
1625                         }
1626                 }
1627                 ptr += btrfs_extent_inline_ref_size(type);
1628         }
1629         if (err == -ENOENT && insert) {
1630                 if (item_size + extra_size >=
1631                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1632                         err = -EAGAIN;
1633                         goto out;
1634                 }
1635                 /*
1636                  * To add a new inline back ref, we have to make sure
1637                  * there is no corresponding back ref item.
1638                  * For simplicity, we just do not add a new inline back
1639                  * ref if there is any kind of item for this block.
1640                  */
1641                 if (find_next_key(path, 0, &key) == 0 &&
1642                     key.objectid == bytenr &&
1643                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1644                         err = -EAGAIN;
1645                         goto out;
1646                 }
1647         }
1648         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1649 out:
1650         if (insert) {
1651                 path->keep_locks = 0;
1652                 btrfs_unlock_up_safe(path, 1);
1653         }
1654         return err;
1655 }
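
/*
 * Sketch of the return contract above, mirroring how the callers in this
 * file react (insert_inline_extent_backref handles 0 and -ENOENT,
 * __btrfs_inc_extent_ref falls back to a keyed backref on -EAGAIN):
 *
 *	ret = lookup_inline_extent_backref(..., &iref, ..., 1);
 *	ret == 0:	update the existing inline ref through iref
 *	ret == -ENOENT:	insert a new inline ref at iref
 *	ret == -EAGAIN:	no room inline, insert a separate backref item
 */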
1656
1657 /*
1658  * helper to add a new inline back ref
1659  */
1660 static noinline_for_stack
1661 void setup_inline_extent_backref(struct btrfs_root *root,
1662                                  struct btrfs_path *path,
1663                                  struct btrfs_extent_inline_ref *iref,
1664                                  u64 parent, u64 root_objectid,
1665                                  u64 owner, u64 offset, int refs_to_add,
1666                                  struct btrfs_delayed_extent_op *extent_op)
1667 {
1668         struct extent_buffer *leaf;
1669         struct btrfs_extent_item *ei;
1670         unsigned long ptr;
1671         unsigned long end;
1672         unsigned long item_offset;
1673         u64 refs;
1674         int size;
1675         int type;
1676
1677         leaf = path->nodes[0];
1678         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1679         item_offset = (unsigned long)iref - (unsigned long)ei;
1680
1681         type = extent_ref_type(parent, owner);
1682         size = btrfs_extent_inline_ref_size(type);
1683
1684         btrfs_extend_item(root, path, size);
1685
1686         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1687         refs = btrfs_extent_refs(leaf, ei);
1688         refs += refs_to_add;
1689         btrfs_set_extent_refs(leaf, ei, refs);
1690         if (extent_op)
1691                 __run_delayed_extent_op(extent_op, leaf, ei);
1692
1693         ptr = (unsigned long)ei + item_offset;
1694         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1695         if (ptr < end - size)
1696                 memmove_extent_buffer(leaf, ptr + size, ptr,
1697                                       end - size - ptr);
1698
1699         iref = (struct btrfs_extent_inline_ref *)ptr;
1700         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1701         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1702                 struct btrfs_extent_data_ref *dref;
1703                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1704                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1705                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1706                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1707                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1708         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1709                 struct btrfs_shared_data_ref *sref;
1710                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1711                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1712                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1713         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1714                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1715         } else {
1716                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1717         }
1718         btrfs_mark_buffer_dirty(leaf);
1719 }
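
/*
 * Illustrative userspace sketch (not kernel code) of the gap-opening
 * memmove above: a plain byte buffer stands in for the extent buffer, and
 * the tail of the item is shifted right by 'size' so the new inline ref
 * can be written at 'offset'.  Names here are hypothetical, and the sketch
 * assumes a tail actually exists after the insertion point.
 */
#if 0
#include <stdio.h>
#include <string.h>

static void open_gap(char *item, size_t old_size, size_t offset, size_t size)
{
	/* shift [offset, old_size) right by size bytes */
	memmove(item + offset + size, item + offset, old_size - offset);
	memset(item + offset, '_', size);	/* placeholder for the new ref */
}

int main(void)
{
	char item[16] = "AABBCC";	/* three 2-byte "refs" */

	open_gap(item, 6, 2, 2);	/* make room after "AA" */
	printf("%.8s\n", item);		/* prints "AA__BBCC" */
	return 0;
}
#endif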
1720
1721 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1722                                  struct btrfs_root *root,
1723                                  struct btrfs_path *path,
1724                                  struct btrfs_extent_inline_ref **ref_ret,
1725                                  u64 bytenr, u64 num_bytes, u64 parent,
1726                                  u64 root_objectid, u64 owner, u64 offset)
1727 {
1728         int ret;
1729
1730         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1731                                            bytenr, num_bytes, parent,
1732                                            root_objectid, owner, offset, 0);
1733         if (ret != -ENOENT)
1734                 return ret;
1735
1736         btrfs_release_path(path);
1737         *ref_ret = NULL;
1738
1739         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1740                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1741                                             root_objectid);
1742         } else {
1743                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1744                                              root_objectid, owner, offset);
1745         }
1746         return ret;
1747 }
1748
1749 /*
1750  * helper to update or remove an inline back ref
1751  */
1752 static noinline_for_stack
1753 void update_inline_extent_backref(struct btrfs_root *root,
1754                                   struct btrfs_path *path,
1755                                   struct btrfs_extent_inline_ref *iref,
1756                                   int refs_to_mod,
1757                                   struct btrfs_delayed_extent_op *extent_op,
1758                                   int *last_ref)
1759 {
1760         struct extent_buffer *leaf;
1761         struct btrfs_extent_item *ei;
1762         struct btrfs_extent_data_ref *dref = NULL;
1763         struct btrfs_shared_data_ref *sref = NULL;
1764         unsigned long ptr;
1765         unsigned long end;
1766         u32 item_size;
1767         int size;
1768         int type;
1769         u64 refs;
1770
1771         leaf = path->nodes[0];
1772         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1773         refs = btrfs_extent_refs(leaf, ei);
1774         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1775         refs += refs_to_mod;
1776         btrfs_set_extent_refs(leaf, ei, refs);
1777         if (extent_op)
1778                 __run_delayed_extent_op(extent_op, leaf, ei);
1779
1780         type = btrfs_extent_inline_ref_type(leaf, iref);
1781
1782         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1783                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1784                 refs = btrfs_extent_data_ref_count(leaf, dref);
1785         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1786                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1787                 refs = btrfs_shared_data_ref_count(leaf, sref);
1788         } else {
1789                 refs = 1;
1790                 BUG_ON(refs_to_mod != -1);
1791         }
1792
1793         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1794         refs += refs_to_mod;
1795
1796         if (refs > 0) {
1797                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1798                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1799                 else
1800                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1801         } else {
1802                 *last_ref = 1;
1803                 size =  btrfs_extent_inline_ref_size(type);
1804                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1805                 ptr = (unsigned long)iref;
1806                 end = (unsigned long)ei + item_size;
1807                 if (ptr + size < end)
1808                         memmove_extent_buffer(leaf, ptr, ptr + size,
1809                                               end - ptr - size);
1810                 item_size -= size;
1811                 btrfs_truncate_item(root, path, item_size, 1);
1812         }
1813         btrfs_mark_buffer_dirty(leaf);
1814 }
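
/*
 * Mirror of the insertion case: a userspace sketch (not kernel code) of
 * dropping the inline ref at 'offset' by sliding the tail left over it and
 * shrinking the item, as the memmove + btrfs_truncate_item pair above does.
 * Names are hypothetical.
 */
#if 0
#include <stdio.h>
#include <string.h>

static size_t close_gap(char *item, size_t item_size, size_t offset,
			size_t size)
{
	if (offset + size < item_size)	/* refs remain after the victim */
		memmove(item + offset, item + offset + size,
			item_size - offset - size);
	return item_size - size;	/* new, truncated item size */
}

int main(void)
{
	char item[] = "AABBCC";			/* three 2-byte "refs" */
	size_t n = close_gap(item, 6, 2, 2);	/* drop "BB" */

	printf("%.*s\n", (int)n, item);		/* prints "AACC" */
	return 0;
}
#endif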
1815
1816 static noinline_for_stack
1817 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1818                                  struct btrfs_root *root,
1819                                  struct btrfs_path *path,
1820                                  u64 bytenr, u64 num_bytes, u64 parent,
1821                                  u64 root_objectid, u64 owner,
1822                                  u64 offset, int refs_to_add,
1823                                  struct btrfs_delayed_extent_op *extent_op)
1824 {
1825         struct btrfs_extent_inline_ref *iref;
1826         int ret;
1827
1828         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1829                                            bytenr, num_bytes, parent,
1830                                            root_objectid, owner, offset, 1);
1831         if (ret == 0) {
1832                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1833                 update_inline_extent_backref(root, path, iref,
1834                                              refs_to_add, extent_op, NULL);
1835         } else if (ret == -ENOENT) {
1836                 setup_inline_extent_backref(root, path, iref, parent,
1837                                             root_objectid, owner, offset,
1838                                             refs_to_add, extent_op);
1839                 ret = 0;
1840         }
1841         return ret;
1842 }
1843
1844 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1845                                  struct btrfs_root *root,
1846                                  struct btrfs_path *path,
1847                                  u64 bytenr, u64 parent, u64 root_objectid,
1848                                  u64 owner, u64 offset, int refs_to_add)
1849 {
1850         int ret;
1851         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1852                 BUG_ON(refs_to_add != 1);
1853                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1854                                             parent, root_objectid);
1855         } else {
1856                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1857                                              parent, root_objectid,
1858                                              owner, offset, refs_to_add);
1859         }
1860         return ret;
1861 }
1862
1863 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1864                                  struct btrfs_root *root,
1865                                  struct btrfs_path *path,
1866                                  struct btrfs_extent_inline_ref *iref,
1867                                  int refs_to_drop, int is_data, int *last_ref)
1868 {
1869         int ret = 0;
1870
1871         BUG_ON(!is_data && refs_to_drop != 1);
1872         if (iref) {
1873                 update_inline_extent_backref(root, path, iref,
1874                                              -refs_to_drop, NULL, last_ref);
1875         } else if (is_data) {
1876                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop,
1877                                              last_ref);
1878         } else {
1879                 *last_ref = 1;
1880                 ret = btrfs_del_item(trans, root, path);
1881         }
1882         return ret;
1883 }
1884
1885 #define in_range(b, first, len)        ((b) >= (first) && (b) < (first) + (len))
1886 static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
1887                                u64 *discarded_bytes)
1888 {
1889         int j, ret = 0;
1890         u64 bytes_left, end;
1891         u64 aligned_start = ALIGN(start, 1 << 9);
1892
1893         if (WARN_ON(start != aligned_start)) {
1894                 len -= aligned_start - start;
1895                 len = round_down(len, 1 << 9);
1896                 start = aligned_start;
1897         }
1898
1899         *discarded_bytes = 0;
1900
1901         if (!len)
1902                 return 0;
1903
1904         end = start + len;
1905         bytes_left = len;
1906
1907         /* Skip any superblocks on this device. */
1908         for (j = 0; j < BTRFS_SUPER_MIRROR_MAX; j++) {
1909                 u64 sb_start = btrfs_sb_offset(j);
1910                 u64 sb_end = sb_start + BTRFS_SUPER_INFO_SIZE;
1911                 u64 size = sb_start - start;
1912
1913                 if (!in_range(sb_start, start, bytes_left) &&
1914                     !in_range(sb_end, start, bytes_left) &&
1915                     !in_range(start, sb_start, BTRFS_SUPER_INFO_SIZE))
1916                         continue;
1917
1918                 /*
1919                  * Superblock spans beginning of range.  Adjust start and
1920                  * try again.
1921                  */
1922                 if (sb_start <= start) {
1923                         start += sb_end - start;
1924                         if (start > end) {
1925                                 bytes_left = 0;
1926                                 break;
1927                         }
1928                         bytes_left = end - start;
1929                         continue;
1930                 }
1931
1932                 if (size) {
1933                         ret = blkdev_issue_discard(bdev, start >> 9, size >> 9,
1934                                                    GFP_NOFS, 0);
1935                         if (!ret)
1936                                 *discarded_bytes += size;
1937                         else if (ret != -EOPNOTSUPP)
1938                                 return ret;
1939                 }
1940
1941                 start = sb_end;
1942                 if (start > end) {
1943                         bytes_left = 0;
1944                         break;
1945                 }
1946                 bytes_left = end - start;
1947         }
1948
1949         if (bytes_left) {
1950                 ret = blkdev_issue_discard(bdev, start >> 9, bytes_left >> 9,
1951                                            GFP_NOFS, 0);
1952                 if (!ret)
1953                         *discarded_bytes += bytes_left;
1954         }
1955         return ret;
1956 }
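
/*
 * Userspace sketch (not kernel code) of the range splitting above: given
 * one reserved superblock window [sb_start, sb_end) that must never be
 * discarded, emit the pieces of [start, end) that fall outside it.  The
 * kernel loop does this in turn for all BTRFS_SUPER_MIRROR_MAX mirrors.
 */
#if 0
#include <stdio.h>

static void discard_around(unsigned long long start, unsigned long long end,
			   unsigned long long sb_start,
			   unsigned long long sb_end)
{
	if (sb_end <= start || sb_start >= end) {	/* no overlap */
		printf("discard [%llu, %llu)\n", start, end);
		return;
	}
	if (sb_start > start)		/* piece before the window */
		printf("discard [%llu, %llu)\n", start, sb_start);
	if (sb_end < end)		/* piece after the window */
		printf("discard [%llu, %llu)\n", sb_end, end);
}

int main(void)
{
	/* a 1 MiB reserved window inside a 4 MiB discard request */
	discard_around(0, 4ULL << 20, 1ULL << 20, 2ULL << 20);
	return 0;
}
#endif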
1957
1958 int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1959                          u64 num_bytes, u64 *actual_bytes)
1960 {
1961         int ret;
1962         u64 discarded_bytes = 0;
1963         struct btrfs_bio *bbio = NULL;
1964
1966         /* Tell the block device(s) that the sectors can be discarded */
1967         ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
1968                               bytenr, &num_bytes, &bbio, 0);
1969         /* Error condition is -ENOMEM */
1970         if (!ret) {
1971                 struct btrfs_bio_stripe *stripe = bbio->stripes;
1972                 int i;
1973
1975                 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
1976                         u64 bytes;
1977                         if (!stripe->dev->can_discard)
1978                                 continue;
1979
1980                         ret = btrfs_issue_discard(stripe->dev->bdev,
1981                                                   stripe->physical,
1982                                                   stripe->length,
1983                                                   &bytes);
1984                         if (!ret)
1985                                 discarded_bytes += bytes;
1986                         else if (ret != -EOPNOTSUPP)
1987                                 break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */
1988
1989                         /*
1990                          * Just in case we get back EOPNOTSUPP for some reason,
1991                          * ignore the return value so we don't screw up
1992                          * people calling discard_extent.
1993                          */
1994                         ret = 0;
1995                 }
1996                 btrfs_put_bbio(bbio);
1997         }
1998
1999         if (actual_bytes)
2000                 *actual_bytes = discarded_bytes;
2001
2003         if (ret == -EOPNOTSUPP)
2004                 ret = 0;
2005         return ret;
2006 }
2007
2008 /* Can return -ENOMEM */
2009 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
2010                          struct btrfs_root *root,
2011                          u64 bytenr, u64 num_bytes, u64 parent,
2012                          u64 root_objectid, u64 owner, u64 offset,
2013                          int no_quota)
2014 {
2015         int ret;
2016         struct btrfs_fs_info *fs_info = root->fs_info;
2017
2018         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
2019                root_objectid == BTRFS_TREE_LOG_OBJECTID);
2020
2021         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
2022                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
2023                                         num_bytes,
2024                                         parent, root_objectid, (int)owner,
2025                                         BTRFS_ADD_DELAYED_REF, NULL, no_quota);
2026         } else {
2027                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
2028                                         num_bytes,
2029                                         parent, root_objectid, owner, offset,
2030                                         BTRFS_ADD_DELAYED_REF, NULL, no_quota);
2031         }
2032         return ret;
2033 }
2034
2035 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
2036                                   struct btrfs_root *root,
2037                                   struct btrfs_delayed_ref_node *node,
2038                                   u64 parent, u64 root_objectid,
2039                                   u64 owner, u64 offset, int refs_to_add,
2040                                   struct btrfs_delayed_extent_op *extent_op)
2041 {
2042         struct btrfs_fs_info *fs_info = root->fs_info;
2043         struct btrfs_path *path;
2044         struct extent_buffer *leaf;
2045         struct btrfs_extent_item *item;
2046         struct btrfs_key key;
2047         u64 bytenr = node->bytenr;
2048         u64 num_bytes = node->num_bytes;
2049         u64 refs;
2050         int ret;
2051         int no_quota = node->no_quota;
2052
2053         path = btrfs_alloc_path();
2054         if (!path)
2055                 return -ENOMEM;
2056
2057         if (!is_fstree(root_objectid) || !root->fs_info->quota_enabled)
2058                 no_quota = 1;
2059
2060         path->reada = 1;
2061         path->leave_spinning = 1;
2062         /* this will set up the path even if it fails to insert the back ref */
2063         ret = insert_inline_extent_backref(trans, fs_info->extent_root, path,
2064                                            bytenr, num_bytes, parent,
2065                                            root_objectid, owner, offset,
2066                                            refs_to_add, extent_op);
2067         if ((ret < 0 && ret != -EAGAIN) || !ret)
2068                 goto out;
2069
2070         /*
2071          * OK, we had -EAGAIN, which means we didn't have space to insert an
2072          * inline extent ref, so just update the reference count and add a
2073          * normal backref.
2074          */
2075         leaf = path->nodes[0];
2076         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2077         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2078         refs = btrfs_extent_refs(leaf, item);
2079         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
2080         if (extent_op)
2081                 __run_delayed_extent_op(extent_op, leaf, item);
2082
2083         btrfs_mark_buffer_dirty(leaf);
2084         btrfs_release_path(path);
2085
2086         path->reada = 1;
2087         path->leave_spinning = 1;
2088         /* now insert the actual backref */
2089         ret = insert_extent_backref(trans, root->fs_info->extent_root,
2090                                     path, bytenr, parent, root_objectid,
2091                                     owner, offset, refs_to_add);
2092         if (ret)
2093                 btrfs_abort_transaction(trans, root, ret);
2094 out:
2095         btrfs_free_path(path);
2096         return ret;
2097 }
2098
2099 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
2100                                 struct btrfs_root *root,
2101                                 struct btrfs_delayed_ref_node *node,
2102                                 struct btrfs_delayed_extent_op *extent_op,
2103                                 int insert_reserved)
2104 {
2105         int ret = 0;
2106         struct btrfs_delayed_data_ref *ref;
2107         struct btrfs_key ins;
2108         u64 parent = 0;
2109         u64 ref_root = 0;
2110         u64 flags = 0;
2111
2112         ins.objectid = node->bytenr;
2113         ins.offset = node->num_bytes;
2114         ins.type = BTRFS_EXTENT_ITEM_KEY;
2115
2116         ref = btrfs_delayed_node_to_data_ref(node);
2117         trace_run_delayed_data_ref(node, ref, node->action);
2118
2119         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
2120                 parent = ref->parent;
2121         ref_root = ref->root;
2122
2123         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2124                 if (extent_op)
2125                         flags |= extent_op->flags_to_set;
2126                 ret = alloc_reserved_file_extent(trans, root,
2127                                                  parent, ref_root, flags,
2128                                                  ref->objectid, ref->offset,
2129                                                  &ins, node->ref_mod);
2130         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2131                 ret = __btrfs_inc_extent_ref(trans, root, node, parent,
2132                                              ref_root, ref->objectid,
2133                                              ref->offset, node->ref_mod,
2134                                              extent_op);
2135         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2136                 ret = __btrfs_free_extent(trans, root, node, parent,
2137                                           ref_root, ref->objectid,
2138                                           ref->offset, node->ref_mod,
2139                                           extent_op);
2140         } else {
2141                 BUG();
2142         }
2143         return ret;
2144 }
2145
2146 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2147                                     struct extent_buffer *leaf,
2148                                     struct btrfs_extent_item *ei)
2149 {
2150         u64 flags = btrfs_extent_flags(leaf, ei);
2151         if (extent_op->update_flags) {
2152                 flags |= extent_op->flags_to_set;
2153                 btrfs_set_extent_flags(leaf, ei, flags);
2154         }
2155
2156         if (extent_op->update_key) {
2157                 struct btrfs_tree_block_info *bi;
2158                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2159                 bi = (struct btrfs_tree_block_info *)(ei + 1);
2160                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2161         }
2162 }
2163
2164 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2165                                  struct btrfs_root *root,
2166                                  struct btrfs_delayed_ref_node *node,
2167                                  struct btrfs_delayed_extent_op *extent_op)
2168 {
2169         struct btrfs_key key;
2170         struct btrfs_path *path;
2171         struct btrfs_extent_item *ei;
2172         struct extent_buffer *leaf;
2173         u32 item_size;
2174         int ret;
2175         int err = 0;
2176         int metadata = !extent_op->is_data;
2177
2178         if (trans->aborted)
2179                 return 0;
2180
2181         if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2182                 metadata = 0;
2183
2184         path = btrfs_alloc_path();
2185         if (!path)
2186                 return -ENOMEM;
2187
2188         key.objectid = node->bytenr;
2189
2190         if (metadata) {
2191                 key.type = BTRFS_METADATA_ITEM_KEY;
2192                 key.offset = extent_op->level;
2193         } else {
2194                 key.type = BTRFS_EXTENT_ITEM_KEY;
2195                 key.offset = node->num_bytes;
2196         }
2197
2198 again:
2199         path->reada = 1;
2200         path->leave_spinning = 1;
2201         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2202                                 path, 0, 1);
2203         if (ret < 0) {
2204                 err = ret;
2205                 goto out;
2206         }
2207         if (ret > 0) {
2208                 if (metadata) {
2209                         if (path->slots[0] > 0) {
2210                                 path->slots[0]--;
2211                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
2212                                                       path->slots[0]);
2213                                 if (key.objectid == node->bytenr &&
2214                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
2215                                     key.offset == node->num_bytes)
2216                                         ret = 0;
2217                         }
2218                         if (ret > 0) {
2219                                 btrfs_release_path(path);
2220                                 metadata = 0;
2221
2222                                 key.objectid = node->bytenr;
2223                                 key.offset = node->num_bytes;
2224                                 key.type = BTRFS_EXTENT_ITEM_KEY;
2225                                 goto again;
2226                         }
2227                 } else {
2228                         err = -EIO;
2229                         goto out;
2230                 }
2231         }
2232
2233         leaf = path->nodes[0];
2234         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2235 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2236         if (item_size < sizeof(*ei)) {
2237                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2238                                              path, (u64)-1, 0);
2239                 if (ret < 0) {
2240                         err = ret;
2241                         goto out;
2242                 }
2243                 leaf = path->nodes[0];
2244                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2245         }
2246 #endif
2247         BUG_ON(item_size < sizeof(*ei));
2248         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2249         __run_delayed_extent_op(extent_op, leaf, ei);
2250
2251         btrfs_mark_buffer_dirty(leaf);
2252 out:
2253         btrfs_free_path(path);
2254         return err;
2255 }
2256
2257 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2258                                 struct btrfs_root *root,
2259                                 struct btrfs_delayed_ref_node *node,
2260                                 struct btrfs_delayed_extent_op *extent_op,
2261                                 int insert_reserved)
2262 {
2263         int ret = 0;
2264         struct btrfs_delayed_tree_ref *ref;
2265         struct btrfs_key ins;
2266         u64 parent = 0;
2267         u64 ref_root = 0;
2268         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
2269                                                  SKINNY_METADATA);
2270
2271         ref = btrfs_delayed_node_to_tree_ref(node);
2272         trace_run_delayed_tree_ref(node, ref, node->action);
2273
2274         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2275                 parent = ref->parent;
2276         ref_root = ref->root;
2277
2278         ins.objectid = node->bytenr;
2279         if (skinny_metadata) {
2280                 ins.offset = ref->level;
2281                 ins.type = BTRFS_METADATA_ITEM_KEY;
2282         } else {
2283                 ins.offset = node->num_bytes;
2284                 ins.type = BTRFS_EXTENT_ITEM_KEY;
2285         }
2286
2287         BUG_ON(node->ref_mod != 1);
2288         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2289                 BUG_ON(!extent_op || !extent_op->update_flags);
2290                 ret = alloc_reserved_tree_block(trans, root,
2291                                                 parent, ref_root,
2292                                                 extent_op->flags_to_set,
2293                                                 &extent_op->key,
2294                                                 ref->level, &ins,
2295                                                 node->no_quota);
2296         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2297                 ret = __btrfs_inc_extent_ref(trans, root, node,
2298                                              parent, ref_root,
2299                                              ref->level, 0, 1,
2300                                              extent_op);
2301         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2302                 ret = __btrfs_free_extent(trans, root, node,
2303                                           parent, ref_root,
2304                                           ref->level, 0, 1, extent_op);
2305         } else {
2306                 BUG();
2307         }
2308         return ret;
2309 }
2310
2311 /* helper function to actually process a single delayed ref entry */
2312 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2313                                struct btrfs_root *root,
2314                                struct btrfs_delayed_ref_node *node,
2315                                struct btrfs_delayed_extent_op *extent_op,
2316                                int insert_reserved)
2317 {
2318         int ret = 0;
2319
2320         if (trans->aborted) {
2321                 if (insert_reserved)
2322                         btrfs_pin_extent(root, node->bytenr,
2323                                          node->num_bytes, 1);
2324                 return 0;
2325         }
2326
2327         if (btrfs_delayed_ref_is_head(node)) {
2328                 struct btrfs_delayed_ref_head *head;
2329                 /*
2330                  * we've hit the end of the chain and we were supposed
2331                  * to insert this extent into the tree.  But it got
2332                  * deleted before we ever needed to insert it, so all
2333                  * we have to do is clean up the accounting.
2334                  */
2335                 BUG_ON(extent_op);
2336                 head = btrfs_delayed_node_to_head(node);
2337                 trace_run_delayed_ref_head(node, head, node->action);
2338
2339                 if (insert_reserved) {
2340                         btrfs_pin_extent(root, node->bytenr,
2341                                          node->num_bytes, 1);
2342                         if (head->is_data) {
2343                                 ret = btrfs_del_csums(trans, root,
2344                                                       node->bytenr,
2345                                                       node->num_bytes);
2346                         }
2347                 }
2348                 return ret;
2349         }
2350
2351         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2352             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2353                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2354                                            insert_reserved);
2355         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2356                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2357                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2358                                            insert_reserved);
2359         else
2360                 BUG();
2361         return ret;
2362 }
2363
2364 static inline struct btrfs_delayed_ref_node *
2365 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2366 {
2367         struct btrfs_delayed_ref_node *ref;
2368
2369         if (list_empty(&head->ref_list))
2370                 return NULL;
2371
2372         /*
2373          * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
2374          * This is to prevent a ref count from going down to zero, which deletes
2375          * the extent item from the extent tree, while there still are references
2376          * to add; those adds would fail because they would not find the extent item.
2377          */
2378         list_for_each_entry(ref, &head->ref_list, list) {
2379                 if (ref->action == BTRFS_ADD_DELAYED_REF)
2380                         return ref;
2381         }
2382
2383         return list_entry(head->ref_list.next, struct btrfs_delayed_ref_node,
2384                           list);
2385 }
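
/*
 * Minimal userspace model (not kernel code) of the ADD-first policy above:
 * an array of actions stands in for head->ref_list, and we prefer the first
 * BTRFS_ADD_DELAYED_REF so the ref count never transiently hits zero while
 * adds are still queued.
 */
#if 0
#include <stdio.h>

enum { ADD = 1, DROP = 2 };

static int select_ref(const int *actions, int n)
{
	for (int i = 0; i < n; i++)
		if (actions[i] == ADD)
			return i;	/* an ADD exists, run it first */
	return n ? 0 : -1;		/* otherwise the first queued ref */
}

int main(void)
{
	int queue[] = { DROP, DROP, ADD };

	printf("picked index %d\n", select_ref(queue, 3));	/* prints 2 */
	return 0;
}
#endif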
2386
2387 /*
2388  * Returns 0 on success or if called with an already aborted transaction.
2389  * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2390  */
2391 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2392                                              struct btrfs_root *root,
2393                                              unsigned long nr)
2394 {
2395         struct btrfs_delayed_ref_root *delayed_refs;
2396         struct btrfs_delayed_ref_node *ref;
2397         struct btrfs_delayed_ref_head *locked_ref = NULL;
2398         struct btrfs_delayed_extent_op *extent_op;
2399         struct btrfs_fs_info *fs_info = root->fs_info;
2400         ktime_t start = ktime_get();
2401         int ret;
2402         unsigned long count = 0;
2403         unsigned long actual_count = 0;
2404         int must_insert_reserved = 0;
2405
2406         delayed_refs = &trans->transaction->delayed_refs;
2407         while (1) {
2408                 if (!locked_ref) {
2409                         if (count >= nr)
2410                                 break;
2411
2412                         spin_lock(&delayed_refs->lock);
2413                         locked_ref = btrfs_select_ref_head(trans);
2414                         if (!locked_ref) {
2415                                 spin_unlock(&delayed_refs->lock);
2416                                 break;
2417                         }
2418
2419                         /* grab the lock that says we are going to process
2420                          * all the refs for this head */
2421                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2422                         spin_unlock(&delayed_refs->lock);
2423                         /*
2424                          * we may have dropped the spin lock to get the head
2425                          * mutex lock, and that might have given someone else
2426                          * time to free the head.  If that's true, it has been
2427                          * removed from our list and we can move on.
2428                          */
2429                         if (ret == -EAGAIN) {
2430                                 locked_ref = NULL;
2431                                 count++;
2432                                 continue;
2433                         }
2434                 }
2435
2436                 spin_lock(&locked_ref->lock);
2437
2438                 /*
2439                  * locked_ref is the head node, so we have to go one
2440                  * node back for any delayed ref updates
2441                  */
2442                 ref = select_delayed_ref(locked_ref);
2443
2444                 if (ref && ref->seq &&
2445                     btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2446                         spin_unlock(&locked_ref->lock);
2447                         btrfs_delayed_ref_unlock(locked_ref);
2448                         spin_lock(&delayed_refs->lock);
2449                         locked_ref->processing = 0;
2450                         delayed_refs->num_heads_ready++;
2451                         spin_unlock(&delayed_refs->lock);
2452                         locked_ref = NULL;
2453                         cond_resched();
2454                         count++;
2455                         continue;
2456                 }
2457
2458                 /*
2459                  * record the must insert reserved flag before we
2460                  * drop the spin lock.
2461                  */
2462                 must_insert_reserved = locked_ref->must_insert_reserved;
2463                 locked_ref->must_insert_reserved = 0;
2464
2465                 extent_op = locked_ref->extent_op;
2466                 locked_ref->extent_op = NULL;
2467
2468                 if (!ref) {
2469
2471                         /* All delayed refs have been processed; go ahead
2472                          * and send the head node to run_one_delayed_ref,
2473                          * so that any accounting fixes can happen.
2474                          */
2475                         ref = &locked_ref->node;
2476
2477                         if (extent_op && must_insert_reserved) {
2478                                 btrfs_free_delayed_extent_op(extent_op);
2479                                 extent_op = NULL;
2480                         }
2481
2482                         if (extent_op) {
2483                                 spin_unlock(&locked_ref->lock);
2484                                 ret = run_delayed_extent_op(trans, root,
2485                                                             ref, extent_op);
2486                                 btrfs_free_delayed_extent_op(extent_op);
2487
2488                                 if (ret) {
2489                                         /*
2490                                          * Need to reset must_insert_reserved if
2491                                          * there was an error so the abort stuff
2492                                          * can cleanup the reserved space
2493                                          * properly.
2494                                          */
2495                                         if (must_insert_reserved)
2496                                                 locked_ref->must_insert_reserved = 1;
2497                                         locked_ref->processing = 0;
2498                                         btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
2499                                         btrfs_delayed_ref_unlock(locked_ref);
2500                                         return ret;
2501                                 }
2502                                 continue;
2503                         }
2504
2505                         /*
2506                          * Need to drop our head ref lock and re-acquire the
2507                          * delayed ref lock and then re-check to make sure
2508                          * nobody got added.
2509                          */
2510                         spin_unlock(&locked_ref->lock);
2511                         spin_lock(&delayed_refs->lock);
2512                         spin_lock(&locked_ref->lock);
2513                         if (!list_empty(&locked_ref->ref_list) ||
2514                             locked_ref->extent_op) {
2515                                 spin_unlock(&locked_ref->lock);
2516                                 spin_unlock(&delayed_refs->lock);
2517                                 continue;
2518                         }
2519                         ref->in_tree = 0;
2520                         delayed_refs->num_heads--;
2521                         rb_erase(&locked_ref->href_node,
2522                                  &delayed_refs->href_root);
2523                         spin_unlock(&delayed_refs->lock);
2524                 } else {
2525                         actual_count++;
2526                         ref->in_tree = 0;
2527                         list_del(&ref->list);
2528                 }
2529                 atomic_dec(&delayed_refs->num_entries);
2530
2531                 if (!btrfs_delayed_ref_is_head(ref)) {
2532                         /*
2533                          * when we play the delayed ref, also correct the
2534                          * ref_mod on head
2535                          */
2536                         switch (ref->action) {
2537                         case BTRFS_ADD_DELAYED_REF:
2538                         case BTRFS_ADD_DELAYED_EXTENT:
2539                                 locked_ref->node.ref_mod -= ref->ref_mod;
2540                                 break;
2541                         case BTRFS_DROP_DELAYED_REF:
2542                                 locked_ref->node.ref_mod += ref->ref_mod;
2543                                 break;
2544                         default:
2545                                 WARN_ON(1);
2546                         }
2547                 }
2548                 spin_unlock(&locked_ref->lock);
2549
2550                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2551                                           must_insert_reserved);
2552
2553                 btrfs_free_delayed_extent_op(extent_op);
2554                 if (ret) {
2555                         locked_ref->processing = 0;
2556                         btrfs_delayed_ref_unlock(locked_ref);
2557                         btrfs_put_delayed_ref(ref);
2558                         btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
2559                         return ret;
2560                 }
2561
2562                 /*
2563                  * If this node is a head, that means all the refs in this head
2564                  * have been dealt with, and we will pick the next head to deal
2565                  * with, so we must unlock the head and drop it from the cluster
2566                  * list before we release it.
2567                  */
2568                 if (btrfs_delayed_ref_is_head(ref)) {
2569                         if (locked_ref->is_data &&
2570                             locked_ref->total_ref_mod < 0) {
2571                                 spin_lock(&delayed_refs->lock);
2572                                 delayed_refs->pending_csums -= ref->num_bytes;
2573                                 spin_unlock(&delayed_refs->lock);
2574                         }
2575                         btrfs_delayed_ref_unlock(locked_ref);
2576                         locked_ref = NULL;
2577                 }
2578                 btrfs_put_delayed_ref(ref);
2579                 count++;
2580                 cond_resched();
2581         }
2582
2583         /*
2584          * We don't want to include ref heads since we can have empty ref heads
2585          * and those would drastically skew our runtime down because we just do
2586          * accounting, not actual extent tree updates.
2587          */
2588         if (actual_count > 0) {
2589                 u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
2590                 u64 avg;
2591
2592                 /*
2593                  * We weigh the current average higher than our current runtime
2594                  * to avoid large swings in the average.
2595                  */
2596                 spin_lock(&delayed_refs->lock);
2597                 avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
2598                 fs_info->avg_delayed_ref_runtime = avg >> 2;    /* div by 4 */
2599                 spin_unlock(&delayed_refs->lock);
2600         }
2601         return 0;
2602 }
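
/*
 * The running average above is a shift-based weighted average:
 * new_avg = (3 * old_avg + runtime) / 4.  A small userspace sketch (not
 * kernel code) with nanosecond samples:
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static uint64_t update_avg(uint64_t avg, uint64_t runtime)
{
	return (avg * 3 + runtime) >> 2;	/* (3 * avg + runtime) / 4 */
}

int main(void)
{
	uint64_t avg = 1000000;			/* 1 ms current average */

	avg = update_avg(avg, 5000000);		/* one slow 5 ms batch */
	printf("%llu\n", (unsigned long long)avg);	/* 2000000: damped */
	return 0;
}
#endif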
2603
2604 #ifdef SCRAMBLE_DELAYED_REFS
2605 /*
2606  * Normally delayed refs get processed in ascending bytenr order. This
2607  * correlates in most cases to the order added. To expose dependencies on this
2608  * order, we start to process the tree in the middle instead of the beginning.
2609  */
2610 static u64 find_middle(struct rb_root *root)
2611 {
2612         struct rb_node *n = root->rb_node;
2613         struct btrfs_delayed_ref_node *entry;
2614         int alt = 1;
2615         u64 middle;
2616         u64 first = 0, last = 0;
2617
2618         n = rb_first(root);
2619         if (n) {
2620                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2621                 first = entry->bytenr;
2622         }
2623         n = rb_last(root);
2624         if (n) {
2625                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2626                 last = entry->bytenr;
2627         }
2628         n = root->rb_node;
2629
2630         while (n) {
2631                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2632                 WARN_ON(!entry->in_tree);
2633
2634                 middle = entry->bytenr;
2635
2636                 if (alt)
2637                         n = n->rb_left;
2638                 else
2639                         n = n->rb_right;
2640
2641                 alt = 1 - alt;
2642         }
2643         return middle;
2644 }
2645 #endif
2646
2647 static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
2648 {
2649         u64 num_bytes;
2650
2651         num_bytes = heads * (sizeof(struct btrfs_extent_item) +
2652                              sizeof(struct btrfs_extent_inline_ref));
2653         if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2654                 num_bytes += heads * sizeof(struct btrfs_tree_block_info);
2655
2656         /*
2657          * We don't ever fill up leaves all the way, so the caller doubles this
2658          * estimate to be closer to what we're really going to want to use.
2659          */
2660         return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
2661 }
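/*
 * Illustrative sketch of the estimate above (sizes are approximate and
 * depend on the on-disk format): with skinny metadata, one head costs
 * sizeof(btrfs_extent_item) + sizeof(btrfs_extent_inline_ref), roughly
 * 24 + 9 = 33 bytes, so 1000 heads over a ~16K leaf data area come out
 * to about 33000 / 16283 ~= 2 leaves.
 */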
2662
2663 /*
2664  * Takes the number of bytes to be checksummed and figures out how many leaves it
2665  * would require to store the csums for that many bytes.
2666  */
2667 u64 btrfs_csum_bytes_to_leaves(struct btrfs_root *root, u64 csum_bytes)
2668 {
2669         u64 csum_size;
2670         u64 num_csums_per_leaf;
2671         u64 num_csums;
2672
2673         csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
2674         num_csums_per_leaf = div64_u64(csum_size,
2675                         (u64)btrfs_super_csum_size(root->fs_info->super_copy));
2676         num_csums = div64_u64(csum_bytes, root->sectorsize);
2677         num_csums += num_csums_per_leaf - 1;
2678         num_csums = div64_u64(num_csums, num_csums_per_leaf);
2679         return num_csums;
2680 }
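/*
 * Worked example (illustrative, assuming 16K nodes, 4K sectorsize and
 * 4-byte crc32c checksums): csum_size ~= 16283 - 25 = 16258 bytes, so
 * num_csums_per_leaf ~= 4064; checksumming 1GiB of data needs 262144
 * csums, i.e. roughly DIV_ROUND_UP(262144, 4064) = 65 leaves.
 */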
2681
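/*
 * Rough cost model used below (a summary, not part of the original
 * source; "trans_unit" is shorthand for
 * btrfs_calc_trans_metadata_size(root, 1)): reserve roughly
 *
 *	2 * (trans_unit + (leaves(num_heads) - 1) * nodesize)
 *		+ csum_leaves * nodesize
 *
 * plus one trans_unit per dirty block group, and report 1 (throttle)
 * once the global reserve can no longer cover that total.
 */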
2682 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
2683                                        struct btrfs_root *root)
2684 {
2685         struct btrfs_block_rsv *global_rsv;
2686         u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
2687         u64 csum_bytes = trans->transaction->delayed_refs.pending_csums;
2688         u64 num_dirty_bgs = trans->transaction->num_dirty_bgs;
2689         u64 num_bytes, num_dirty_bgs_bytes;
2690         int ret = 0;
2691
2692         num_bytes = btrfs_calc_trans_metadata_size(root, 1);
2693         num_heads = heads_to_leaves(root, num_heads);
2694         if (num_heads > 1)
2695                 num_bytes += (num_heads - 1) * root->nodesize;
2696         num_bytes <<= 1;
2697         num_bytes += btrfs_csum_bytes_to_leaves(root, csum_bytes) * root->nodesize;
2698         num_dirty_bgs_bytes = btrfs_calc_trans_metadata_size(root,
2699                                                              num_dirty_bgs);
2700         global_rsv = &root->fs_info->global_block_rsv;
2701
2702         /*
2703          * If we can't allocate any more chunks, let's make sure we have _lots_ of
2704          * wiggle room since running delayed refs can create more delayed refs.
2705          */
2706         if (global_rsv->space_info->full) {
2707                 num_dirty_bgs_bytes <<= 1;
2708                 num_bytes <<= 1;
2709         }
2710
2711         spin_lock(&global_rsv->lock);
2712         if (global_rsv->reserved <= num_bytes + num_dirty_bgs_bytes)
2713                 ret = 1;
2714         spin_unlock(&global_rsv->lock);
2715         return ret;
2716 }
2717
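/*
 * Throttling heuristic (summary): estimate the time needed to flush the
 * queued refs as num_entries * avg_delayed_ref_runtime. A full second's
 * worth of estimated work returns 1, half a second returns 2, otherwise
 * fall back to the global-reserve space check above.
 */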
2718 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
2719                                        struct btrfs_root *root)
2720 {
2721         struct btrfs_fs_info *fs_info = root->fs_info;
2722         u64 num_entries =
2723                 atomic_read(&trans->transaction->delayed_refs.num_entries);
2724         u64 avg_runtime;
2725         u64 val;
2726
2727         smp_mb();
2728         avg_runtime = fs_info->avg_delayed_ref_runtime;
2729         val = num_entries * avg_runtime;
2730         if (val >= NSEC_PER_SEC)
2731                 return 1;
2732         if (val >= NSEC_PER_SEC / 2)
2733                 return 2;
2734
2735         return btrfs_check_space_for_delayed_refs(trans, root);
2736 }
2737
2738 struct async_delayed_refs {
2739         struct btrfs_root *root;
2740         int count;
2741         int error;
2742         int sync;
2743         struct completion wait;
2744         struct btrfs_work work;
2745 };
2746
2747 static void delayed_ref_async_start(struct btrfs_work *work)
2748 {
2749         struct async_delayed_refs *async;
2750         struct btrfs_trans_handle *trans;
2751         int ret;
2752
2753         async = container_of(work, struct async_delayed_refs, work);
2754
2755         trans = btrfs_join_transaction(async->root);
2756         if (IS_ERR(trans)) {
2757                 async->error = PTR_ERR(trans);
2758                 goto done;
2759         }
2760
2761         /*
2762          * trans->sync means that when we call end_transaction, we won't
2763          * wait on delayed refs
2764          */
2765         trans->sync = true;
2766         ret = btrfs_run_delayed_refs(trans, async->root, async->count);
2767         if (ret)
2768                 async->error = ret;
2769
2770         ret = btrfs_end_transaction(trans, async->root);
2771         if (ret && !async->error)
2772                 async->error = ret;
2773 done:
2774         if (async->sync)
2775                 complete(&async->wait);
2776         else
2777                 kfree(async);
2778 }
2779
2780 int btrfs_async_run_delayed_refs(struct btrfs_root *root,
2781                                  unsigned long count, int wait)
2782 {
2783         struct async_delayed_refs *async;
2784         int ret;
2785
2786         async = kmalloc(sizeof(*async), GFP_NOFS);
2787         if (!async)
2788                 return -ENOMEM;
2789
2790         async->root = root->fs_info->tree_root;
2791         async->count = count;
2792         async->error = 0;
2793         if (wait)
2794                 async->sync = 1;
2795         else
2796                 async->sync = 0;
2797         init_completion(&async->wait);
2798
2799         btrfs_init_work(&async->work, btrfs_extent_refs_helper,
2800                         delayed_ref_async_start, NULL, NULL);
2801
2802         btrfs_queue_work(root->fs_info->extent_workers, &async->work);
2803
2804         if (wait) {
2805                 wait_for_completion(&async->wait);
2806                 ret = async->error;
2807                 kfree(async);
2808                 return ret;
2809         }
2810         return 0;
2811 }
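/*
 * Usage sketch (illustrative): a caller that only wants to nudge the
 * queue along passes wait == 0 and never touches the async struct again
 * (the worker frees it in delayed_ref_async_start); passing wait == 1
 * blocks on the completion and returns the worker's error, e.g.
 *
 *	ret = btrfs_async_run_delayed_refs(root, nr, 1);
 */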
2812
2813 /*
2814  * this starts processing the delayed reference count updates and
2815  * extent insertions we have queued up so far.  count can be
2816  * 0, which means to process everything in the tree at the start
2817  * of the run (but not newly added entries), or it can be some target
2818  * number you'd like to process.
2819  *
2820  * Returns 0 on success or if called with an aborted transaction
2821  * Returns <0 on error and aborts the transaction
2822  */
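/*
 * For example (illustrative): count == 0 is turned into a budget of
 * twice the entries present at the start, while count == (unsigned
 * long)-1 makes run_all loop until the ref tree is drained.
 */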
2823 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2824                            struct btrfs_root *root, unsigned long count)
2825 {
2826         struct rb_node *node;
2827         struct btrfs_delayed_ref_root *delayed_refs;
2828         struct btrfs_delayed_ref_head *head;
2829         int ret;
2830         int run_all = count == (unsigned long)-1;
2831
2832         /* We'll clean this up in btrfs_cleanup_transaction */
2833         if (trans->aborted)
2834                 return 0;
2835
2836         if (root == root->fs_info->extent_root)
2837                 root = root->fs_info->tree_root;
2838
2839         delayed_refs = &trans->transaction->delayed_refs;
2840         if (count == 0)
2841                 count = atomic_read(&delayed_refs->num_entries) * 2;
2842
2843 again:
2844 #ifdef SCRAMBLE_DELAYED_REFS
2845         delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2846 #endif
2847         ret = __btrfs_run_delayed_refs(trans, root, count);
2848         if (ret < 0) {
2849                 btrfs_abort_transaction(trans, root, ret);
2850                 return ret;
2851         }
2852
2853         if (run_all) {
2854                 if (!list_empty(&trans->new_bgs))
2855                         btrfs_create_pending_block_groups(trans, root);
2856
2857                 spin_lock(&delayed_refs->lock);
2858                 node = rb_first(&delayed_refs->href_root);
2859                 if (!node) {
2860                         spin_unlock(&delayed_refs->lock);
2861                         goto out;
2862                 }
2863                 count = (unsigned long)-1;
2864
2865                 while (node) {
2866                         head = rb_entry(node, struct btrfs_delayed_ref_head,
2867                                         href_node);
2868                         if (btrfs_delayed_ref_is_head(&head->node)) {
2869                                 struct btrfs_delayed_ref_node *ref;
2870
2871                                 ref = &head->node;
2872                                 atomic_inc(&ref->refs);
2873
2874                                 spin_unlock(&delayed_refs->lock);
2875                                 /*
2876                                  * Mutex was contended, block until it's
2877                                  * released and try again
2878                                  */
2879                                 mutex_lock(&head->mutex);
2880                                 mutex_unlock(&head->mutex);
2881
2882                                 btrfs_put_delayed_ref(ref);
2883                                 cond_resched();
2884                                 goto again;
2885                         } else {
2886                                 WARN_ON(1);
2887                         }
2888                         node = rb_next(node);
2889                 }
2890                 spin_unlock(&delayed_refs->lock);
2891                 cond_resched();
2892                 goto again;
2893         }
2894 out:
2895         assert_qgroups_uptodate(trans);
2896         return 0;
2897 }
2898
2899 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2900                                 struct btrfs_root *root,
2901                                 u64 bytenr, u64 num_bytes, u64 flags,
2902                                 int level, int is_data)
2903 {
2904         struct btrfs_delayed_extent_op *extent_op;
2905         int ret;
2906
2907         extent_op = btrfs_alloc_delayed_extent_op();
2908         if (!extent_op)
2909                 return -ENOMEM;
2910
2911         extent_op->flags_to_set = flags;
2912         extent_op->update_flags = 1;
2913         extent_op->update_key = 0;
2914         extent_op->is_data = is_data ? 1 : 0;
2915         extent_op->level = level;
2916
2917         ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2918                                           num_bytes, extent_op);
2919         if (ret)
2920                 btrfs_free_delayed_extent_op(extent_op);
2921         return ret;
2922 }
2923
2924 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2925                                       struct btrfs_root *root,
2926                                       struct btrfs_path *path,
2927                                       u64 objectid, u64 offset, u64 bytenr)
2928 {
2929         struct btrfs_delayed_ref_head *head;
2930         struct btrfs_delayed_ref_node *ref;
2931         struct btrfs_delayed_data_ref *data_ref;
2932         struct btrfs_delayed_ref_root *delayed_refs;
2933         int ret = 0;
2934
2935         delayed_refs = &trans->transaction->delayed_refs;
2936         spin_lock(&delayed_refs->lock);
2937         head = btrfs_find_delayed_ref_head(trans, bytenr);
2938         if (!head) {
2939                 spin_unlock(&delayed_refs->lock);
2940                 return 0;
2941         }
2942
2943         if (!mutex_trylock(&head->mutex)) {
2944                 atomic_inc(&head->node.refs);
2945                 spin_unlock(&delayed_refs->lock);
2946
2947                 btrfs_release_path(path);
2948
2949                 /*
2950                  * Mutex was contended, block until it's released and let
2951                  * caller try again
2952                  */
2953                 mutex_lock(&head->mutex);
2954                 mutex_unlock(&head->mutex);
2955                 btrfs_put_delayed_ref(&head->node);
2956                 return -EAGAIN;
2957         }
2958         spin_unlock(&delayed_refs->lock);
2959
2960         spin_lock(&head->lock);
2961         list_for_each_entry(ref, &head->ref_list, list) {
2962                 /* If it's a shared ref we know a cross reference exists */
2963                 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
2964                         ret = 1;
2965                         break;
2966                 }
2967
2968                 data_ref = btrfs_delayed_node_to_data_ref(ref);
2969
2970                 /*
2971                  * If our ref doesn't match the one we're currently looking at
2972                  * then we have a cross reference.
2973                  */
2974                 if (data_ref->root != root->root_key.objectid ||
2975                     data_ref->objectid != objectid ||
2976                     data_ref->offset != offset) {
2977                         ret = 1;
2978                         break;
2979                 }
2980         }
2981         spin_unlock(&head->lock);
2982         mutex_unlock(&head->mutex);
2983         return ret;
2984 }
2985
2986 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2987                                         struct btrfs_root *root,
2988                                         struct btrfs_path *path,
2989                                         u64 objectid, u64 offset, u64 bytenr)
2990 {
2991         struct btrfs_root *extent_root = root->fs_info->extent_root;
2992         struct extent_buffer *leaf;
2993         struct btrfs_extent_data_ref *ref;
2994         struct btrfs_extent_inline_ref *iref;
2995         struct btrfs_extent_item *ei;
2996         struct btrfs_key key;
2997         u32 item_size;
2998         int ret;
2999
3000         key.objectid = bytenr;
3001         key.offset = (u64)-1;
3002         key.type = BTRFS_EXTENT_ITEM_KEY;
3003
3004         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
3005         if (ret < 0)
3006                 goto out;
3007         BUG_ON(ret == 0); /* Corruption */
3008
3009         ret = -ENOENT;
3010         if (path->slots[0] == 0)
3011                 goto out;
3012
3013         path->slots[0]--;
3014         leaf = path->nodes[0];
3015         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3016
3017         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
3018                 goto out;
3019
3020         ret = 1;
3021         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3022 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3023         if (item_size < sizeof(*ei)) {
3024                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
3025                 goto out;
3026         }
3027 #endif
3028         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
3029
3030         if (item_size != sizeof(*ei) +
3031             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
3032                 goto out;
3033
3034         if (btrfs_extent_generation(leaf, ei) <=
3035             btrfs_root_last_snapshot(&root->root_item))
3036                 goto out;
3037
3038         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
3039         if (btrfs_extent_inline_ref_type(leaf, iref) !=
3040             BTRFS_EXTENT_DATA_REF_KEY)
3041                 goto out;
3042
3043         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
3044         if (btrfs_extent_refs(leaf, ei) !=
3045             btrfs_extent_data_ref_count(leaf, ref) ||
3046             btrfs_extent_data_ref_root(leaf, ref) !=
3047             root->root_key.objectid ||
3048             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
3049             btrfs_extent_data_ref_offset(leaf, ref) != offset)
3050                 goto out;
3051
3052         ret = 0;
3053 out:
3054         return ret;
3055 }
3056
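/*
 * Return convention shared by the two checks below (summary): 1 means a
 * cross reference may exist, 0 means the extent is provably ours alone,
 * and -ENOENT means no reference was found at all; btrfs_cross_ref_exist
 * only propagates -ENOENT when both checks report it.
 */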
3057 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
3058                           struct btrfs_root *root,
3059                           u64 objectid, u64 offset, u64 bytenr)
3060 {
3061         struct btrfs_path *path;
3062         int ret;
3063         int ret2;
3064
3065         path = btrfs_alloc_path();
3066         if (!path)
3067                 return -ENOMEM;
3068
3069         do {
3070                 ret = check_committed_ref(trans, root, path, objectid,
3071                                           offset, bytenr);
3072                 if (ret && ret != -ENOENT)
3073                         goto out;
3074
3075                 ret2 = check_delayed_ref(trans, root, path, objectid,
3076                                          offset, bytenr);
3077         } while (ret2 == -EAGAIN);
3078
3079         if (ret2 && ret2 != -ENOENT) {
3080                 ret = ret2;
3081                 goto out;
3082         }
3083
3084         if (ret != -ENOENT || ret2 != -ENOENT)
3085                 ret = 0;
3086 out:
3087         btrfs_free_path(path);
3088         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
3089                 WARN_ON(ret > 0);
3090         return ret;
3091 }
3092
3093 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
3094                            struct btrfs_root *root,
3095                            struct extent_buffer *buf,
3096                            int full_backref, int inc)
3097 {
3098         u64 bytenr;
3099         u64 num_bytes;
3100         u64 parent;
3101         u64 ref_root;
3102         u32 nritems;
3103         struct btrfs_key key;
3104         struct btrfs_file_extent_item *fi;
3105         int i;
3106         int level;
3107         int ret = 0;
3108         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
3109                             u64, u64, u64, u64, u64, u64, int);
3110
3112         if (btrfs_test_is_dummy_root(root))
3113                 return 0;
3114
3115         ref_root = btrfs_header_owner(buf);
3116         nritems = btrfs_header_nritems(buf);
3117         level = btrfs_header_level(buf);
3118
3119         if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state) && level == 0)
3120                 return 0;
3121
3122         if (inc)
3123                 process_func = btrfs_inc_extent_ref;
3124         else
3125                 process_func = btrfs_free_extent;
3126
3127         if (full_backref)
3128                 parent = buf->start;
3129         else
3130                 parent = 0;
3131
3132         for (i = 0; i < nritems; i++) {
3133                 if (level == 0) {
3134                         btrfs_item_key_to_cpu(buf, &key, i);
3135                         if (key.type != BTRFS_EXTENT_DATA_KEY)
3136                                 continue;
3137                         fi = btrfs_item_ptr(buf, i,
3138                                             struct btrfs_file_extent_item);
3139                         if (btrfs_file_extent_type(buf, fi) ==
3140                             BTRFS_FILE_EXTENT_INLINE)
3141                                 continue;
3142                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
3143                         if (bytenr == 0)
3144                                 continue;
3145
3146                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
3147                         key.offset -= btrfs_file_extent_offset(buf, fi);
3148                         ret = process_func(trans, root, bytenr, num_bytes,
3149                                            parent, ref_root, key.objectid,
3150                                            key.offset, 1);
3151                         if (ret)
3152                                 goto fail;
3153                 } else {
3154                         bytenr = btrfs_node_blockptr(buf, i);
3155                         num_bytes = root->nodesize;
3156                         ret = process_func(trans, root, bytenr, num_bytes,
3157                                            parent, ref_root, level - 1, 0,
3158                                            1);
3159                         if (ret)
3160                                 goto fail;
3161                 }
3162         }
3163         return 0;
3164 fail:
3165         return ret;
3166 }
3167
3168 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3169                   struct extent_buffer *buf, int full_backref)
3170 {
3171         return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
3172 }
3173
3174 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3175                   struct extent_buffer *buf, int full_backref)
3176 {
3177         return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
3178 }
3179
3180 static int write_one_cache_group(struct btrfs_trans_handle *trans,
3181                                  struct btrfs_root *root,
3182                                  struct btrfs_path *path,
3183                                  struct btrfs_block_group_cache *cache)
3184 {
3185         int ret;
3186         struct btrfs_root *extent_root = root->fs_info->extent_root;
3187         unsigned long bi;
3188         struct extent_buffer *leaf;
3189
3190         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3191         if (ret) {
3192                 if (ret > 0)
3193                         ret = -ENOENT;
3194                 goto fail;
3195         }
3196
3197         leaf = path->nodes[0];
3198         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3199         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3200         btrfs_mark_buffer_dirty(leaf);
3201 fail:
3202         btrfs_release_path(path);
3203         return ret;
3205 }
3206
3207 static struct btrfs_block_group_cache *
3208 next_block_group(struct btrfs_root *root,
3209                  struct btrfs_block_group_cache *cache)
3210 {
3211         struct rb_node *node;
3212
3213         spin_lock(&root->fs_info->block_group_cache_lock);
3214
3215         /* If our block group was removed, we need a full search. */
3216         if (RB_EMPTY_NODE(&cache->cache_node)) {
3217                 const u64 next_bytenr = cache->key.objectid + cache->key.offset;
3218
3219                 spin_unlock(&root->fs_info->block_group_cache_lock);
3220                 btrfs_put_block_group(cache);
3221                 cache = btrfs_lookup_first_block_group(root->fs_info,
3222                                                        next_bytenr);
3223                 return cache;
3224         }
3225         node = rb_next(&cache->cache_node);
3226         btrfs_put_block_group(cache);
3227         if (node) {
3228                 cache = rb_entry(node, struct btrfs_block_group_cache,
3229                                  cache_node);
3230                 btrfs_get_block_group(cache);
3231         } else
3232                 cache = NULL;
3233         spin_unlock(&root->fs_info->block_group_cache_lock);
3234         return cache;
3235 }
3236
3237 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3238                             struct btrfs_trans_handle *trans,
3239                             struct btrfs_path *path)
3240 {
3241         struct btrfs_root *root = block_group->fs_info->tree_root;
3242         struct inode *inode = NULL;
3243         u64 alloc_hint = 0;
3244         int dcs = BTRFS_DC_ERROR;
3245         u64 num_pages = 0;
3246         int retries = 0;
3247         int ret = 0;
3248
3249         /*
3250          * If this block group is smaller than 100 megs, don't bother caching the
3251          * block group.
3252          */
3253         if (block_group->key.offset < (100 * 1024 * 1024)) {
3254                 spin_lock(&block_group->lock);
3255                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3256                 spin_unlock(&block_group->lock);
3257                 return 0;
3258         }
3259
3260         if (trans->aborted)
3261                 return 0;
3262 again:
3263         inode = lookup_free_space_inode(root, block_group, path);
3264         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3265                 ret = PTR_ERR(inode);
3266                 btrfs_release_path(path);
3267                 goto out;
3268         }
3269
3270         if (IS_ERR(inode)) {
3271                 BUG_ON(retries);
3272                 retries++;
3273
3274                 if (block_group->ro)
3275                         goto out_free;
3276
3277                 ret = create_free_space_inode(root, trans, block_group, path);
3278                 if (ret)
3279                         goto out_free;
3280                 goto again;
3281         }
3282
3283         /* We've already set up this transaction, go ahead and exit */
3284         if (block_group->cache_generation == trans->transid &&
3285             i_size_read(inode)) {
3286                 dcs = BTRFS_DC_SETUP;
3287                 goto out_put;
3288         }
3289
3290         /*
3291          * We want to set the generation to 0 so that if anything goes wrong
3292          * from here on out we know not to trust this cache when we load it up
3293          * next time.
3294          */
3295         BTRFS_I(inode)->generation = 0;
3296         ret = btrfs_update_inode(trans, root, inode);
3297         if (ret) {
3298                 /*
3299                  * Theoretically we could recover from this by simply setting the
3300                  * super cache generation to 0 so we know to invalidate the
3301                  * cache, but then we'd have to keep track of the block groups
3302                  * that fail this way so we know we _have_ to reset this cache
3303                  * before the next commit or risk reading stale cache.  So to
3304                  * limit our exposure to horrible edge cases, let's just abort
3305                  * the transaction; this only happens in really bad situations
3306                  * anyway.
3307                  */
3308                 btrfs_abort_transaction(trans, root, ret);
3309                 goto out_put;
3310         }
3311         WARN_ON(ret);
3312
3313         if (i_size_read(inode) > 0) {
3314                 ret = btrfs_check_trunc_cache_free_space(root,
3315                                         &root->fs_info->global_block_rsv);
3316                 if (ret)
3317                         goto out_put;
3318
3319                 ret = btrfs_truncate_free_space_cache(root, trans, NULL, inode);
3320                 if (ret)
3321                         goto out_put;
3322         }
3323
3324         spin_lock(&block_group->lock);
3325         if (block_group->cached != BTRFS_CACHE_FINISHED ||
3326             !btrfs_test_opt(root, SPACE_CACHE)) {
3327                 /*
3328                  * don't bother trying to write stuff out _if_
3329                  * a) we're not cached,
3330                  * b) we were mounted with the nospace_cache option.
3331                  */
3332                 dcs = BTRFS_DC_WRITTEN;
3333                 spin_unlock(&block_group->lock);
3334                 goto out_put;
3335         }
3336         spin_unlock(&block_group->lock);
3337
3338         /*
3339          * Try to preallocate enough space based on how big the block group is.
3340          * Keep in mind this has to include any pinned space which could end up
3341          * taking up quite a bit since it's not folded into the other space
3342          * cache.
3343          */
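        /*
         * E.g. (illustrative, assuming 4K pages): a 1GiB block group
         * gives div_u64(1G, 256M) = 4 units, * 16 = 64 pages, and
         * 64 * 4096 = 256KiB preallocated for its free space cache.
         */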
3344         num_pages = div_u64(block_group->key.offset, 256 * 1024 * 1024);
3345         if (!num_pages)
3346                 num_pages = 1;
3347
3348         num_pages *= 16;
3349         num_pages *= PAGE_CACHE_SIZE;
3350
3351         ret = btrfs_check_data_free_space(inode, num_pages, num_pages);
3352         if (ret)
3353                 goto out_put;
3354
3355         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3356                                               num_pages, num_pages,
3357                                               &alloc_hint);
3358         if (!ret)
3359                 dcs = BTRFS_DC_SETUP;
3360         btrfs_free_reserved_data_space(inode, num_pages);
3361
3362 out_put:
3363         iput(inode);
3364 out_free:
3365         btrfs_release_path(path);
3366 out:
3367         spin_lock(&block_group->lock);
3368         if (!ret && dcs == BTRFS_DC_SETUP)
3369                 block_group->cache_generation = trans->transid;
3370         block_group->disk_cache_state = dcs;
3371         spin_unlock(&block_group->lock);
3372
3373         return ret;
3374 }
3375
3376 int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
3377                             struct btrfs_root *root)
3378 {
3379         struct btrfs_block_group_cache *cache, *tmp;
3380         struct btrfs_transaction *cur_trans = trans->transaction;
3381         struct btrfs_path *path;
3382
3383         if (list_empty(&cur_trans->dirty_bgs) ||
3384             !btrfs_test_opt(root, SPACE_CACHE))
3385                 return 0;
3386
3387         path = btrfs_alloc_path();
3388         if (!path)
3389                 return -ENOMEM;
3390
3391         /* Could add new block groups, use _safe just in case */
3392         list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
3393                                  dirty_list) {
3394                 if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3395                         cache_save_setup(cache, trans, path);
3396         }
3397
3398         btrfs_free_path(path);
3399         return 0;
3400 }
3401
3402 /*
3403  * transaction commit does final block group cache writeback during a
3404  * critical section where nothing is allowed to change the FS.  This is
3405  * required in order for the cache to actually match the block group,
3406  * but can introduce a lot of latency into the commit.
3407  *
3408  * So, btrfs_start_dirty_block_groups is here to kick off block group
3409  * cache IO.  There's a chance we'll have to redo some of it if the
3410  * block group changes again during the commit, but it greatly reduces
3411  * the commit latency by getting rid of the easy block groups while
3412  * we're still allowing others to join the commit.
3413  */
3414 int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
3415                                    struct btrfs_root *root)
3416 {
3417         struct btrfs_block_group_cache *cache;
3418         struct btrfs_transaction *cur_trans = trans->transaction;
3419         int ret = 0;
3420         int should_put;
3421         struct btrfs_path *path = NULL;
3422         LIST_HEAD(dirty);
3423         struct list_head *io = &cur_trans->io_bgs;
3424         int num_started = 0;
3425         int loops = 0;
3426
3427         spin_lock(&cur_trans->dirty_bgs_lock);
3428         if (list_empty(&cur_trans->dirty_bgs)) {
3429                 spin_unlock(&cur_trans->dirty_bgs_lock);
3430                 return 0;
3431         }
3432         list_splice_init(&cur_trans->dirty_bgs, &dirty);
3433         spin_unlock(&cur_trans->dirty_bgs_lock);
3434
3435 again:
3436         /*
3437          * make sure all the block groups on our dirty list actually
3438          * exist
3439          */
3440         btrfs_create_pending_block_groups(trans, root);
3441
3442         if (!path) {
3443                 path = btrfs_alloc_path();
3444                 if (!path)
3445                         return -ENOMEM;
3446         }
3447
3448         /*
3449          * cache_write_mutex is here only to save us from balance or automatic
3450          * removal of empty block groups deleting this block group while we are
3451          * writing out the cache
3452          */
3453         mutex_lock(&trans->transaction->cache_write_mutex);
3454         while (!list_empty(&dirty)) {
3455                 cache = list_first_entry(&dirty,
3456                                          struct btrfs_block_group_cache,
3457                                          dirty_list);
3458                 /*
3459                  * this can happen if something re-dirties a block
3460                  * group that is already under IO.  Just wait for it to
3461                  * finish and then do it all again
3462                  */
3463                 if (!list_empty(&cache->io_list)) {
3464                         list_del_init(&cache->io_list);
3465                         btrfs_wait_cache_io(root, trans, cache,
3466                                             &cache->io_ctl, path,
3467                                             cache->key.objectid);
3468                         btrfs_put_block_group(cache);
3469                 }
3470
3472                 /*
3473                  * btrfs_wait_cache_io uses the cache->dirty_list to decide
3474                  * if it should update the cache_state.  Don't delete
3475                  * until after we wait.
3476                  *
3477                  * Since we're not running in the commit critical section
3478                  * we need the dirty_bgs_lock to protect from update_block_group
3479                  */
3480                 spin_lock(&cur_trans->dirty_bgs_lock);
3481                 list_del_init(&cache->dirty_list);
3482                 spin_unlock(&cur_trans->dirty_bgs_lock);
3483
3484                 should_put = 1;
3485
3486                 cache_save_setup(cache, trans, path);
3487
3488                 if (cache->disk_cache_state == BTRFS_DC_SETUP) {
3489                         cache->io_ctl.inode = NULL;
3490                         ret = btrfs_write_out_cache(root, trans, cache, path);
3491                         if (ret == 0 && cache->io_ctl.inode) {
3492                                 num_started++;
3493                                 should_put = 0;
3494
3495                                 /*
3496                                  * the cache_write_mutex is protecting
3497                                  * the io_list
3498                                  */
3499                                 list_add_tail(&cache->io_list, io);
3500                         } else {
3501                                 /*
3502                                  * if we failed to write the cache, the
3503                                  * generation will be bad and life goes on
3504                                  */
3505                                 ret = 0;
3506                         }
3507                 }
3508                 if (!ret) {
3509                         ret = write_one_cache_group(trans, root, path, cache);
3510                         /*
3511                          * Our block group might still be attached to the list
3512                          * of new block groups in the transaction handle of some
3513                          * other task (struct btrfs_trans_handle->new_bgs). This
3514                          * means its block group item isn't yet in the extent
3515                          * tree. If this happens ignore the error, as we will
3516                          * try again later in the critical section of the
3517                          * transaction commit.
3518                          */
3519                         if (ret == -ENOENT) {
3520                                 ret = 0;
3521                                 spin_lock(&cur_trans->dirty_bgs_lock);
3522                                 if (list_empty(&cache->dirty_list)) {
3523                                         list_add_tail(&cache->dirty_list,
3524                                                       &cur_trans->dirty_bgs);
3525                                         btrfs_get_block_group(cache);
3526                                 }
3527                                 spin_unlock(&cur_trans->dirty_bgs_lock);
3528                         } else if (ret) {
3529                                 btrfs_abort_transaction(trans, root, ret);
3530                         }
3531                 }
3532
3533                 /* if it's not on the io list, we need to put the block group */
3534                 if (should_put)
3535                         btrfs_put_block_group(cache);
3536
3537                 if (ret)
3538                         break;
3539
3540                 /*
3541                  * Avoid blocking other tasks for too long. It might even save
3542                  * us from writing caches for block groups that are going to be
3543                  * removed.
3544                  */
3545                 mutex_unlock(&trans->transaction->cache_write_mutex);
3546                 mutex_lock(&trans->transaction->cache_write_mutex);
3547         }
3548         mutex_unlock(&trans->transaction->cache_write_mutex);
3549
3550         /*
3551          * go through delayed refs for all the stuff we've just kicked off
3552          * and then loop back (just once)
3553          */
3554         ret = btrfs_run_delayed_refs(trans, root, 0);
3555         if (!ret && loops == 0) {
3556                 loops++;
3557                 spin_lock(&cur_trans->dirty_bgs_lock);
3558                 list_splice_init(&cur_trans->dirty_bgs, &dirty);
3559                 /*
3560                  * dirty_bgs_lock protects us from concurrent block group
3561                  * deletes too (not just cache_write_mutex).
3562                  */
3563                 if (!list_empty(&dirty)) {
3564                         spin_unlock(&cur_trans->dirty_bgs_lock);
3565                         goto again;
3566                 }
3567                 spin_unlock(&cur_trans->dirty_bgs_lock);
3568         }
3569
3570         btrfs_free_path(path);
3571         return ret;
3572 }
3573
3574 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3575                                    struct btrfs_root *root)
3576 {
3577         struct btrfs_block_group_cache *cache;
3578         struct btrfs_transaction *cur_trans = trans->transaction;
3579         int ret = 0;
3580         int should_put;
3581         struct btrfs_path *path;
3582         struct list_head *io = &cur_trans->io_bgs;
3583         int num_started = 0;
3584
3585         path = btrfs_alloc_path();
3586         if (!path)
3587                 return -ENOMEM;
3588
3589         /*
3590          * We don't need the lock here since we are protected by the transaction
3591          * commit.  We want to do the cache_save_setup first and then run the
3592          * delayed refs to make sure we have the best chance at doing this all
3593          * in one shot.
3594          */
3595         while (!list_empty(&cur_trans->dirty_bgs)) {
3596                 cache = list_first_entry(&cur_trans->dirty_bgs,
3597                                          struct btrfs_block_group_cache,
3598                                          dirty_list);
3599
3600                 /*
3601                  * this can happen if cache_save_setup re-dirties a block
3602                  * group that is already under IO.  Just wait for it to
3603                  * finish and then do it all again
3604                  */
3605                 if (!list_empty(&cache->io_list)) {
3606                         list_del_init(&cache->io_list);
3607                         btrfs_wait_cache_io(root, trans, cache,
3608                                             &cache->io_ctl, path,
3609                                             cache->key.objectid);
3610                         btrfs_put_block_group(cache);
3611                 }
3612
3613                 /*
3614                  * don't remove from the dirty list until after we've waited
3615                  * on any pending IO
3616                  */
3617                 list_del_init(&cache->dirty_list);
3618                 should_put = 1;
3619
3620                 cache_save_setup(cache, trans, path);
3621
3622                 if (!ret)
3623                         ret = btrfs_run_delayed_refs(trans, root, (unsigned long) -1);
3624
3625                 if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
3626                         cache->io_ctl.inode = NULL;
3627                         ret = btrfs_write_out_cache(root, trans, cache, path);
3628                         if (ret == 0 && cache->io_ctl.inode) {
3629                                 num_started++;
3630                                 should_put = 0;
3631                                 list_add_tail(&cache->io_list, io);
3632                         } else {
3633                                 /*
3634                                  * if we failed to write the cache, the
3635                                  * generation will be bad and life goes on
3636                                  */
3637                                 ret = 0;
3638                         }
3639                 }
3640                 if (!ret) {
3641                         ret = write_one_cache_group(trans, root, path, cache);
3642                         if (ret)
3643                                 btrfs_abort_transaction(trans, root, ret);
3644                 }
3645
3646                 /* if it's not on the io list, we need to put the block group */
3647                 if (should_put)
3648                         btrfs_put_block_group(cache);
3649         }
3650
3651         while (!list_empty(io)) {
3652                 cache = list_first_entry(io, struct btrfs_block_group_cache,
3653                                          io_list);
3654                 list_del_init(&cache->io_list);
3655                 btrfs_wait_cache_io(root, trans, cache,
3656                                     &cache->io_ctl, path, cache->key.objectid);
3657                 btrfs_put_block_group(cache);
3658         }
3659
3660         btrfs_free_path(path);
3661         return ret;
3662 }
3663
3664 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3665 {
3666         struct btrfs_block_group_cache *block_group;
3667         int readonly = 0;
3668
3669         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3670         if (!block_group || block_group->ro)
3671                 readonly = 1;
3672         if (block_group)
3673                 btrfs_put_block_group(block_group);
3674         return readonly;
3675 }
3676
3677 static const char *alloc_name(u64 flags)
3678 {
3679         switch (flags) {
3680         case BTRFS_BLOCK_GROUP_METADATA|BTRFS_BLOCK_GROUP_DATA:
3681                 return "mixed";
3682         case BTRFS_BLOCK_GROUP_METADATA:
3683                 return "metadata";
3684         case BTRFS_BLOCK_GROUP_DATA:
3685                 return "data";
3686         case BTRFS_BLOCK_GROUP_SYSTEM:
3687                 return "system";
3688         default:
3689                 WARN_ON(1);
3690                 return "invalid-combination";
3691         }
3692 }
3693
3694 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3695                              u64 total_bytes, u64 bytes_used,
3696                              struct btrfs_space_info **space_info)
3697 {
3698         struct btrfs_space_info *found;
3699         int i;
3700         int factor;
3701         int ret;
3702
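        /*
         * DUP, RAID1 and RAID10 keep two copies of every byte, so their
         * raw disk footprint is twice the logical usage; e.g. 1GiB of
         * bytes_used counts as 2GiB of disk_used below.
         */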
3703         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3704                      BTRFS_BLOCK_GROUP_RAID10))
3705                 factor = 2;
3706         else
3707                 factor = 1;
3708
3709         found = __find_space_info(info, flags);
3710         if (found) {
3711                 spin_lock(&found->lock);
3712                 found->total_bytes += total_bytes;
3713                 found->disk_total += total_bytes * factor;
3714                 found->bytes_used += bytes_used;
3715                 found->disk_used += bytes_used * factor;
3716                 if (total_bytes > 0)
3717                         found->full = 0;
3718                 spin_unlock(&found->lock);
3719                 *space_info = found;
3720                 return 0;
3721         }
3722         found = kzalloc(sizeof(*found), GFP_NOFS);
3723         if (!found)
3724                 return -ENOMEM;
3725
3726         ret = percpu_counter_init(&found->total_bytes_pinned, 0, GFP_KERNEL);
3727         if (ret) {
3728                 kfree(found);
3729                 return ret;
3730         }
3731
3732         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3733                 INIT_LIST_HEAD(&found->block_groups[i]);
3734         init_rwsem(&found->groups_sem);
3735         spin_lock_init(&found->lock);
3736         found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3737         found->total_bytes = total_bytes;
3738         found->disk_total = total_bytes * factor;
3739         found->bytes_used = bytes_used;
3740         found->disk_used = bytes_used * factor;
3741         found->bytes_pinned = 0;
3742         found->bytes_reserved = 0;
3743         found->bytes_readonly = 0;
3744         found->bytes_may_use = 0;
3745         if (total_bytes > 0)
3746                 found->full = 0;
3747         else
3748                 found->full = 1;
3749         found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3750         found->chunk_alloc = 0;
3751         found->flush = 0;
3752         init_waitqueue_head(&found->wait);
3753         INIT_LIST_HEAD(&found->ro_bgs);
3754
3755         ret = kobject_init_and_add(&found->kobj, &space_info_ktype,
3756                                     info->space_info_kobj, "%s",
3757                                     alloc_name(found->flags));
3758         if (ret) {
                percpu_counter_destroy(&found->total_bytes_pinned);
3759                 kfree(found);
3760                 return ret;
3761         }
3762
3763         *space_info = found;
3764         list_add_rcu(&found->list, &info->space_info);
3765         if (flags & BTRFS_BLOCK_GROUP_DATA)
3766                 info->data_sinfo = found;
3767
3768         return ret;
3769 }
3770
3771 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3772 {
3773         u64 extra_flags = chunk_to_extended(flags) &
3774                                 BTRFS_EXTENDED_PROFILE_MASK;
3775
3776         write_seqlock(&fs_info->profiles_lock);
3777         if (flags & BTRFS_BLOCK_GROUP_DATA)
3778                 fs_info->avail_data_alloc_bits |= extra_flags;
3779         if (flags & BTRFS_BLOCK_GROUP_METADATA)
3780                 fs_info->avail_metadata_alloc_bits |= extra_flags;
3781         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3782                 fs_info->avail_system_alloc_bits |= extra_flags;
3783         write_sequnlock(&fs_info->profiles_lock);
3784 }
3785
3786 /*
3787  * returns target flags in extended format or 0 if restripe for this
3788  * chunk_type is not in progress
3789  *
3790  * should be called with either volume_mutex or balance_lock held
3791  */
3792 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3793 {
3794         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3795         u64 target = 0;
3796
3797         if (!bctl)
3798                 return 0;
3799
3800         if (flags & BTRFS_BLOCK_GROUP_DATA &&
3801             bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3802                 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3803         } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3804                    bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3805                 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3806         } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3807                    bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3808                 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3809         }
3810
3811         return target;
3812 }
3813
3814 /*
3815  * @flags: available profiles in extended format (see ctree.h)
3816  *
3817  * Returns reduced profile in chunk format.  If profile changing is in
3818  * progress (either running or paused) picks the target profile (if it's
3819  * already available), otherwise falls back to plain reducing.
3820  */
3821 static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3822 {
3823         u64 num_devices = root->fs_info->fs_devices->rw_devices;
3824         u64 target;
3825         u64 tmp;
3826
3827         /*
3828          * see if restripe for this chunk_type is in progress, if so
3829          * try to reduce to the target profile
3830          */
3831         spin_lock(&root->fs_info->balance_lock);
3832         target = get_restripe_target(root->fs_info, flags);
3833         if (target) {
3834                 /* pick target profile only if it's already available */
3835                 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3836                         spin_unlock(&root->fs_info->balance_lock);
3837                         return extended_to_chunk(target);
3838                 }
3839         }
3840         spin_unlock(&root->fs_info->balance_lock);
3841
3842         /* First, mask out the RAID levels which aren't possible */
3843         if (num_devices == 1)
3844                 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0 |
3845                            BTRFS_BLOCK_GROUP_RAID5);
3846         if (num_devices < 3)
3847                 flags &= ~BTRFS_BLOCK_GROUP_RAID6;
3848         if (num_devices < 4)
3849                 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3850
3851         tmp = flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3852                        BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID5 |
3853                        BTRFS_BLOCK_GROUP_RAID6 | BTRFS_BLOCK_GROUP_RAID10);
3854         flags &= ~tmp;
3855
3856         if (tmp & BTRFS_BLOCK_GROUP_RAID6)
3857                 tmp = BTRFS_BLOCK_GROUP_RAID6;
3858         else if (tmp & BTRFS_BLOCK_GROUP_RAID5)
3859                 tmp = BTRFS_BLOCK_GROUP_RAID5;
3860         else if (tmp & BTRFS_BLOCK_GROUP_RAID10)
3861                 tmp = BTRFS_BLOCK_GROUP_RAID10;
3862         else if (tmp & BTRFS_BLOCK_GROUP_RAID1)
3863                 tmp = BTRFS_BLOCK_GROUP_RAID1;
3864         else if (tmp & BTRFS_BLOCK_GROUP_RAID0)
3865                 tmp = BTRFS_BLOCK_GROUP_RAID0;
3866
3867         return extended_to_chunk(flags | tmp);
3868 }
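/*
 * Example (illustrative): with available profiles RAID1|RAID10 on a
 * two-device filesystem, RAID10 is masked out (it needs at least four
 * devices) and RAID1 wins the priority ordering above, so the reduced
 * chunk-format profile is RAID1.
 */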
3869
3870 static u64 get_alloc_profile(struct btrfs_root *root, u64 orig_flags)
3871 {
3872         unsigned seq;
3873         u64 flags;
3874
3875         do {
3876                 flags = orig_flags;
3877                 seq = read_seqbegin(&root->fs_info->profiles_lock);
3878
3879                 if (flags & BTRFS_BLOCK_GROUP_DATA)
3880                         flags |= root->fs_info->avail_data_alloc_bits;
3881                 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3882                         flags |= root->fs_info->avail_system_alloc_bits;
3883                 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3884                         flags |= root->fs_info->avail_metadata_alloc_bits;
3885         } while (read_seqretry(&root->fs_info->profiles_lock, seq));
3886
3887         return btrfs_reduce_alloc_profile(root, flags);
3888 }
3889
3890 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3891 {
3892         u64 flags;
3893         u64 ret;
3894
3895         if (data)
3896                 flags = BTRFS_BLOCK_GROUP_DATA;
3897         else if (root == root->fs_info->chunk_root)
3898                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3899         else
3900                 flags = BTRFS_BLOCK_GROUP_METADATA;
3901
3902         ret = get_alloc_profile(root, flags);
3903         return ret;
3904 }
3905
3906 /*
3907  * This will check the space that the inode allocates from to make sure we have
3908  * enough space for bytes.
3909  */
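/*
 * Outline of the logic below (summary): try the cheap path first; if the
 * data space_info is short, force-allocate a new data chunk; failing
 * that, commit the transaction up to twice (waiting for ordered extents
 * before the first commit) to reclaim pinned space, and only then give
 * up with -ENOSPC.
 */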
3910 int btrfs_check_data_free_space(struct inode *inode, u64 bytes, u64 write_bytes)
3911 {
3912         struct btrfs_space_info *data_sinfo;
3913         struct btrfs_root *root = BTRFS_I(inode)->root;
3914         struct btrfs_fs_info *fs_info = root->fs_info;
3915         u64 used;
3916         int ret = 0;
3917         int need_commit = 2;
3918         int have_pinned_space;
3919
3920         /* make sure bytes are sectorsize aligned */
3921         bytes = ALIGN(bytes, root->sectorsize);
3922
3923         if (btrfs_is_free_space_inode(inode)) {
3924                 need_commit = 0;
3925                 ASSERT(current->journal_info);
3926         }
3927
3928         data_sinfo = fs_info->data_sinfo;
3929         if (!data_sinfo)
3930                 goto alloc;
3931
3932 again:
3933         /* make sure we have enough space to handle the data first */
3934         spin_lock(&data_sinfo->lock);
3935         used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3936                 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3937                 data_sinfo->bytes_may_use;
3938
3939         if (used + bytes > data_sinfo->total_bytes) {
3940                 struct btrfs_trans_handle *trans;
3941
3942                 /*
3943                  * if we don't have enough free bytes in this space then we need
3944                  * to alloc a new chunk.
3945                  */
3946                 if (!data_sinfo->full) {
3947                         u64 alloc_target;
3948
3949                         data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3950                         spin_unlock(&data_sinfo->lock);
3951 alloc:
3952                         alloc_target = btrfs_get_alloc_profile(root, 1);
3953                         /*
3954                          * It is ugly that we don't call a nolock join
3955                          * transaction for the free space inode case here.
3956                          * But it is safe because we only do the data space
3957                          * reservation for the free space cache in the
3958                          * transaction context; the common join transaction
3959                          * just increases the use count of the current
3960                          * transaction handle and doesn't try to acquire the
3961                          * trans_lock of the fs.
3962                          */
3963                         trans = btrfs_join_transaction(root);
3964                         if (IS_ERR(trans))
3965                                 return PTR_ERR(trans);
3966
3967                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3968                                              alloc_target,
3969                                              CHUNK_ALLOC_NO_FORCE);
3970                         btrfs_end_transaction(trans, root);
3971                         if (ret < 0) {
3972                                 if (ret != -ENOSPC)
3973                                         return ret;
3974                                 else {
3975                                         have_pinned_space = 1;
3976                                         goto commit_trans;
3977                                 }
3978                         }
3979
3980                         if (!data_sinfo)
3981                                 data_sinfo = fs_info->data_sinfo;
3982
3983                         goto again;
3984                 }
3985
3986                 /*
3987                  * If we don't have enough pinned space to deal with this
3988                  * allocation, and no chunk was removed in the current transaction,
3989                  * don't bother committing the transaction.
3990                  */
3991                 have_pinned_space = percpu_counter_compare(
3992                         &data_sinfo->total_bytes_pinned,
3993                         used + bytes - data_sinfo->total_bytes);
3994                 spin_unlock(&data_sinfo->lock);
3995
3996                 /* commit the current transaction and try again */
3997 commit_trans:
3998                 if (need_commit &&
3999                     !atomic_read(&root->fs_info->open_ioctl_trans)) {
4000                         need_commit--;
4001
4002                         if (need_commit > 0)
4003                                 btrfs_wait_ordered_roots(fs_info, -1);
4004
4005                         trans = btrfs_join_transaction(root);
4006                         if (IS_ERR(trans))
4007                                 return PTR_ERR(trans);
4008                         if (have_pinned_space >= 0 ||
4009                             trans->transaction->have_free_bgs ||
4010                             need_commit > 0) {
4011                                 ret = btrfs_commit_transaction(trans, root);
4012                                 if (ret)
4013                                         return ret;
4014                                 /*
4015                                  * make sure that all running delayed iputs are
4016                                  * done
4017                                  */
4018                                 down_write(&root->fs_info->delayed_iput_sem);
4019                                 up_write(&root->fs_info->delayed_iput_sem);
4020                                 goto again;
4021                         } else {
4022                                 btrfs_end_transaction(trans, root);
4023                         }
4024                 }
4025
4026                 trace_btrfs_space_reservation(root->fs_info,
4027                                               "space_info:enospc",
4028                                               data_sinfo->flags, bytes, 1);
4029                 return -ENOSPC;
4030         }
4031         ret = btrfs_qgroup_reserve(root, write_bytes);
4032         if (ret)
4033                 goto out;
4034         data_sinfo->bytes_may_use += bytes;
4035         trace_btrfs_space_reservation(root->fs_info, "space_info",
4036                                       data_sinfo->flags, bytes, 1);
4037 out:
4038         spin_unlock(&data_sinfo->lock);
4039
4040         return ret;
4041 }
4042
4043 /*
4044  * Called if we need to clear a data reservation for this inode.
4045  */
4046 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
4047 {
4048         struct btrfs_root *root = BTRFS_I(inode)->root;
4049         struct btrfs_space_info *data_sinfo;
4050
4051         /* make sure bytes are sectorsize aligned */
4052         bytes = ALIGN(bytes, root->sectorsize);
4053
4054         data_sinfo = root->fs_info->data_sinfo;
4055         spin_lock(&data_sinfo->lock);
4056         WARN_ON(data_sinfo->bytes_may_use < bytes);
4057         data_sinfo->bytes_may_use -= bytes;
4058         trace_btrfs_space_reservation(root->fs_info, "space_info",
4059                                       data_sinfo->flags, bytes, 0);
4060         spin_unlock(&data_sinfo->lock);
4061 }
4062
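/*
 * Flag every metadata space_info so that the next allocation attempt in it
 * forces a chunk allocation (CHUNK_ALLOC_FORCE).
 */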
4063 static void force_metadata_allocation(struct btrfs_fs_info *info)
4064 {
4065         struct list_head *head = &info->space_info;
4066         struct btrfs_space_info *found;
4067
4068         rcu_read_lock();
4069         list_for_each_entry_rcu(found, head, list) {
4070                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
4071                         found->force_alloc = CHUNK_ALLOC_FORCE;
4072         }
4073         rcu_read_unlock();
4074 }
4075
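/*
 * For allocation/overcommit decisions, treat twice the global reserve size
 * as space that must stay available.
 */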
4076 static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
4077 {
4078         return (global->size << 1);
4079 }
4080
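/*
 * Decide whether a new chunk should be allocated for @sinfo, honouring the
 * requested @force level and leaving room for the global reserve.
 */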
4081 static int should_alloc_chunk(struct btrfs_root *root,
4082                               struct btrfs_space_info *sinfo, int force)
4083 {
4084         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4085         u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
4086         u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
4087         u64 thresh;
4088
4089         if (force == CHUNK_ALLOC_FORCE)
4090                 return 1;
4091
4092         /*
4093          * We need to take into account the global rsv because for all intents
4094          * and purposes it's used space.  Don't worry about locking the
4095          * global_rsv, it doesn't change except when the transaction commits.
4096          */
4097         if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
4098                 num_allocated += calc_global_rsv_need_space(global_rsv);
4099
4100         /*
4101          * in limited mode, we want to have some free space up to
4102          * about 1% of the FS size.
4103          */
4104         if (force == CHUNK_ALLOC_LIMITED) {
4105                 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
4106                 thresh = max_t(u64, 64 * 1024 * 1024,
4107                                div_factor_fine(thresh, 1));
4108
4109                 if (num_bytes - num_allocated < thresh)
4110                         return 1;
4111         }
4112
4113         if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
4114                 return 0;
4115         return 1;
4116 }
4117
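/*
 * Number of device items that need updating when allocating or removing a
 * chunk with the given profile @type.
 */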
4118 static u64 get_profile_num_devs(struct btrfs_root *root, u64 type)
4119 {
4120         u64 num_dev;
4121
4122         if (type & (BTRFS_BLOCK_GROUP_RAID10 |
4123                     BTRFS_BLOCK_GROUP_RAID0 |
4124                     BTRFS_BLOCK_GROUP_RAID5 |
4125                     BTRFS_BLOCK_GROUP_RAID6))
4126                 num_dev = root->fs_info->fs_devices->rw_devices;
4127         else if (type & BTRFS_BLOCK_GROUP_RAID1)
4128                 num_dev = 2;
4129         else
4130                 num_dev = 1;    /* DUP or single */
4131
4132         return num_dev;
4133 }
4134
4135 /*
4136  * Reserve space in the system space_info needed for allocating or removing a
4137  * chunk of the given @type; this covers updating the device items and adding
4138  * or removing the chunk item.
4139  */
4140 void check_system_chunk(struct btrfs_trans_handle *trans,
4141                         struct btrfs_root *root,
4142                         u64 type)
4143 {
4144         struct btrfs_space_info *info;
4145         u64 left;
4146         u64 thresh;
4147         int ret = 0;
4148         u64 num_devs;
4149
4150         /*
4151          * Needed because we can end up allocating a system chunk and need an
4152          * atomic and race-free space reservation in the chunk block reserve.
4153          */
4154         ASSERT(mutex_is_locked(&root->fs_info->chunk_mutex));
4155
4156         info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4157         spin_lock(&info->lock);
4158         left = info->total_bytes - info->bytes_used - info->bytes_pinned -
4159                 info->bytes_reserved - info->bytes_readonly -
4160                 info->bytes_may_use;
4161         spin_unlock(&info->lock);
4162
4163         num_devs = get_profile_num_devs(root, type);
4164
4165         /* num_devs device items to update and 1 chunk item to add or remove */
4166         thresh = btrfs_calc_trunc_metadata_size(root, num_devs) +
4167                 btrfs_calc_trans_metadata_size(root, 1);
4168
4169         if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
4170                 btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
4171                         left, thresh, type);
4172                 dump_space_info(info, 0, 0);
4173         }
4174
4175         if (left < thresh) {
4176                 u64 flags;
4177
4178                 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
4179                 /*
4180                  * Ignore failure to create system chunk. We might end up not
4181                  * needing it, as we might not need to COW all nodes/leaves from
4182                  * the paths we visit in the chunk tree (they were already COWed
4183                  * or created in the current transaction for example).
4184                  */
4185                 ret = btrfs_alloc_chunk(trans, root, flags);
4186         }
4187
4188         if (!ret) {
4189                 ret = btrfs_block_rsv_add(root->fs_info->chunk_root,
4190                                           &root->fs_info->chunk_block_rsv,
4191                                           thresh, BTRFS_RESERVE_NO_FLUSH);
4192                 if (!ret)
4193                         trans->chunk_bytes_reserved += thresh;
4194         }
4195 }
4196
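/*
 * Try to allocate a chunk for @flags.  Returns 1 if a chunk was allocated,
 * 0 if none was needed and a negative errno on failure.  Concurrent callers
 * are serialized via space_info->chunk_alloc and the chunk_mutex.
 */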
4197 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
4198                           struct btrfs_root *extent_root, u64 flags, int force)
4199 {
4200         struct btrfs_space_info *space_info;
4201         struct btrfs_fs_info *fs_info = extent_root->fs_info;
4202         int wait_for_alloc = 0;
4203         int ret = 0;
4204
4205         /* Don't re-enter if we're already allocating a chunk */
4206         if (trans->allocating_chunk)
4207                 return -ENOSPC;
4208
4209         space_info = __find_space_info(extent_root->fs_info, flags);
4210         if (!space_info) {
4211                 ret = update_space_info(extent_root->fs_info, flags,
4212                                         0, 0, &space_info);
4213                 BUG_ON(ret); /* -ENOMEM */
4214         }
4215         BUG_ON(!space_info); /* Logic error */
4216
4217 again:
4218         spin_lock(&space_info->lock);
4219         if (force < space_info->force_alloc)
4220                 force = space_info->force_alloc;
4221         if (space_info->full) {
4222                 if (should_alloc_chunk(extent_root, space_info, force))
4223                         ret = -ENOSPC;
4224                 else
4225                         ret = 0;
4226                 spin_unlock(&space_info->lock);
4227                 return ret;
4228         }
4229
4230         if (!should_alloc_chunk(extent_root, space_info, force)) {
4231                 spin_unlock(&space_info->lock);
4232                 return 0;
4233         } else if (space_info->chunk_alloc) {
4234                 wait_for_alloc = 1;
4235         } else {
4236                 space_info->chunk_alloc = 1;
4237         }
4238
4239         spin_unlock(&space_info->lock);
4240
4241         mutex_lock(&fs_info->chunk_mutex);
4242
4243         /*
4244          * The chunk_mutex is held throughout the entirety of a chunk
4245          * allocation, so once we've acquired the chunk_mutex we know that the
4246          * other guy is done and we need to recheck and see if we should
4247          * allocate.
4248          */
4249         if (wait_for_alloc) {
4250                 mutex_unlock(&fs_info->chunk_mutex);
4251                 wait_for_alloc = 0;
4252                 goto again;
4253         }
4254
4255         trans->allocating_chunk = true;
4256
4257         /*
4258          * If we have mixed data/metadata chunks we want to make sure we keep
4259          * allocating mixed chunks instead of individual chunks.
4260          */
4261         if (btrfs_mixed_space_info(space_info))
4262                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
4263
4264         /*
4265          * if we're doing a data chunk, go ahead and make sure that
4266          * we keep a reasonable number of metadata chunks allocated in the
4267          * FS as well.
4268          */
4269         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
4270                 fs_info->data_chunk_allocations++;
4271                 if (!(fs_info->data_chunk_allocations %
4272                       fs_info->metadata_ratio))
4273                         force_metadata_allocation(fs_info);
4274         }
4275
4276         /*
4277          * Check if we have enough space in SYSTEM chunk because we may need
4278          * to update devices.
4279          */
4280         check_system_chunk(trans, extent_root, flags);
4281
4282         ret = btrfs_alloc_chunk(trans, extent_root, flags);
4283         trans->allocating_chunk = false;
4284
4285         spin_lock(&space_info->lock);
4286         if (ret < 0 && ret != -ENOSPC)
4287                 goto out;
4288         if (ret)
4289                 space_info->full = 1;
4290         else
4291                 ret = 1;
4292
4293         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
4294 out:
4295         space_info->chunk_alloc = 0;
4296         spin_unlock(&space_info->lock);
4297         mutex_unlock(&fs_info->chunk_mutex);
4298         /*
4299          * When we allocate a new chunk we reserve space in the chunk block
4300          * reserve to make sure we can COW nodes/leaves in the chunk tree or
4301          * add new nodes/leaves to it if we end up needing to do it when
4302          * inserting the chunk item and updating device items as part of the
4303          * second phase of chunk allocation, performed by
4304          * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
4305          * large number of new block groups to create in our transaction
4306          * handle's new_bgs list to avoid exhausting the chunk block reserve
4307          * in extreme cases - like having a single transaction create many new
4308          * block groups when starting to write out the free space caches of all
4309          * the block groups that were made dirty during the lifetime of the
4310          * transaction.
4311          */
4312         if (trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) {
4313                 btrfs_create_pending_block_groups(trans, trans->root);
4314                 btrfs_trans_release_chunk_metadata(trans);
4315         }
4316         return ret;
4317 }
4318
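/*
 * Return 1 if reserving @bytes on top of the current usage of @space_info
 * is acceptable, based on the unallocated device space and on how
 * aggressively we are allowed to flush.
 */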
4319 static int can_overcommit(struct btrfs_root *root,
4320                           struct btrfs_space_info *space_info, u64 bytes,
4321                           enum btrfs_reserve_flush_enum flush)
4322 {
4323         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4324         u64 profile = btrfs_get_alloc_profile(root, 0);
4325         u64 space_size;
4326         u64 avail;
4327         u64 used;
4328
4329         used = space_info->bytes_used + space_info->bytes_reserved +
4330                 space_info->bytes_pinned + space_info->bytes_readonly;
4331
4332         /*
4333          * We only want to allow over committing if we have lots of actual space
4334          * free, but if we don't have enough space to handle the global reserve
4335          * space then we could end up having a real enospc problem when trying
4336          * to allocate a chunk or some other such important allocation.
4337          */
4338         spin_lock(&global_rsv->lock);
4339         space_size = calc_global_rsv_need_space(global_rsv);
4340         spin_unlock(&global_rsv->lock);
4341         if (used + space_size >= space_info->total_bytes)
4342                 return 0;
4343
4344         used += space_info->bytes_may_use;
4345
4346         spin_lock(&root->fs_info->free_chunk_lock);
4347         avail = root->fs_info->free_chunk_space;
4348         spin_unlock(&root->fs_info->free_chunk_lock);
4349
4350         /*
4351          * If we have dup, raid1 or raid10 then only half of the free
4352          * space is actually usable.  For raid56, the space info used
4353          * doesn't include the parity drive, so we don't have to
4354          * change the math.
4355          */
4356         if (profile & (BTRFS_BLOCK_GROUP_DUP |
4357                        BTRFS_BLOCK_GROUP_RAID1 |
4358                        BTRFS_BLOCK_GROUP_RAID10))
4359                 avail >>= 1;
4360
4361         /*
4362          * If we aren't flushing all things, let us overcommit up to
4363          * half of the space. If we can flush, don't let us overcommit
4364          * too much; let it overcommit up to 1/8 of the space.
4365          */
4366         if (flush == BTRFS_RESERVE_FLUSH_ALL)
4367                 avail >>= 3;
4368         else
4369                 avail >>= 1;
4370
4371         if (used + bytes < space_info->total_bytes + avail)
4372                 return 1;
4373         return 0;
4374 }
4375
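/*
 * Kick off writeback of delalloc data, going through the generic writeback
 * path when we can take s_umount and through our own delalloc flushing
 * helpers otherwise.
 */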
4376 static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
4377                                          unsigned long nr_pages, int nr_items)
4378 {
4379         struct super_block *sb = root->fs_info->sb;
4380
4381         if (down_read_trylock(&sb->s_umount)) {
4382                 writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
4383                 up_read(&sb->s_umount);
4384         } else {
4385                 /*
4386                  * We needn't worry about the filesystem going from r/w to r/o
4387                  * even though we don't acquire the ->s_umount mutex, because
4388                  * the filesystem should guarantee that the delalloc inode list
4389                  * is empty after the filesystem becomes read-only (all dirty
4390                  * pages have been written to disk).
4391                  */
4392                 btrfs_start_delalloc_roots(root->fs_info, 0, nr_items);
4393                 if (!current->journal_info)
4394                         btrfs_wait_ordered_roots(root->fs_info, nr_items);
4395         }
4396 }
4397
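/*
 * Number of metadata items whose reservation covers @to_reclaim bytes,
 * at least one.
 */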
4398 static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim)
4399 {
4400         u64 bytes;
4401         int nr;
4402
4403         bytes = btrfs_calc_trans_metadata_size(root, 1);
4404         nr = (int)div64_u64(to_reclaim, bytes);
4405         if (!nr)
4406                 nr = 1;
4407         return nr;
4408 }
4409
4410 #define EXTENT_SIZE_PER_ITEM    (256 * 1024)
4411
4412 /*
4413  * shrink metadata reservation for delalloc
4414  */
4415 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
4416                             bool wait_ordered)
4417 {
4418         struct btrfs_block_rsv *block_rsv;
4419         struct btrfs_space_info *space_info;
4420         struct btrfs_trans_handle *trans;
4421         u64 delalloc_bytes;
4422         u64 max_reclaim;
4423         long time_left;
4424         unsigned long nr_pages;
4425         int loops;
4426         int items;
4427         enum btrfs_reserve_flush_enum flush;
4428
4429         /* Calc the number of pages we need to flush for the space reservation */
4430         items = calc_reclaim_items_nr(root, to_reclaim);
4431         to_reclaim = items * EXTENT_SIZE_PER_ITEM;
4432
4433         trans = (struct btrfs_trans_handle *)current->journal_info;
4434         block_rsv = &root->fs_info->delalloc_block_rsv;
4435         space_info = block_rsv->space_info;
4436
4437         delalloc_bytes = percpu_counter_sum_positive(
4438                                                 &root->fs_info->delalloc_bytes);
4439         if (delalloc_bytes == 0) {
4440                 if (trans)
4441                         return;
4442                 if (wait_ordered)
4443                         btrfs_wait_ordered_roots(root->fs_info, items);
4444                 return;
4445         }
4446
4447         loops = 0;
4448         while (delalloc_bytes && loops < 3) {
4449                 max_reclaim = min(delalloc_bytes, to_reclaim);
4450                 nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
4451                 btrfs_writeback_inodes_sb_nr(root, nr_pages, items);
4452                 /*
4453                  * We need to wait for the async pages to actually start before
4454                  * we do anything.
4455                  */
4456                 max_reclaim = atomic_read(&root->fs_info->async_delalloc_pages);
4457                 if (!max_reclaim)
4458                         goto skip_async;
4459
4460                 if (max_reclaim <= nr_pages)
4461                         max_reclaim = 0;
4462                 else
4463                         max_reclaim -= nr_pages;
4464
4465                 wait_event(root->fs_info->async_submit_wait,
4466                            atomic_read(&root->fs_info->async_delalloc_pages) <=
4467                            (int)max_reclaim);
4468 skip_async:
4469                 if (!trans)
4470                         flush = BTRFS_RESERVE_FLUSH_ALL;
4471                 else
4472                         flush = BTRFS_RESERVE_NO_FLUSH;
4473                 spin_lock(&space_info->lock);
4474                 if (can_overcommit(root, space_info, orig, flush)) {
4475                         spin_unlock(&space_info->lock);
4476                         break;
4477                 }
4478                 spin_unlock(&space_info->lock);
4479
4480                 loops++;
4481                 if (wait_ordered && !trans) {
4482                         btrfs_wait_ordered_roots(root->fs_info, items);
4483                 } else {
4484                         time_left = schedule_timeout_killable(1);
4485                         if (time_left)
4486                                 break;
4487                 }
4488                 delalloc_bytes = percpu_counter_sum_positive(
4489                                                 &root->fs_info->delalloc_bytes);
4490         }
4491 }
4492
4493 /**
4494  * may_commit_transaction - possibly commit the transaction if it's OK to
4495  * @root - the root we're allocating for
4496  * @bytes - the number of bytes we want to reserve
4497  * @force - force the commit
4498  *
4499  * This will check to make sure that committing the transaction will actually
4500  * get us somewhere and then commit the transaction if it does.  Otherwise it
4501  * will return -ENOSPC.
4502  */
4503 static int may_commit_transaction(struct btrfs_root *root,
4504                                   struct btrfs_space_info *space_info,
4505                                   u64 bytes, int force)
4506 {
4507         struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
4508         struct btrfs_trans_handle *trans;
4509
4510         trans = (struct btrfs_trans_handle *)current->journal_info;
4511         if (trans)
4512                 return -EAGAIN;
4513
4514         if (force)
4515                 goto commit;
4516
4517         /* See if there is enough pinned space to make this reservation */
4518         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4519                                    bytes) >= 0)
4520                 goto commit;
4521
4522         /*
4523          * See if there is some space in the delayed insertion reservation for
4524          * this reservation.
4525          */
4526         if (space_info != delayed_rsv->space_info)
4527                 return -ENOSPC;
4528
4529         spin_lock(&delayed_rsv->lock);
4530         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4531                                    bytes - delayed_rsv->size) >= 0) {
4532                 spin_unlock(&delayed_rsv->lock);
4533                 return -ENOSPC;
4534         }
4535         spin_unlock(&delayed_rsv->lock);
4536
4537 commit:
4538         trans = btrfs_join_transaction(root);
4539         if (IS_ERR(trans))
4540                 return -ENOSPC;
4541
4542         return btrfs_commit_transaction(trans, root);
4543 }
4544
4545 enum flush_state {
4546         FLUSH_DELAYED_ITEMS_NR  =       1,
4547         FLUSH_DELAYED_ITEMS     =       2,
4548         FLUSH_DELALLOC          =       3,
4549         FLUSH_DELALLOC_WAIT     =       4,
4550         ALLOC_CHUNK             =       5,
4551         COMMIT_TRANS            =       6,
4552 };
4553
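/*
 * Run a single state of the flush state machine above, trying to make
 * num_bytes of space available in @space_info.
 */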
4554 static int flush_space(struct btrfs_root *root,
4555                        struct btrfs_space_info *space_info, u64 num_bytes,
4556                        u64 orig_bytes, int state)
4557 {
4558         struct btrfs_trans_handle *trans;
4559         int nr;
4560         int ret = 0;
4561
4562         switch (state) {
4563         case FLUSH_DELAYED_ITEMS_NR:
4564         case FLUSH_DELAYED_ITEMS:
4565                 if (state == FLUSH_DELAYED_ITEMS_NR)
4566                         nr = calc_reclaim_items_nr(root, num_bytes) * 2;
4567                 else
4568                         nr = -1;
4569
4570                 trans = btrfs_join_transaction(root);
4571                 if (IS_ERR(trans)) {
4572                         ret = PTR_ERR(trans);
4573                         break;
4574                 }
4575                 ret = btrfs_run_delayed_items_nr(trans, root, nr);
4576                 btrfs_end_transaction(trans, root);
4577                 break;
4578         case FLUSH_DELALLOC:
4579         case FLUSH_DELALLOC_WAIT:
4580                 shrink_delalloc(root, num_bytes * 2, orig_bytes,
4581                                 state == FLUSH_DELALLOC_WAIT);
4582                 break;
4583         case ALLOC_CHUNK:
4584                 trans = btrfs_join_transaction(root);
4585                 if (IS_ERR(trans)) {
4586                         ret = PTR_ERR(trans);
4587                         break;
4588                 }
4589                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4590                                      btrfs_get_alloc_profile(root, 0),
4591                                      CHUNK_ALLOC_NO_FORCE);
4592                 btrfs_end_transaction(trans, root);
4593                 if (ret == -ENOSPC)
4594                         ret = 0;
4595                 break;
4596         case COMMIT_TRANS:
4597                 ret = may_commit_transaction(root, space_info, orig_bytes, 0);
4598                 break;
4599         default:
4600                 ret = -ENOSPC;
4601                 break;
4602         }
4603
4604         return ret;
4605 }
4606
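/*
 * Work out how much metadata space the async reclaim worker should try to
 * free: nothing if a small reservation could still overcommit, otherwise
 * the amount by which usage exceeds roughly 90-95% of the space_info.
 */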
4607 static inline u64
4608 btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
4609                                  struct btrfs_space_info *space_info)
4610 {
4611         u64 used;
4612         u64 expected;
4613         u64 to_reclaim;
4614
4615         to_reclaim = min_t(u64, num_online_cpus() * 1024 * 1024,
4616                                 16 * 1024 * 1024);
4617         spin_lock(&space_info->lock);
4618         if (can_overcommit(root, space_info, to_reclaim,
4619                            BTRFS_RESERVE_FLUSH_ALL)) {
4620                 to_reclaim = 0;
4621                 goto out;
4622         }
4623
4624         used = space_info->bytes_used + space_info->bytes_reserved +
4625                space_info->bytes_pinned + space_info->bytes_readonly +
4626                space_info->bytes_may_use;
4627         if (can_overcommit(root, space_info, 1024 * 1024,
4628                            BTRFS_RESERVE_FLUSH_ALL))
4629                 expected = div_factor_fine(space_info->total_bytes, 95);
4630         else
4631                 expected = div_factor_fine(space_info->total_bytes, 90);
4632
4633         if (used > expected)
4634                 to_reclaim = used - expected;
4635         else
4636                 to_reclaim = 0;
4637         to_reclaim = min(to_reclaim, space_info->bytes_may_use +
4638                                      space_info->bytes_reserved);
4639 out:
4640         spin_unlock(&space_info->lock);
4641
4642         return to_reclaim;
4643 }
4644
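/*
 * True when the space_info is almost (98%) full but not already completely
 * used up, and the filesystem is neither closing nor being remounted.
 */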
4645 static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
4646                                         struct btrfs_fs_info *fs_info, u64 used)
4647 {
4648         u64 thresh = div_factor_fine(space_info->total_bytes, 98);
4649
4650         /* If we're just plain full then async reclaim just slows us down. */
4651         if (space_info->bytes_used >= thresh)
4652                 return 0;
4653
4654         return (used >= thresh && !btrfs_fs_closing(fs_info) &&
4655                 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
4656 }
4657
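/*
 * Decide whether the async reclaim worker should run another flush pass.
 */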
4658 static int btrfs_need_do_async_reclaim(struct btrfs_space_info *space_info,
4659                                        struct btrfs_fs_info *fs_info,
4660                                        int flush_state)
4661 {
4662         u64 used;
4663
4664         spin_lock(&space_info->lock);
4665         /*
4666          * We've run out of space and got no free space via flush_space,
4667          * so don't bother doing async reclaim.
4668          */
4669         if (flush_state > COMMIT_TRANS && space_info->full) {
4670                 spin_unlock(&space_info->lock);
4671                 return 0;
4672         }
4673
4674         used = space_info->bytes_used + space_info->bytes_reserved +
4675                space_info->bytes_pinned + space_info->bytes_readonly +
4676                space_info->bytes_may_use;
4677         if (need_do_async_reclaim(space_info, fs_info, used)) {
4678                 spin_unlock(&space_info->lock);
4679                 return 1;
4680         }
4681         spin_unlock(&space_info->lock);
4682
4683         return 0;
4684 }
4685
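/*
 * Background worker that walks the flush states to reclaim metadata space,
 * stopping once the pressure is gone and short of committing the
 * transaction.
 */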
4686 static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
4687 {
4688         struct btrfs_fs_info *fs_info;
4689         struct btrfs_space_info *space_info;
4690         u64 to_reclaim;
4691         int flush_state;
4692
4693         fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
4694         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4695
4696         to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
4697                                                       space_info);
4698         if (!to_reclaim)
4699                 return;
4700
4701         flush_state = FLUSH_DELAYED_ITEMS_NR;
4702         do {
4703                 flush_space(fs_info->fs_root, space_info, to_reclaim,
4704                             to_reclaim, flush_state);
4705                 flush_state++;
4706                 if (!btrfs_need_do_async_reclaim(space_info, fs_info,
4707                                                  flush_state))
4708                         return;
4709         } while (flush_state < COMMIT_TRANS);
4710 }
4711
4712 void btrfs_init_async_reclaim_work(struct work_struct *work)
4713 {
4714         INIT_WORK(work, btrfs_async_reclaim_metadata_space);
4715 }
4716
4717 /**
4718  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
4719  * @root - the root we're allocating for
4720  * @block_rsv - the block_rsv we're allocating for
4721  * @orig_bytes - the number of bytes we want
4722  * @flush - whether or not we can flush to make our reservation
4723  *
4724  * This will reserve orig_bytes number of bytes from the space info associated
4725  * with the block_rsv.  If there is not enough space it will make an attempt to
4726  * flush out space to make room.  It will do this by flushing delalloc if
4727  * possible or committing the transaction.  If flush is 0 then no attempts to
4728  * regain reservations will be made and this will fail if there is not enough
4729  * space already.
4730  */
4731 static int reserve_metadata_bytes(struct btrfs_root *root,
4732                                   struct btrfs_block_rsv *block_rsv,
4733                                   u64 orig_bytes,
4734                                   enum btrfs_reserve_flush_enum flush)
4735 {
4736         struct btrfs_space_info *space_info = block_rsv->space_info;
4737         u64 used;
4738         u64 num_bytes = orig_bytes;
4739         int flush_state = FLUSH_DELAYED_ITEMS_NR;
4740         int ret = 0;
4741         bool flushing = false;
4742
4743 again:
4744         ret = 0;
4745         spin_lock(&space_info->lock);
4746         /*
4747          * We only want to wait if somebody other than us is flushing and we
4748          * are actually allowed to flush all things.
4749          */
4750         while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
4751                space_info->flush) {
4752                 spin_unlock(&space_info->lock);
4753                 /*
4754                  * If we have a trans handle we can't wait because the flusher
4755                  * may have to commit the transaction, which would mean we would
4756                  * deadlock since we are waiting for the flusher to finish, but
4757                  * hold the current transaction open.
4758                  */
4759                 if (current->journal_info)
4760                         return -EAGAIN;
4761                 ret = wait_event_killable(space_info->wait, !space_info->flush);
4762                 /* Must have been killed, return */
4763                 if (ret)
4764                         return -EINTR;
4765
4766                 spin_lock(&space_info->lock);
4767         }
4768
4769         ret = -ENOSPC;
4770         used = space_info->bytes_used + space_info->bytes_reserved +
4771                 space_info->bytes_pinned + space_info->bytes_readonly +
4772                 space_info->bytes_may_use;
4773
4774         /*
4775          * The idea here is that if we've not already over-reserved the block
4776          * group then we can go ahead and save our reservation first and then
4777          * start flushing if we need to.  Otherwise if we've already
4778          * overcommitted, let's start flushing stuff first and then come back
4779          * and try to make our reservation.
4780          */
4781         if (used <= space_info->total_bytes) {
4782                 if (used + orig_bytes <= space_info->total_bytes) {
4783                         space_info->bytes_may_use += orig_bytes;
4784                         trace_btrfs_space_reservation(root->fs_info,
4785                                 "space_info", space_info->flags, orig_bytes, 1);
4786                         ret = 0;
4787                 } else {
4788                         /*
4789                          * Ok set num_bytes to orig_bytes since we aren't
4790                          * overcommitted, this way we only try and reclaim what
4791                          * we need.
4792                          */
4793                         num_bytes = orig_bytes;
4794                 }
4795         } else {
4796                 /*
4797                  * Ok we're overcommitted, set num_bytes to the overcommitted
4798                  * amount plus the amount of bytes that we need for this
4799                  * reservation.
4800                  */
4801                 num_bytes = used - space_info->total_bytes +
4802                         (orig_bytes * 2);
4803         }
4804
4805         if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
4806                 space_info->bytes_may_use += orig_bytes;
4807                 trace_btrfs_space_reservation(root->fs_info, "space_info",
4808                                               space_info->flags, orig_bytes,
4809                                               1);
4810                 ret = 0;
4811         }
4812
4813         /*
4814          * Couldn't make our reservation, save our place so while we're trying
4815          * to reclaim space we can actually use it instead of somebody else
4816          * stealing it from us.
4817          *
4818          * We make the other tasks wait for the flush only when we can flush
4819          * all things.
4820          */
4821         if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
4822                 flushing = true;
4823                 space_info->flush = 1;
4824         } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
4825                 used += orig_bytes;
4826                 /*
4827                  * We will do the space reservation dance during log replay,
4828                  * which means we won't have fs_info->fs_root set, so don't do
4829                  * the async reclaim as we will panic.
4830                  */
4831                 if (!root->fs_info->log_root_recovering &&
4832                     need_do_async_reclaim(space_info, root->fs_info, used) &&
4833                     !work_busy(&root->fs_info->async_reclaim_work))
4834                         queue_work(system_unbound_wq,
4835                                    &root->fs_info->async_reclaim_work);
4836         }
4837         spin_unlock(&space_info->lock);
4838
4839         if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
4840                 goto out;
4841
4842         ret = flush_space(root, space_info, num_bytes, orig_bytes,
4843                           flush_state);
4844         flush_state++;
4845
4846         /*
4847          * If we are FLUSH_LIMIT, we can't flush delalloc, or a deadlock
4848          * could happen. So skip the delalloc flush.
4849          */
4850         if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4851             (flush_state == FLUSH_DELALLOC ||
4852              flush_state == FLUSH_DELALLOC_WAIT))
4853                 flush_state = ALLOC_CHUNK;
4854
4855         if (!ret)
4856                 goto again;
4857         else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4858                  flush_state < COMMIT_TRANS)
4859                 goto again;
4860         else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
4861                  flush_state <= COMMIT_TRANS)
4862                 goto again;
4863
4864 out:
4865         if (ret == -ENOSPC &&
4866             unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
4867                 struct btrfs_block_rsv *global_rsv =
4868                         &root->fs_info->global_block_rsv;
4869
4870                 if (block_rsv != global_rsv &&
4871                     !block_rsv_use_bytes(global_rsv, orig_bytes))
4872                         ret = 0;
4873         }
4874         if (ret == -ENOSPC)
4875                 trace_btrfs_space_reservation(root->fs_info,
4876                                               "space_info:enospc",
4877                                               space_info->flags, orig_bytes, 1);
4878         if (flushing) {
4879                 spin_lock(&space_info->lock);
4880                 space_info->flush = 0;
4881                 wake_up_all(&space_info->wait);
4882                 spin_unlock(&space_info->lock);
4883         }
4884         return ret;
4885 }
4886
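/*
 * Pick the block reserve to charge for an allocation from @root in the
 * given transaction, falling back to the root's own reserve or the empty
 * reserve.
 */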
4887 static struct btrfs_block_rsv *get_block_rsv(
4888                                         const struct btrfs_trans_handle *trans,
4889                                         const struct btrfs_root *root)
4890 {
4891         struct btrfs_block_rsv *block_rsv = NULL;
4892
4893         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
4894                 block_rsv = trans->block_rsv;
4895
4896         if (root == root->fs_info->csum_root && trans->adding_csums)
4897                 block_rsv = trans->block_rsv;
4898
4899         if (root == root->fs_info->uuid_root)
4900                 block_rsv = trans->block_rsv;
4901
4902         if (!block_rsv)
4903                 block_rsv = root->block_rsv;
4904
4905         if (!block_rsv)
4906                 block_rsv = &root->fs_info->empty_block_rsv;
4907
4908         return block_rsv;
4909 }
4910
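/*
 * Take @num_bytes out of the reserve if it holds enough, -ENOSPC otherwise.
 */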
4911 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
4912                                u64 num_bytes)
4913 {
4914         int ret = -ENOSPC;
4915         spin_lock(&block_rsv->lock);
4916         if (block_rsv->reserved >= num_bytes) {
4917                 block_rsv->reserved -= num_bytes;
4918                 if (block_rsv->reserved < block_rsv->size)
4919                         block_rsv->full = 0;
4920                 ret = 0;
4921         }
4922         spin_unlock(&block_rsv->lock);
4923         return ret;
4924 }
4925
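/*
 * Add @num_bytes to the reserve and, if @update_size, grow its target size
 * by the same amount.
 */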
4926 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
4927                                 u64 num_bytes, int update_size)
4928 {
4929         spin_lock(&block_rsv->lock);
4930         block_rsv->reserved += num_bytes;
4931         if (update_size)
4932                 block_rsv->size += num_bytes;
4933         else if (block_rsv->reserved >= block_rsv->size)
4934                 block_rsv->full = 1;
4935         spin_unlock(&block_rsv->lock);
4936 }
4937
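/*
 * Move @num_bytes from the global reserve into @dest, but only if the
 * global reserve would still hold at least min_factor/10 of its size
 * afterwards.
 */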
4938 int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
4939                              struct btrfs_block_rsv *dest, u64 num_bytes,
4940                              int min_factor)
4941 {
4942         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
4943         u64 min_bytes;
4944
4945         if (global_rsv->space_info != dest->space_info)
4946                 return -ENOSPC;
4947
4948         spin_lock(&global_rsv->lock);
4949         min_bytes = div_factor(global_rsv->size, min_factor);
4950         if (global_rsv->reserved < min_bytes + num_bytes) {
4951                 spin_unlock(&global_rsv->lock);
4952                 return -ENOSPC;
4953         }
4954         global_rsv->reserved -= num_bytes;
4955         if (global_rsv->reserved < global_rsv->size)
4956                 global_rsv->full = 0;
4957         spin_unlock(&global_rsv->lock);
4958
4959         block_rsv_add_bytes(dest, num_bytes, 1);
4960         return 0;
4961 }
4962
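/*
 * Shrink @block_rsv by @num_bytes ((u64)-1 means its whole size); excess
 * reserved space is handed to @dest when possible, otherwise it is returned
 * to the space_info's bytes_may_use accounting.
 */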
4963 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
4964                                     struct btrfs_block_rsv *block_rsv,
4965                                     struct btrfs_block_rsv *dest, u64 num_bytes)
4966 {
4967         struct btrfs_space_info *space_info = block_rsv->space_info;
4968
4969         spin_lock(&block_rsv->lock);
4970         if (num_bytes == (u64)-1)
4971                 num_bytes = block_rsv->size;
4972         block_rsv->size -= num_bytes;
4973         if (block_rsv->reserved >= block_rsv->size) {
4974                 num_bytes = block_rsv->reserved - block_rsv->size;
4975                 block_rsv->reserved = block_rsv->size;
4976                 block_rsv->full = 1;
4977         } else {
4978                 num_bytes = 0;
4979         }
4980         spin_unlock(&block_rsv->lock);
4981
4982         if (num_bytes > 0) {
4983                 if (dest) {
4984                         spin_lock(&dest->lock);
4985                         if (!dest->full) {
4986                                 u64 bytes_to_add;
4987
4988                                 bytes_to_add = dest->size - dest->reserved;
4989                                 bytes_to_add = min(num_bytes, bytes_to_add);
4990                                 dest->reserved += bytes_to_add;
4991                                 if (dest->reserved >= dest->size)
4992                                         dest->full = 1;
4993                                 num_bytes -= bytes_to_add;
4994                         }
4995                         spin_unlock(&dest->lock);
4996                 }
4997                 if (num_bytes) {
4998                         spin_lock(&space_info->lock);
4999                         space_info->bytes_may_use -= num_bytes;
5000                         trace_btrfs_space_reservation(fs_info, "space_info",
5001                                         space_info->flags, num_bytes, 0);
5002                         spin_unlock(&space_info->lock);
5003                 }
5004         }
5005 }
5006
5007 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
5008                                    struct btrfs_block_rsv *dst, u64 num_bytes)
5009 {
5010         int ret;
5011
5012         ret = block_rsv_use_bytes(src, num_bytes);
5013         if (ret)
5014                 return ret;
5015
5016         block_rsv_add_bytes(dst, num_bytes, 1);
5017         return 0;
5018 }
5019
5020 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
5021 {
5022         memset(rsv, 0, sizeof(*rsv));
5023         spin_lock_init(&rsv->lock);
5024         rsv->type = type;
5025 }
5026
5027 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
5028                                               unsigned short type)
5029 {
5030         struct btrfs_block_rsv *block_rsv;
5031         struct btrfs_fs_info *fs_info = root->fs_info;
5032
5033         block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
5034         if (!block_rsv)
5035                 return NULL;
5036
5037         btrfs_init_block_rsv(block_rsv, type);
5038         block_rsv->space_info = __find_space_info(fs_info,
5039                                                   BTRFS_BLOCK_GROUP_METADATA);
5040         return block_rsv;
5041 }
5042
5043 void btrfs_free_block_rsv(struct btrfs_root *root,
5044                           struct btrfs_block_rsv *rsv)
5045 {
5046         if (!rsv)
5047                 return;
5048         btrfs_block_rsv_release(root, rsv, (u64)-1);
5049         kfree(rsv);
5050 }
5051
5052 void __btrfs_free_block_rsv(struct btrfs_block_rsv *rsv)
5053 {
5054         kfree(rsv);
5055 }
5056
5057 int btrfs_block_rsv_add(struct btrfs_root *root,
5058                         struct btrfs_block_rsv *block_rsv, u64 num_bytes,
5059                         enum btrfs_reserve_flush_enum flush)
5060 {
5061         int ret;
5062
5063         if (num_bytes == 0)
5064                 return 0;
5065
5066         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5067         if (!ret) {
5068                 block_rsv_add_bytes(block_rsv, num_bytes, 1);
5069                 return 0;
5070         }
5071
5072         return ret;
5073 }
5074
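/*
 * Return 0 if the reserve still holds at least min_factor/10 of its size,
 * -ENOSPC otherwise.
 */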
5075 int btrfs_block_rsv_check(struct btrfs_root *root,
5076                           struct btrfs_block_rsv *block_rsv, int min_factor)
5077 {
5078         u64 num_bytes = 0;
5079         int ret = -ENOSPC;
5080
5081         if (!block_rsv)
5082                 return 0;
5083
5084         spin_lock(&block_rsv->lock);
5085         num_bytes = div_factor(block_rsv->size, min_factor);
5086         if (block_rsv->reserved >= num_bytes)
5087                 ret = 0;
5088         spin_unlock(&block_rsv->lock);
5089
5090         return ret;
5091 }
5092
5093 int btrfs_block_rsv_refill(struct btrfs_root *root,
5094                            struct btrfs_block_rsv *block_rsv, u64 min_reserved,
5095                            enum btrfs_reserve_flush_enum flush)
5096 {
5097         u64 num_bytes = 0;
5098         int ret = -ENOSPC;
5099
5100         if (!block_rsv)
5101                 return 0;
5102
5103         spin_lock(&block_rsv->lock);
5104         num_bytes = min_reserved;
5105         if (block_rsv->reserved >= num_bytes)
5106                 ret = 0;
5107         else
5108                 num_bytes -= block_rsv->reserved;
5109         spin_unlock(&block_rsv->lock);
5110
5111         if (!ret)
5112                 return 0;
5113
5114         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5115         if (!ret) {
5116                 block_rsv_add_bytes(block_rsv, num_bytes, 0);
5117                 return 0;
5118         }
5119
5120         return ret;
5121 }
5122
5123 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
5124                             struct btrfs_block_rsv *dst_rsv,
5125                             u64 num_bytes)
5126 {
5127         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
5128 }
5129
5130 void btrfs_block_rsv_release(struct btrfs_root *root,
5131                              struct btrfs_block_rsv *block_rsv,
5132                              u64 num_bytes)
5133 {
5134         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5135         if (global_rsv == block_rsv ||
5136             block_rsv->space_info != global_rsv->space_info)
5137                 global_rsv = NULL;
5138         block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
5139                                 num_bytes);
5140 }
5141
5142 /*
5143  * Helper to calculate the size of the global block reservation.
5144  * The desired value is the sum of the space used by the extent tree,
5145  * the checksum tree and the root tree.
5146  */
5147 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
5148 {
5149         struct btrfs_space_info *sinfo;
5150         u64 num_bytes;
5151         u64 meta_used;
5152         u64 data_used;
5153         int csum_size = btrfs_super_csum_size(fs_info->super_copy);
5154
5155         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
5156         spin_lock(&sinfo->lock);
5157         data_used = sinfo->bytes_used;
5158         spin_unlock(&sinfo->lock);
5159
5160         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
5161         spin_lock(&sinfo->lock);
5162         if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
5163                 data_used = 0;
5164         meta_used = sinfo->bytes_used;
5165         spin_unlock(&sinfo->lock);
5166
5167         num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
5168                     csum_size * 2;
5169         num_bytes += div_u64(data_used + meta_used, 50);
5170
5171         if (num_bytes * 3 > meta_used)
5172                 num_bytes = div_u64(meta_used, 3);
5173
5174         return ALIGN(num_bytes, fs_info->extent_root->nodesize << 10);
5175 }
5176
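/*
 * Resize the global reserve to the calculated target (capped at 512M) and
 * settle the difference against the metadata space_info accounting.
 */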
5177 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
5178 {
5179         struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
5180         struct btrfs_space_info *sinfo = block_rsv->space_info;
5181         u64 num_bytes;
5182
5183         num_bytes = calc_global_metadata_size(fs_info);
5184
5185         spin_lock(&sinfo->lock);
5186         spin_lock(&block_rsv->lock);
5187
5188         block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);
5189
5190         num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
5191                     sinfo->bytes_reserved + sinfo->bytes_readonly +
5192                     sinfo->bytes_may_use;
5193
5194         if (sinfo->total_bytes > num_bytes) {
5195                 num_bytes = sinfo->total_bytes - num_bytes;
5196                 block_rsv->reserved += num_bytes;
5197                 sinfo->bytes_may_use += num_bytes;
5198                 trace_btrfs_space_reservation(fs_info, "space_info",
5199                                       sinfo->flags, num_bytes, 1);
5200         }
5201
5202         if (block_rsv->reserved >= block_rsv->size) {
5203                 num_bytes = block_rsv->reserved - block_rsv->size;
5204                 sinfo->bytes_may_use -= num_bytes;
5205                 trace_btrfs_space_reservation(fs_info, "space_info",
5206                                       sinfo->flags, num_bytes, 0);
5207                 block_rsv->reserved = block_rsv->size;
5208                 block_rsv->full = 1;
5209         }
5210
5211         spin_unlock(&block_rsv->lock);
5212         spin_unlock(&sinfo->lock);
5213 }
5214
5215 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
5216 {
5217         struct btrfs_space_info *space_info;
5218
5219         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
5220         fs_info->chunk_block_rsv.space_info = space_info;
5221
5222         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
5223         fs_info->global_block_rsv.space_info = space_info;
5224         fs_info->delalloc_block_rsv.space_info = space_info;
5225         fs_info->trans_block_rsv.space_info = space_info;
5226         fs_info->empty_block_rsv.space_info = space_info;
5227         fs_info->delayed_block_rsv.space_info = space_info;
5228
5229         fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
5230         fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
5231         fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
5232         fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
5233         if (fs_info->quota_root)
5234                 fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
5235         fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
5236
5237         update_global_block_rsv(fs_info);
5238 }
5239
5240 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
5241 {
5242         block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
5243                                 (u64)-1);
5244         WARN_ON(fs_info->delalloc_block_rsv.size > 0);
5245         WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
5246         WARN_ON(fs_info->trans_block_rsv.size > 0);
5247         WARN_ON(fs_info->trans_block_rsv.reserved > 0);
5248         WARN_ON(fs_info->chunk_block_rsv.size > 0);
5249         WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
5250         WARN_ON(fs_info->delayed_block_rsv.size > 0);
5251         WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
5252 }
5253
5254 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
5255                                   struct btrfs_root *root)
5256 {
5257         if (!trans->block_rsv)
5258                 return;
5259
5260         if (!trans->bytes_reserved)
5261                 return;
5262
5263         trace_btrfs_space_reservation(root->fs_info, "transaction",
5264                                       trans->transid, trans->bytes_reserved, 0);
5265         btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
5266         trans->bytes_reserved = 0;
5267 }
5268
5269 /*
5270  * To be called after all the new block groups attached to the transaction
5271  * handle have been created (btrfs_create_pending_block_groups()).
5272  */
5273 void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
5274 {
5275         struct btrfs_fs_info *fs_info = trans->root->fs_info;
5276
5277         if (!trans->chunk_bytes_reserved)
5278                 return;
5279
5280         WARN_ON_ONCE(!list_empty(&trans->new_bgs));
5281
5282         block_rsv_release_bytes(fs_info, &fs_info->chunk_block_rsv, NULL,
5283                                 trans->chunk_bytes_reserved);
5284         trans->chunk_bytes_reserved = 0;
5285 }
5286
5287 /* Can only return 0 or -ENOSPC */
5288 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
5289                                   struct inode *inode)
5290 {
5291         struct btrfs_root *root = BTRFS_I(inode)->root;
5292         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
5293         struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
5294
5295         /*
5296          * We need to hold space in order to delete our orphan item once we've
5297          * added it, so this takes the reservation so we can release it later
5298          * when we are truly done with the orphan item.
5299          */
5300         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
5301         trace_btrfs_space_reservation(root->fs_info, "orphan",
5302                                       btrfs_ino(inode), num_bytes, 1);
5303         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
5304 }
5305
5306 void btrfs_orphan_release_metadata(struct inode *inode)
5307 {
5308         struct btrfs_root *root = BTRFS_I(inode)->root;
5309         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
5310         trace_btrfs_space_reservation(root->fs_info, "orphan",
5311                                       btrfs_ino(inode), num_bytes, 0);
5312         btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
5313 }
5314
5315 /*
5316  * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
5317  * root: the root of the parent directory
5318  * rsv: block reservation
5319  * items: the number of items that we need to reserve for
5320  * qgroup_reserved: used to return the reserved size in qgroup
5321  *
5322  * This function is used to reserve the space for snapshot/subvolume
5323  * creation and deletion. Those operations are different from the
5324  * common file/directory operations: they change two fs/file trees
5325  * and the root tree, and the number of items that the qgroup reserves
5326  * differs from the free space reservation. So we cannot use
5327  * the space reservation mechanism in start_transaction().
5328  */
5329 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
5330                                      struct btrfs_block_rsv *rsv,
5331                                      int items,
5332                                      u64 *qgroup_reserved,
5333                                      bool use_global_rsv)
5334 {
5335         u64 num_bytes;
5336         int ret;
5337         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5338
5339         if (root->fs_info->quota_enabled) {
5340                 /* One for parent inode, two for dir entries */
5341                 num_bytes = 3 * root->nodesize;
5342                 ret = btrfs_qgroup_reserve(root, num_bytes);
5343                 if (ret)
5344                         return ret;
5345         } else {
5346                 num_bytes = 0;
5347         }
5348
5349         *qgroup_reserved = num_bytes;
5350
5351         num_bytes = btrfs_calc_trans_metadata_size(root, items);
5352         rsv->space_info = __find_space_info(root->fs_info,
5353                                             BTRFS_BLOCK_GROUP_METADATA);
5354         ret = btrfs_block_rsv_add(root, rsv, num_bytes,
5355                                   BTRFS_RESERVE_FLUSH_ALL);
5356
5357         if (ret == -ENOSPC && use_global_rsv)
5358                 ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes);
5359
5360         if (ret) {
5361                 if (*qgroup_reserved)
5362                         btrfs_qgroup_free(root, *qgroup_reserved);
5363         }
5364
5365         return ret;
5366 }
5367
5368 void btrfs_subvolume_release_metadata(struct btrfs_root *root,
5369                                       struct btrfs_block_rsv *rsv,
5370                                       u64 qgroup_reserved)
5371 {
5372         btrfs_block_rsv_release(root, rsv, (u64)-1);
5373 }
5374
5375 /**
5376  * drop_outstanding_extent - drop an outstanding extent
5377  * @inode: the inode we're dropping the extent for
5378  * @num_bytes: the number of bytes we're releasing.
5379  *
5380  * This is called when we are freeing up an outstanding extent, either called
5381  * after an error or after an extent is written.  This will return the number of
5382  * reserved extents that need to be freed.  This must be called with
5383  * BTRFS_I(inode)->lock held.
5384  */
5385 static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes)
5386 {
5387         unsigned drop_inode_space = 0;
5388         unsigned dropped_extents = 0;
5389         unsigned num_extents = 0;
5390
5391         num_extents = (unsigned)div64_u64(num_bytes +
5392                                           BTRFS_MAX_EXTENT_SIZE - 1,
5393                                           BTRFS_MAX_EXTENT_SIZE);
5394         ASSERT(num_extents);
5395         ASSERT(BTRFS_I(inode)->outstanding_extents >= num_extents);
5396         BTRFS_I(inode)->outstanding_extents -= num_extents;
5397
5398         if (BTRFS_I(inode)->outstanding_extents == 0 &&
5399             test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5400                                &BTRFS_I(inode)->runtime_flags))
5401                 drop_inode_space = 1;
5402
5403         /*
5404          * If we have at least as many outstanding extents as we have
5405          * reserved, then we need to leave the reserved extents count alone.
5406          */
5407         if (BTRFS_I(inode)->outstanding_extents >=
5408             BTRFS_I(inode)->reserved_extents)
5409                 return drop_inode_space;
5410
5411         dropped_extents = BTRFS_I(inode)->reserved_extents -
5412                 BTRFS_I(inode)->outstanding_extents;
5413         BTRFS_I(inode)->reserved_extents -= dropped_extents;
5414         return dropped_extents + drop_inode_space;
5415 }
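
/*
 * Illustrative sketch, not in the original source: the ceiling division
 * used above to turn a byte count into a worst-case extent count.  The
 * helper name is hypothetical; 4k of dirty data and BTRFS_MAX_EXTENT_SIZE
 * of dirty data both count as one outstanding extent.
 */
static inline unsigned example_count_extents(u64 num_bytes)
{
        return (unsigned)div64_u64(num_bytes + BTRFS_MAX_EXTENT_SIZE - 1,
                                   BTRFS_MAX_EXTENT_SIZE);
}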
5416
5417 /**
5418  * calc_csum_metadata_size - return the amount of metadata space that must be
5419  *      reserved/freed for the given bytes.
5420  * @inode: the inode we're manipulating
5421  * @num_bytes: the number of bytes in question
5422  * @reserve: 1 if we are reserving space, 0 if we are freeing space
5423  *
5424  * This adjusts the number of csum_bytes in the inode and then returns the
5425  * correct amount of metadata that must either be reserved or freed.  We
5426  * calculate how many checksums we can fit into one leaf and then divide the
5427  * number of bytes that will need to be checksummed by this value to figure out
5428  * how many checksums will be required.  If we are adding bytes then the number
5429  * may go up and we will return the number of additional bytes that must be
5430  * reserved.  If it is going down we will return the number of bytes that must
5431  * be freed.
5432  *
5433  * This must be called with BTRFS_I(inode)->lock held.
5434  */
5435 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
5436                                    int reserve)
5437 {
5438         struct btrfs_root *root = BTRFS_I(inode)->root;
5439         u64 old_csums, num_csums;
5440
5441         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
5442             BTRFS_I(inode)->csum_bytes == 0)
5443                 return 0;
5444
5445         old_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
5446         if (reserve)
5447                 BTRFS_I(inode)->csum_bytes += num_bytes;
5448         else
5449                 BTRFS_I(inode)->csum_bytes -= num_bytes;
5450         num_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
5451
5452         /* No change, no need to reserve more */
5453         if (old_csums == num_csums)
5454                 return 0;
5455
5456         if (reserve)
5457                 return btrfs_calc_trans_metadata_size(root,
5458                                                       num_csums - old_csums);
5459
5460         return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
5461 }
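
/*
 * Illustrative sketch, not in the original source: how a reserve-side
 * caller uses calc_csum_metadata_size() under the required lock.  The
 * wrapper name is hypothetical.
 */
static u64 example_csum_reserve_size(struct inode *inode, u64 num_bytes)
{
        u64 to_reserve;

        spin_lock(&BTRFS_I(inode)->lock);
        /* reserve == 1: csum_bytes grows and we get back the delta */
        to_reserve = calc_csum_metadata_size(inode, num_bytes, 1);
        spin_unlock(&BTRFS_I(inode)->lock);

        return to_reserve;
}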
5462
5463 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
5464 {
5465         struct btrfs_root *root = BTRFS_I(inode)->root;
5466         struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
5467         u64 to_reserve = 0;
5468         u64 csum_bytes;
5469         unsigned nr_extents = 0;
5470         int extra_reserve = 0;
5471         enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
5472         int ret = 0;
5473         bool delalloc_lock = true;
5474         u64 to_free = 0;
5475         unsigned dropped;
5476
5477         /* If we are a free space inode we need to not flush since we will be in
5478          * the middle of a transaction commit.  We also don't need the delalloc
5479          * mutex since we won't race with anybody.  We need this mostly to make
5480          * lockdep shut its filthy mouth.
5481          */
5482         if (btrfs_is_free_space_inode(inode)) {
5483                 flush = BTRFS_RESERVE_NO_FLUSH;
5484                 delalloc_lock = false;
5485         }
5486
5487         if (flush != BTRFS_RESERVE_NO_FLUSH &&
5488             btrfs_transaction_in_commit(root->fs_info))
5489                 schedule_timeout(1);
5490
5491         if (delalloc_lock)
5492                 mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
5493
5494         num_bytes = ALIGN(num_bytes, root->sectorsize);
5495
5496         spin_lock(&BTRFS_I(inode)->lock);
5497         nr_extents = (unsigned)div64_u64(num_bytes +
5498                                          BTRFS_MAX_EXTENT_SIZE - 1,
5499                                          BTRFS_MAX_EXTENT_SIZE);
5500         BTRFS_I(inode)->outstanding_extents += nr_extents;
5501         nr_extents = 0;
5502
5503         if (BTRFS_I(inode)->outstanding_extents >
5504             BTRFS_I(inode)->reserved_extents)
5505                 nr_extents = BTRFS_I(inode)->outstanding_extents -
5506                         BTRFS_I(inode)->reserved_extents;
5507
5508         /*
5509          * Add an item to reserve for updating the inode when we complete the
5510          * delalloc io.
5511          */
5512         if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5513                       &BTRFS_I(inode)->runtime_flags)) {
5514                 nr_extents++;
5515                 extra_reserve = 1;
5516         }
5517
5518         to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
5519         to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
5520         csum_bytes = BTRFS_I(inode)->csum_bytes;
5521         spin_unlock(&BTRFS_I(inode)->lock);
5522
5523         if (root->fs_info->quota_enabled) {
5524                 ret = btrfs_qgroup_reserve(root, nr_extents * root->nodesize);
5525                 if (ret)
5526                         goto out_fail;
5527         }
5528
5529         ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
5530         if (unlikely(ret)) {
5531                 if (root->fs_info->quota_enabled)
5532                         btrfs_qgroup_free(root, nr_extents * root->nodesize);
5533                 goto out_fail;
5534         }
5535
5536         spin_lock(&BTRFS_I(inode)->lock);
5537         if (extra_reserve) {
5538                 set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5539                         &BTRFS_I(inode)->runtime_flags);
5540                 nr_extents--;
5541         }
5542         BTRFS_I(inode)->reserved_extents += nr_extents;
5543         spin_unlock(&BTRFS_I(inode)->lock);
5544
5545         if (delalloc_lock)
5546                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5547
5548         if (to_reserve)
5549                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5550                                               btrfs_ino(inode), to_reserve, 1);
5551         block_rsv_add_bytes(block_rsv, to_reserve, 1);
5552
5553         return 0;
5554
5555 out_fail:
5556         spin_lock(&BTRFS_I(inode)->lock);
5557         dropped = drop_outstanding_extent(inode, num_bytes);
5558         /*
5559          * If the inode's csum_bytes is the same as the original
5560          * csum_bytes then we know we haven't raced with any freers
5561          * so we can just reduce the inode's csum bytes and carry on.
5562          */
5563         if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
5564                 calc_csum_metadata_size(inode, num_bytes, 0);
5565         } else {
5566                 u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
5567                 u64 bytes;
5568
5569                 /*
5570                  * This is tricky, but first we need to figure out how much was
5571                  * freed by any freers that occurred during this
5572                  * reservation, so we reset ->csum_bytes to the csum_bytes
5573                  * before we dropped our lock, and then call the free for the
5574                  * number of bytes that were freed while we were trying our
5575                  * reservation.
5576                  */
5577                 bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
5578                 BTRFS_I(inode)->csum_bytes = csum_bytes;
5579                 to_free = calc_csum_metadata_size(inode, bytes, 0);
5580
5582                 /*
5583                  * Now we need to see how much we would have freed had we not
5584                  * been making this reservation and our ->csum_bytes were not
5585                  * artificially inflated.
5586                  */
5587                 BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
5588                 bytes = csum_bytes - orig_csum_bytes;
5589                 bytes = calc_csum_metadata_size(inode, bytes, 0);
5590
5591                 /*
5592                  * Now reset ->csum_bytes to what it should be.  If bytes is
5593                  * more than to_free then we would have freed more space had we
5594                  * not had an artificially high ->csum_bytes, so we need to free
5595                  * the remainder.  If bytes is the same or less then we don't
5596                  * need to do anything, the other free-ers did the correct
5597                  * thing.
5598                  */
5599                 BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
5600                 if (bytes > to_free)
5601                         to_free = bytes - to_free;
5602                 else
5603                         to_free = 0;
5604         }
5605         spin_unlock(&BTRFS_I(inode)->lock);
5606         if (dropped)
5607                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5608
5609         if (to_free) {
5610                 btrfs_block_rsv_release(root, block_rsv, to_free);
5611                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5612                                               btrfs_ino(inode), to_free, 0);
5613         }
5614         if (delalloc_lock)
5615                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5616         return ret;
5617 }
5618
5619 /**
5620  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
5621  * @inode: the inode to release the reservation for
5622  * @num_bytes: the number of bytes we're releasing
5623  *
5624  * This will release the metadata reservation for an inode.  This can be called
5625  * once we complete IO for a given set of bytes to release their metadata
5626  * reservations.
5627  */
5628 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
5629 {
5630         struct btrfs_root *root = BTRFS_I(inode)->root;
5631         u64 to_free = 0;
5632         unsigned dropped;
5633
5634         num_bytes = ALIGN(num_bytes, root->sectorsize);
5635         spin_lock(&BTRFS_I(inode)->lock);
5636         dropped = drop_outstanding_extent(inode, num_bytes);
5637
5638         if (num_bytes)
5639                 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
5640         spin_unlock(&BTRFS_I(inode)->lock);
5641         if (dropped > 0)
5642                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5643
5644         if (btrfs_test_is_dummy_root(root))
5645                 return;
5646
5647         trace_btrfs_space_reservation(root->fs_info, "delalloc",
5648                                       btrfs_ino(inode), to_free, 0);
5649
5650         btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
5651                                 to_free);
5652 }
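
/*
 * Illustrative sketch, hypothetical caller not in the original source:
 * a metadata-only reservation pairing, as used when the data space was
 * obtained by other means (a nodatacow write, for example).
 */
static int example_metadata_only(struct inode *inode, u64 num_bytes)
{
        int ret;

        ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
        if (ret)
                return ret;

        /* ... do the write; once the IO completes (or fails): */
        btrfs_delalloc_release_metadata(inode, num_bytes);
        return 0;
}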
5653
5654 /**
5655  * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
5656  * @inode: inode we're writing to
5657  * @num_bytes: the number of bytes we want to allocate
5658  *
5659  * This will do the following things
5660  *
5661  * o reserve space in the data space info for num_bytes
5662  * o reserve space in the metadata space info based on number of outstanding
5663  *   extents and how much csums will be needed
5664  * o add to the inodes ->delalloc_bytes
5665  * o add it to the fs_info's delalloc inodes list.
5666  *
5667  * This will return 0 for success and -ENOSPC if there is no space left.
5668  */
5669 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
5670 {
5671         int ret;
5672
5673         ret = btrfs_check_data_free_space(inode, num_bytes, num_bytes);
5674         if (ret)
5675                 return ret;
5676
5677         ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
5678         if (ret) {
5679                 btrfs_free_reserved_data_space(inode, num_bytes);
5680                 return ret;
5681         }
5682
5683         return 0;
5684 }
5685
5686 /**
5687  * btrfs_delalloc_release_space - release data and metadata space for delalloc
5688  * @inode: inode we're releasing space for
5689  * @num_bytes: the number of bytes we want to free up
5690  *
5691  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
5692  * called in the case that we don't need the metadata AND data reservations
5693  * anymore, for example after an error or when we insert an inline extent.
5694  *
5695  * This function will release the metadata space that was not used and will
5696  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
5697  * list if there are no delalloc bytes left.
5698  */
5699 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
5700 {
5701         btrfs_delalloc_release_metadata(inode, num_bytes);
5702         btrfs_free_reserved_data_space(inode, num_bytes);
5703 }
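
/*
 * Illustrative sketch, hypothetical caller not in the original source:
 * how a buffered write path pairs the two helpers above when it has to
 * bail out after reserving.
 */
static int example_write_abort(struct inode *inode, u64 num_bytes)
{
        int ret;

        ret = btrfs_delalloc_reserve_space(inode, num_bytes);
        if (ret)
                return ret;     /* nothing is held on failure */

        /* ... copying into the page cache failed, undo both halves ... */
        btrfs_delalloc_release_space(inode, num_bytes);
        return -EFAULT;
}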
5704
5705 static int update_block_group(struct btrfs_trans_handle *trans,
5706                               struct btrfs_root *root, u64 bytenr,
5707                               u64 num_bytes, int alloc)
5708 {
5709         struct btrfs_block_group_cache *cache = NULL;
5710         struct btrfs_fs_info *info = root->fs_info;
5711         u64 total = num_bytes;
5712         u64 old_val;
5713         u64 byte_in_group;
5714         int factor;
5715
5716         /* block accounting for super block */
5717         spin_lock(&info->delalloc_root_lock);
5718         old_val = btrfs_super_bytes_used(info->super_copy);
5719         if (alloc)
5720                 old_val += num_bytes;
5721         else
5722                 old_val -= num_bytes;
5723         btrfs_set_super_bytes_used(info->super_copy, old_val);
5724         spin_unlock(&info->delalloc_root_lock);
5725
5726         while (total) {
5727                 cache = btrfs_lookup_block_group(info, bytenr);
5728                 if (!cache)
5729                         return -ENOENT;
5730                 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
5731                                     BTRFS_BLOCK_GROUP_RAID1 |
5732                                     BTRFS_BLOCK_GROUP_RAID10))
5733                         factor = 2;
5734                 else
5735                         factor = 1;
5736                 /*
5737                  * If this block group has free space cache written out, we
5738                  * need to make sure to load it if we are removing space.  This
5739                  * is because we need the unpinning stage to actually add the
5740                  * space back to the block group, otherwise we will leak space.
5741                  */
5742                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
5743                         cache_block_group(cache, 1);
5744
5745                 byte_in_group = bytenr - cache->key.objectid;
5746                 WARN_ON(byte_in_group > cache->key.offset);
5747
5748                 spin_lock(&cache->space_info->lock);
5749                 spin_lock(&cache->lock);
5750
5751                 if (btrfs_test_opt(root, SPACE_CACHE) &&
5752                     cache->disk_cache_state < BTRFS_DC_CLEAR)
5753                         cache->disk_cache_state = BTRFS_DC_CLEAR;
5754
5755                 old_val = btrfs_block_group_used(&cache->item);
5756                 num_bytes = min(total, cache->key.offset - byte_in_group);
5757                 if (alloc) {
5758                         old_val += num_bytes;
5759                         btrfs_set_block_group_used(&cache->item, old_val);
5760                         cache->reserved -= num_bytes;
5761                         cache->space_info->bytes_reserved -= num_bytes;
5762                         cache->space_info->bytes_used += num_bytes;
5763                         cache->space_info->disk_used += num_bytes * factor;
5764                         spin_unlock(&cache->lock);
5765                         spin_unlock(&cache->space_info->lock);
5766                 } else {
5767                         old_val -= num_bytes;
5768                         btrfs_set_block_group_used(&cache->item, old_val);
5769                         cache->pinned += num_bytes;
5770                         cache->space_info->bytes_pinned += num_bytes;
5771                         cache->space_info->bytes_used -= num_bytes;
5772                         cache->space_info->disk_used -= num_bytes * factor;
5773                         spin_unlock(&cache->lock);
5774                         spin_unlock(&cache->space_info->lock);
5775
5776                         set_extent_dirty(info->pinned_extents,
5777                                          bytenr, bytenr + num_bytes - 1,
5778                                          GFP_NOFS | __GFP_NOFAIL);
5779                         /*
5780                          * No longer have used bytes in this block group, queue
5781                          * it for deletion.
5782                          */
5783                         if (old_val == 0) {
5784                                 spin_lock(&info->unused_bgs_lock);
5785                                 if (list_empty(&cache->bg_list)) {
5786                                         btrfs_get_block_group(cache);
5787                                         list_add_tail(&cache->bg_list,
5788                                                       &info->unused_bgs);
5789                                 }
5790                                 spin_unlock(&info->unused_bgs_lock);
5791                         }
5792                 }
5793
5794                 spin_lock(&trans->transaction->dirty_bgs_lock);
5795                 if (list_empty(&cache->dirty_list)) {
5796                         list_add_tail(&cache->dirty_list,
5797                                       &trans->transaction->dirty_bgs);
5798                         trans->transaction->num_dirty_bgs++;
5799                         btrfs_get_block_group(cache);
5800                 }
5801                 spin_unlock(&trans->transaction->dirty_bgs_lock);
5802
5803                 btrfs_put_block_group(cache);
5804                 total -= num_bytes;
5805                 bytenr += num_bytes;
5806         }
5807         return 0;
5808 }
5809
5810 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
5811 {
5812         struct btrfs_block_group_cache *cache;
5813         u64 bytenr;
5814
5815         spin_lock(&root->fs_info->block_group_cache_lock);
5816         bytenr = root->fs_info->first_logical_byte;
5817         spin_unlock(&root->fs_info->block_group_cache_lock);
5818
5819         if (bytenr < (u64)-1)
5820                 return bytenr;
5821
5822         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
5823         if (!cache)
5824                 return 0;
5825
5826         bytenr = cache->key.objectid;
5827         btrfs_put_block_group(cache);
5828
5829         return bytenr;
5830 }
5831
5832 static int pin_down_extent(struct btrfs_root *root,
5833                            struct btrfs_block_group_cache *cache,
5834                            u64 bytenr, u64 num_bytes, int reserved)
5835 {
5836         spin_lock(&cache->space_info->lock);
5837         spin_lock(&cache->lock);
5838         cache->pinned += num_bytes;
5839         cache->space_info->bytes_pinned += num_bytes;
5840         if (reserved) {
5841                 cache->reserved -= num_bytes;
5842                 cache->space_info->bytes_reserved -= num_bytes;
5843         }
5844         spin_unlock(&cache->lock);
5845         spin_unlock(&cache->space_info->lock);
5846
5847         set_extent_dirty(root->fs_info->pinned_extents, bytenr,
5848                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
5849         if (reserved)
5850                 trace_btrfs_reserved_extent_free(root, bytenr, num_bytes);
5851         return 0;
5852 }
5853
5854 /*
5855  * this function must be called within a transaction
5856  */
5857 int btrfs_pin_extent(struct btrfs_root *root,
5858                      u64 bytenr, u64 num_bytes, int reserved)
5859 {
5860         struct btrfs_block_group_cache *cache;
5861
5862         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5863         BUG_ON(!cache); /* Logic error */
5864
5865         pin_down_extent(root, cache, bytenr, num_bytes, reserved);
5866
5867         btrfs_put_block_group(cache);
5868         return 0;
5869 }
5870
5871 /*
5872  * this function must be called within a transaction
5873  */
5874 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
5875                                     u64 bytenr, u64 num_bytes)
5876 {
5877         struct btrfs_block_group_cache *cache;
5878         int ret;
5879
5880         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5881         if (!cache)
5882                 return -EINVAL;
5883
5884         /*
5885          * pull in the free space cache (if any) so that our pin
5886          * removes the free space from the cache.  We have load_only set
5887          * to one because the slow code to read in the free extents does check
5888          * the pinned extents.
5889          */
5890         cache_block_group(cache, 1);
5891
5892         pin_down_extent(root, cache, bytenr, num_bytes, 0);
5893
5894         /* remove us from the free space cache (if we're there at all) */
5895         ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
5896         btrfs_put_block_group(cache);
5897         return ret;
5898 }
5899
5900 static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes)
5901 {
5902         int ret;
5903         struct btrfs_block_group_cache *block_group;
5904         struct btrfs_caching_control *caching_ctl;
5905
5906         block_group = btrfs_lookup_block_group(root->fs_info, start);
5907         if (!block_group)
5908                 return -EINVAL;
5909
5910         cache_block_group(block_group, 0);
5911         caching_ctl = get_caching_control(block_group);
5912
5913         if (!caching_ctl) {
5914                 /* Logic error */
5915                 BUG_ON(!block_group_cache_done(block_group));
5916                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
5917         } else {
5918                 mutex_lock(&caching_ctl->mutex);
5919
5920                 if (start >= caching_ctl->progress) {
5921                         ret = add_excluded_extent(root, start, num_bytes);
5922                 } else if (start + num_bytes <= caching_ctl->progress) {
5923                         ret = btrfs_remove_free_space(block_group,
5924                                                       start, num_bytes);
5925                 } else {
5926                         num_bytes = caching_ctl->progress - start;
5927                         ret = btrfs_remove_free_space(block_group,
5928                                                       start, num_bytes);
5929                         if (ret)
5930                                 goto out_lock;
5931
5932                         num_bytes = (start + num_bytes) -
5933                                 caching_ctl->progress;
5934                         start = caching_ctl->progress;
5935                         ret = add_excluded_extent(root, start, num_bytes);
5936                 }
5937 out_lock:
5938                 mutex_unlock(&caching_ctl->mutex);
5939                 put_caching_control(caching_ctl);
5940         }
5941         btrfs_put_block_group(block_group);
5942         return ret;
5943 }
5944
5945 int btrfs_exclude_logged_extents(struct btrfs_root *log,
5946                                  struct extent_buffer *eb)
5947 {
5948         struct btrfs_file_extent_item *item;
5949         struct btrfs_key key;
5950         int found_type;
5951         int i;
5952
5953         if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS))
5954                 return 0;
5955
5956         for (i = 0; i < btrfs_header_nritems(eb); i++) {
5957                 btrfs_item_key_to_cpu(eb, &key, i);
5958                 if (key.type != BTRFS_EXTENT_DATA_KEY)
5959                         continue;
5960                 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
5961                 found_type = btrfs_file_extent_type(eb, item);
5962                 if (found_type == BTRFS_FILE_EXTENT_INLINE)
5963                         continue;
5964                 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
5965                         continue;
5966                 key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
5967                 key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
5968                 __exclude_logged_extent(log, key.objectid, key.offset);
5969         }
5970
5971         return 0;
5972 }
5973
5974 /**
5975  * btrfs_update_reserved_bytes - update the block_group and space info counters
5976  * @cache:      The cache we are manipulating
5977  * @num_bytes:  The number of bytes in question
5978  * @reserve:    One of the reservation enums
5979  * @delalloc:   The blocks are allocated for the delalloc write
5980  *
5981  * This is called by the allocator when it reserves space, or by somebody who is
5982  * freeing space that was never actually used on disk.  For example if you
5983  * reserve some space for a new leaf in transaction A and before transaction A
5984  * commits you free that leaf, you call this with reserve set to 0 in order to
5985  * clear the reservation.
5986  *
5987  * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
5988  * ENOSPC accounting.  For data we handle the reservation through clearing the
5989  * delalloc bits in the io_tree.  We have to do this since we could end up
5990  * allocating less disk space for the amount of data we have reserved in the
5991  * case of compression.
5992  *
5993  * If this is a reservation and the block group has become read only we cannot
5994  * make the reservation and return -EAGAIN, otherwise this function always
5995  * succeeds.
5996  */
5997 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
5998                                        u64 num_bytes, int reserve, int delalloc)
5999 {
6000         struct btrfs_space_info *space_info = cache->space_info;
6001         int ret = 0;
6002
6003         spin_lock(&space_info->lock);
6004         spin_lock(&cache->lock);
6005         if (reserve != RESERVE_FREE) {
6006                 if (cache->ro) {
6007                         ret = -EAGAIN;
6008                 } else {
6009                         cache->reserved += num_bytes;
6010                         space_info->bytes_reserved += num_bytes;
6011                         if (reserve == RESERVE_ALLOC) {
6012                                 trace_btrfs_space_reservation(cache->fs_info,
6013                                                 "space_info", space_info->flags,
6014                                                 num_bytes, 0);
6015                                 space_info->bytes_may_use -= num_bytes;
6016                         }
6017
6018                         if (delalloc)
6019                                 cache->delalloc_bytes += num_bytes;
6020                 }
6021         } else {
6022                 if (cache->ro)
6023                         space_info->bytes_readonly += num_bytes;
6024                 cache->reserved -= num_bytes;
6025                 space_info->bytes_reserved -= num_bytes;
6026
6027                 if (delalloc)
6028                         cache->delalloc_bytes -= num_bytes;
6029         }
6030         spin_unlock(&cache->lock);
6031         spin_unlock(&space_info->lock);
6032         return ret;
6033 }
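
/*
 * Illustrative sketch, not in the original source: the typical pairing of
 * btrfs_update_reserved_bytes() calls.  Space taken with RESERVE_ALLOC is
 * handed back with RESERVE_FREE if it ends up unused; the caller below is
 * hypothetical.
 */
static int example_reserve_then_free(struct btrfs_block_group_cache *cache,
                                     u64 num_bytes)
{
        int ret;

        ret = btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_ALLOC, 0);
        if (ret == -EAGAIN)
                return ret;     /* block group went read only */

        /* ... the reservation turned out to be unnecessary ... */
        btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_FREE, 0);
        return 0;
}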
6034
6035 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
6036                                 struct btrfs_root *root)
6037 {
6038         struct btrfs_fs_info *fs_info = root->fs_info;
6039         struct btrfs_caching_control *next;
6040         struct btrfs_caching_control *caching_ctl;
6041         struct btrfs_block_group_cache *cache;
6042
6043         down_write(&fs_info->commit_root_sem);
6044
6045         list_for_each_entry_safe(caching_ctl, next,
6046                                  &fs_info->caching_block_groups, list) {
6047                 cache = caching_ctl->block_group;
6048                 if (block_group_cache_done(cache)) {
6049                         cache->last_byte_to_unpin = (u64)-1;
6050                         list_del_init(&caching_ctl->list);
6051                         put_caching_control(caching_ctl);
6052                 } else {
6053                         cache->last_byte_to_unpin = caching_ctl->progress;
6054                 }
6055         }
6056
6057         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
6058                 fs_info->pinned_extents = &fs_info->freed_extents[1];
6059         else
6060                 fs_info->pinned_extents = &fs_info->freed_extents[0];
6061
6062         up_write(&fs_info->commit_root_sem);
6063
6064         update_global_block_rsv(fs_info);
6065 }
6066
6067 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
6068                               const bool return_free_space)
6069 {
6070         struct btrfs_fs_info *fs_info = root->fs_info;
6071         struct btrfs_block_group_cache *cache = NULL;
6072         struct btrfs_space_info *space_info;
6073         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
6074         u64 len;
6075         bool readonly;
6076
6077         while (start <= end) {
6078                 readonly = false;
6079                 if (!cache ||
6080                     start >= cache->key.objectid + cache->key.offset) {
6081                         if (cache)
6082                                 btrfs_put_block_group(cache);
6083                         cache = btrfs_lookup_block_group(fs_info, start);
6084                         BUG_ON(!cache); /* Logic error */
6085                 }
6086
6087                 len = cache->key.objectid + cache->key.offset - start;
6088                 len = min(len, end + 1 - start);
6089
6090                 if (start < cache->last_byte_to_unpin) {
6091                         len = min(len, cache->last_byte_to_unpin - start);
6092                         if (return_free_space)
6093                                 btrfs_add_free_space(cache, start, len);
6094                 }
6095
6096                 start += len;
6097                 space_info = cache->space_info;
6098
6099                 spin_lock(&space_info->lock);
6100                 spin_lock(&cache->lock);
6101                 cache->pinned -= len;
6102                 space_info->bytes_pinned -= len;
6103                 percpu_counter_add(&space_info->total_bytes_pinned, -len);
6104                 if (cache->ro) {
6105                         space_info->bytes_readonly += len;
6106                         readonly = true;
6107                 }
6108                 spin_unlock(&cache->lock);
6109                 if (!readonly && global_rsv->space_info == space_info) {
6110                         spin_lock(&global_rsv->lock);
6111                         if (!global_rsv->full) {
6112                                 len = min(len, global_rsv->size -
6113                                           global_rsv->reserved);
6114                                 global_rsv->reserved += len;
6115                                 space_info->bytes_may_use += len;
6116                                 if (global_rsv->reserved >= global_rsv->size)
6117                                         global_rsv->full = 1;
6118                         }
6119                         spin_unlock(&global_rsv->lock);
6120                 }
6121                 spin_unlock(&space_info->lock);
6122         }
6123
6124         if (cache)
6125                 btrfs_put_block_group(cache);
6126         return 0;
6127 }
6128
6129 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
6130                                struct btrfs_root *root)
6131 {
6132         struct btrfs_fs_info *fs_info = root->fs_info;
6133         struct btrfs_block_group_cache *block_group, *tmp;
6134         struct list_head *deleted_bgs;
6135         struct extent_io_tree *unpin;
6136         u64 start;
6137         u64 end;
6138         int ret;
6139
6140         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
6141                 unpin = &fs_info->freed_extents[1];
6142         else
6143                 unpin = &fs_info->freed_extents[0];
6144
6145         while (!trans->aborted) {
6146                 mutex_lock(&fs_info->unused_bg_unpin_mutex);
6147                 ret = find_first_extent_bit(unpin, 0, &start, &end,
6148                                             EXTENT_DIRTY, NULL);
6149                 if (ret) {
6150                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6151                         break;
6152                 }
6153
6154                 if (btrfs_test_opt(root, DISCARD))
6155                         ret = btrfs_discard_extent(root, start,
6156                                                    end + 1 - start, NULL);
6157
6158                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
6159                 unpin_extent_range(root, start, end, true);
6160                 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6161                 cond_resched();
6162         }
6163
6164         /*
6165          * Transaction is finished.  We don't need the lock anymore.  We
6166          * do need to clean up the block groups in case of a transaction
6167          * abort.
6168          */
6169         deleted_bgs = &trans->transaction->deleted_bgs;
6170         list_for_each_entry_safe(block_group, tmp, deleted_bgs, bg_list) {
6171                 u64 trimmed = 0;
6172
6173                 ret = -EROFS;
6174                 if (!trans->aborted)
6175                         ret = btrfs_discard_extent(root,
6176                                                    block_group->key.objectid,
6177                                                    block_group->key.offset,
6178                                                    &trimmed);
6179
6180                 list_del_init(&block_group->bg_list);
6181                 btrfs_put_block_group_trimming(block_group);
6182                 btrfs_put_block_group(block_group);
6183
6184                 if (ret) {
6185                         const char *errstr = btrfs_decode_error(ret);
6186                         btrfs_warn(fs_info,
6187                                    "discard failed while removing blockgroup: errno=%d %s",
6188                                    ret, errstr);
6189                 }
6190         }
6191
6192         return 0;
6193 }
6194
6195 static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
6196                              u64 owner, u64 root_objectid)
6197 {
6198         struct btrfs_space_info *space_info;
6199         u64 flags;
6200
6201         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6202                 if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
6203                         flags = BTRFS_BLOCK_GROUP_SYSTEM;
6204                 else
6205                         flags = BTRFS_BLOCK_GROUP_METADATA;
6206         } else {
6207                 flags = BTRFS_BLOCK_GROUP_DATA;
6208         }
6209
6210         space_info = __find_space_info(fs_info, flags);
6211         BUG_ON(!space_info); /* Logic bug */
6212         percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
6213 }
6214
6215
6216 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
6217                                 struct btrfs_root *root,
6218                                 struct btrfs_delayed_ref_node *node, u64 parent,
6219                                 u64 root_objectid, u64 owner_objectid,
6220                                 u64 owner_offset, int refs_to_drop,
6221                                 struct btrfs_delayed_extent_op *extent_op)
6222 {
6223         struct btrfs_key key;
6224         struct btrfs_path *path;
6225         struct btrfs_fs_info *info = root->fs_info;
6226         struct btrfs_root *extent_root = info->extent_root;
6227         struct extent_buffer *leaf;
6228         struct btrfs_extent_item *ei;
6229         struct btrfs_extent_inline_ref *iref;
6230         int ret;
6231         int is_data;
6232         int extent_slot = 0;
6233         int found_extent = 0;
6234         int num_to_del = 1;
6235         int no_quota = node->no_quota;
6236         u32 item_size;
6237         u64 refs;
6238         u64 bytenr = node->bytenr;
6239         u64 num_bytes = node->num_bytes;
6240         int last_ref = 0;
6241         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
6242                                                  SKINNY_METADATA);
6243
6244         if (!info->quota_enabled || !is_fstree(root_objectid))
6245                 no_quota = 1;
6246
6247         path = btrfs_alloc_path();
6248         if (!path)
6249                 return -ENOMEM;
6250
6251         path->reada = 1;
6252         path->leave_spinning = 1;
6253
6254         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
6255         BUG_ON(!is_data && refs_to_drop != 1);
6256
6257         if (is_data)
6258                 skinny_metadata = 0;
6259
6260         ret = lookup_extent_backref(trans, extent_root, path, &iref,
6261                                     bytenr, num_bytes, parent,
6262                                     root_objectid, owner_objectid,
6263                                     owner_offset);
6264         if (ret == 0) {
6265                 extent_slot = path->slots[0];
6266                 while (extent_slot >= 0) {
6267                         btrfs_item_key_to_cpu(path->nodes[0], &key,
6268                                               extent_slot);
6269                         if (key.objectid != bytenr)
6270                                 break;
6271                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
6272                             key.offset == num_bytes) {
6273                                 found_extent = 1;
6274                                 break;
6275                         }
6276                         if (key.type == BTRFS_METADATA_ITEM_KEY &&
6277                             key.offset == owner_objectid) {
6278                                 found_extent = 1;
6279                                 break;
6280                         }
6281                         if (path->slots[0] - extent_slot > 5)
6282                                 break;
6283                         extent_slot--;
6284                 }
6285 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
6286                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
6287                 if (found_extent && item_size < sizeof(*ei))
6288                         found_extent = 0;
6289 #endif
6290                 if (!found_extent) {
6291                         BUG_ON(iref);
6292                         ret = remove_extent_backref(trans, extent_root, path,
6293                                                     NULL, refs_to_drop,
6294                                                     is_data, &last_ref);
6295                         if (ret) {
6296                                 btrfs_abort_transaction(trans, extent_root, ret);
6297                                 goto out;
6298                         }
6299                         btrfs_release_path(path);
6300                         path->leave_spinning = 1;
6301
6302                         key.objectid = bytenr;
6303                         key.type = BTRFS_EXTENT_ITEM_KEY;
6304                         key.offset = num_bytes;
6305
6306                         if (!is_data && skinny_metadata) {
6307                                 key.type = BTRFS_METADATA_ITEM_KEY;
6308                                 key.offset = owner_objectid;
6309                         }
6310
6311                         ret = btrfs_search_slot(trans, extent_root,
6312                                                 &key, path, -1, 1);
6313                         if (ret > 0 && skinny_metadata && path->slots[0]) {
6314                                 /*
6315                                  * Couldn't find our skinny metadata item,
6316                                  * see if we have ye olde extent item.
6317                                  */
6318                                 path->slots[0]--;
6319                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
6320                                                       path->slots[0]);
6321                                 if (key.objectid == bytenr &&
6322                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
6323                                     key.offset == num_bytes)
6324                                         ret = 0;
6325                         }
6326
6327                         if (ret > 0 && skinny_metadata) {
6328                                 skinny_metadata = false;
6329                                 key.objectid = bytenr;
6330                                 key.type = BTRFS_EXTENT_ITEM_KEY;
6331                                 key.offset = num_bytes;
6332                                 btrfs_release_path(path);
6333                                 ret = btrfs_search_slot(trans, extent_root,
6334                                                         &key, path, -1, 1);
6335                         }
6336
6337                         if (ret) {
6338                                 btrfs_err(info, "umm, got %d back from search, was looking for %llu",
6339                                         ret, bytenr);
6340                                 if (ret > 0)
6341                                         btrfs_print_leaf(extent_root,
6342                                                          path->nodes[0]);
6343                         }
6344                         if (ret < 0) {
6345                                 btrfs_abort_transaction(trans, extent_root, ret);
6346                                 goto out;
6347                         }
6348                         extent_slot = path->slots[0];
6349                 }
6350         } else if (WARN_ON(ret == -ENOENT)) {
6351                 btrfs_print_leaf(extent_root, path->nodes[0]);
6352                 btrfs_err(info,
6353                         "unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu",
6354                         bytenr, parent, root_objectid, owner_objectid,
6355                         owner_offset);
6356                 btrfs_abort_transaction(trans, extent_root, ret);
6357                 goto out;
6358         } else {
6359                 btrfs_abort_transaction(trans, extent_root, ret);
6360                 goto out;
6361         }
6362
6363         leaf = path->nodes[0];
6364         item_size = btrfs_item_size_nr(leaf, extent_slot);
6365 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
6366         if (item_size < sizeof(*ei)) {
6367                 BUG_ON(found_extent || extent_slot != path->slots[0]);
6368                 ret = convert_extent_item_v0(trans, extent_root, path,
6369                                              owner_objectid, 0);
6370                 if (ret < 0) {
6371                         btrfs_abort_transaction(trans, extent_root, ret);
6372                         goto out;
6373                 }
6374
6375                 btrfs_release_path(path);
6376                 path->leave_spinning = 1;
6377
6378                 key.objectid = bytenr;
6379                 key.type = BTRFS_EXTENT_ITEM_KEY;
6380                 key.offset = num_bytes;
6381
6382                 ret = btrfs_search_slot(trans, extent_root, &key, path,
6383                                         -1, 1);
6384                 if (ret) {
6385                         btrfs_err(info, "umm, got %d back from search, was looking for %llu",
6386                                 ret, bytenr);
6387                         btrfs_print_leaf(extent_root, path->nodes[0]);
6388                 }
6389                 if (ret < 0) {
6390                         btrfs_abort_transaction(trans, extent_root, ret);
6391                         goto out;
6392                 }
6393
6394                 extent_slot = path->slots[0];
6395                 leaf = path->nodes[0];
6396                 item_size = btrfs_item_size_nr(leaf, extent_slot);
6397         }
6398 #endif
6399         BUG_ON(item_size < sizeof(*ei));
6400         ei = btrfs_item_ptr(leaf, extent_slot,
6401                             struct btrfs_extent_item);
6402         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
6403             key.type == BTRFS_EXTENT_ITEM_KEY) {
6404                 struct btrfs_tree_block_info *bi;
6405                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
6406                 bi = (struct btrfs_tree_block_info *)(ei + 1);
6407                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
6408         }
6409
6410         refs = btrfs_extent_refs(leaf, ei);
6411         if (refs < refs_to_drop) {
6412                 btrfs_err(info, "trying to drop %d refs but we only have %Lu "
6413                           "for bytenr %Lu", refs_to_drop, refs, bytenr);
6414                 ret = -EINVAL;
6415                 btrfs_abort_transaction(trans, extent_root, ret);
6416                 goto out;
6417         }
6418         refs -= refs_to_drop;
6419
6420         if (refs > 0) {
6421                 if (extent_op)
6422                         __run_delayed_extent_op(extent_op, leaf, ei);
6423                 /*
6424                  * In the case of inline back ref, reference count will
6425                  * be updated by remove_extent_backref
6426                  */
6427                 if (iref) {
6428                         BUG_ON(!found_extent);
6429                 } else {
6430                         btrfs_set_extent_refs(leaf, ei, refs);
6431                         btrfs_mark_buffer_dirty(leaf);
6432                 }
6433                 if (found_extent) {
6434                         ret = remove_extent_backref(trans, extent_root, path,
6435                                                     iref, refs_to_drop,
6436                                                     is_data, &last_ref);
6437                         if (ret) {
6438                                 btrfs_abort_transaction(trans, extent_root, ret);
6439                                 goto out;
6440                         }
6441                 }
6442                 add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
6443                                  root_objectid);
6444         } else {
6445                 if (found_extent) {
6446                         BUG_ON(is_data && refs_to_drop !=
6447                                extent_data_ref_count(path, iref));
6448                         if (iref) {
6449                                 BUG_ON(path->slots[0] != extent_slot);
6450                         } else {
6451                                 BUG_ON(path->slots[0] != extent_slot + 1);
6452                                 path->slots[0] = extent_slot;
6453                                 num_to_del = 2;
6454                         }
6455                 }
6456
6457                 last_ref = 1;
6458                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
6459                                       num_to_del);
6460                 if (ret) {
6461                         btrfs_abort_transaction(trans, extent_root, ret);
6462                         goto out;
6463                 }
6464                 btrfs_release_path(path);
6465
6466                 if (is_data) {
6467                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
6468                         if (ret) {
6469                                 btrfs_abort_transaction(trans, extent_root, ret);
6470                                 goto out;
6471                         }
6472                 }
6473
6474                 ret = update_block_group(trans, root, bytenr, num_bytes, 0);
6475                 if (ret) {
6476                         btrfs_abort_transaction(trans, extent_root, ret);
6477                         goto out;
6478                 }
6479         }
6480         btrfs_release_path(path);
6481
6482 out:
6483         btrfs_free_path(path);
6484         return ret;
6485 }
6486
6487 /*
6488  * when we free a block, it is possible (and likely) that we free the last
6489  * delayed ref for that extent as well.  This searches the delayed ref tree for
6490  * a given extent, and if there are no other delayed refs to be processed, it
6491  * removes it from the tree.
6492  */
6493 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
6494                                       struct btrfs_root *root, u64 bytenr)
6495 {
6496         struct btrfs_delayed_ref_head *head;
6497         struct btrfs_delayed_ref_root *delayed_refs;
6498         int ret = 0;
6499
6500         delayed_refs = &trans->transaction->delayed_refs;
6501         spin_lock(&delayed_refs->lock);
6502         head = btrfs_find_delayed_ref_head(trans, bytenr);
6503         if (!head)
6504                 goto out_delayed_unlock;
6505
6506         spin_lock(&head->lock);
6507         if (!list_empty(&head->ref_list))
6508                 goto out;
6509
6510         if (head->extent_op) {
6511                 if (!head->must_insert_reserved)
6512                         goto out;
6513                 btrfs_free_delayed_extent_op(head->extent_op);
6514                 head->extent_op = NULL;
6515         }
6516
6517         /*
6518          * waiting for the lock here would deadlock.  If someone else has it
6519          * locked they are already in the process of dropping it anyway
6520          */
6521         if (!mutex_trylock(&head->mutex))
6522                 goto out;
6523
6524         /*
6525          * at this point we have a head with no other entries.  Go
6526          * ahead and process it.
6527          */
6528         head->node.in_tree = 0;
6529         rb_erase(&head->href_node, &delayed_refs->href_root);
6530
6531         atomic_dec(&delayed_refs->num_entries);
6532
6533         /*
6534          * we don't take a ref on the node because we're removing it from the
6535          * tree, so we just steal the ref the tree was holding.
6536          */
6537         delayed_refs->num_heads--;
6538         if (head->processing == 0)
6539                 delayed_refs->num_heads_ready--;
6540         head->processing = 0;
6541         spin_unlock(&head->lock);
6542         spin_unlock(&delayed_refs->lock);
6543
6544         BUG_ON(head->extent_op);
6545         if (head->must_insert_reserved)
6546                 ret = 1;
6547
6548         mutex_unlock(&head->mutex);
6549         btrfs_put_delayed_ref(&head->node);
6550         return ret;
6551 out:
6552         spin_unlock(&head->lock);
6553
6554 out_delayed_unlock:
6555         spin_unlock(&delayed_refs->lock);
6556         return 0;
6557 }
6558
6559 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
6560                            struct btrfs_root *root,
6561                            struct extent_buffer *buf,
6562                            u64 parent, int last_ref)
6563 {
6564         int pin = 1;
6565         int ret;
6566
6567         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6568                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6569                                         buf->start, buf->len,
6570                                         parent, root->root_key.objectid,
6571                                         btrfs_header_level(buf),
6572                                         BTRFS_DROP_DELAYED_REF, NULL, 0);
6573                 BUG_ON(ret); /* -ENOMEM */
6574         }
6575
6576         if (!last_ref)
6577                 return;
6578
6579         if (btrfs_header_generation(buf) == trans->transid) {
6580                 struct btrfs_block_group_cache *cache;
6581
6582                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6583                         ret = check_ref_cleanup(trans, root, buf->start);
6584                         if (!ret)
6585                                 goto out;
6586                 }
6587
6588                 cache = btrfs_lookup_block_group(root->fs_info, buf->start);
6589
6590                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
6591                         pin_down_extent(root, cache, buf->start, buf->len, 1);
6592                         btrfs_put_block_group(cache);
6593                         goto out;
6594                 }
6595
6596                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
6597
6598                 btrfs_add_free_space(cache, buf->start, buf->len);
6599                 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE, 0);
6600                 btrfs_put_block_group(cache);
6601                 trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
6602                 pin = 0;
6603         }
6604 out:
6605         if (pin)
6606                 add_pinned_bytes(root->fs_info, buf->len,
6607                                  btrfs_header_level(buf),
6608                                  root->root_key.objectid);
6609
6610         /*
6611          * Deleting the buffer, clear the corrupt flag since it doesn't matter
6612          * anymore.
6613          */
6614         clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
6615 }
6616
6617 /* Can return -ENOMEM */
6618 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
6619                       u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
6620                       u64 owner, u64 offset, int no_quota)
6621 {
6622         int ret;
6623         struct btrfs_fs_info *fs_info = root->fs_info;
6624
6625         if (btrfs_test_is_dummy_root(root))
6626                 return 0;
6627
6628         add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);
6629
6630         /*
6631          * tree log blocks never actually go into the extent allocation
6632          * tree, just update pinning info and exit early.
6633          */
6634         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
6635                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
6636                 /* unlocks the pinned mutex */
6637                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
6638                 ret = 0;
6639         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6640                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
6641                                         num_bytes,
6642                                         parent, root_objectid, (int)owner,
6643                                         BTRFS_DROP_DELAYED_REF, NULL, no_quota);
6644         } else {
6645                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
6646                                                 num_bytes,
6647                                                 parent, root_objectid, owner,
6648                                                 offset, BTRFS_DROP_DELAYED_REF,
6649                                                 NULL, no_quota);
6650         }
6651         return ret;
6652 }
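
/*
 * Illustrative sketch, hypothetical caller not in the original source:
 * dropping one reference on a data extent owned by a file inode.  With
 * parent == 0 the ref is keyed to the owning root rather than shared.
 */
static int example_drop_data_ref(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 u64 bytenr, u64 num_bytes,
                                 u64 ino, u64 file_offset)
{
        return btrfs_free_extent(trans, root, bytenr, num_bytes, 0,
                                 root->root_key.objectid, ino,
                                 file_offset, 0);
}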
6653
6654 /*
6655  * when we wait for progress in the block group caching, it's because
6656  * our allocation attempt failed at least once.  So, we must sleep
6657  * and let some progress happen before we try again.
6658  *
6659  * This function will sleep at least once waiting for new free space to
6660  * show up, and then it will check the block group free space numbers
6661  * for our min num_bytes.  Another option is to have it go ahead
6662  * and look in the rbtree for a free extent of a given size, but this
6663  * is a good start.
6664  *
6665  * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
6666  * any of the information in this block group.
6667  */
6668 static noinline void
6669 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
6670                                 u64 num_bytes)
6671 {
6672         struct btrfs_caching_control *caching_ctl;
6673
6674         caching_ctl = get_caching_control(cache);
6675         if (!caching_ctl)
6676                 return;
6677
6678         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
6679                    (cache->free_space_ctl->free_space >= num_bytes));
6680
6681         put_caching_control(caching_ctl);
6682 }
6683
6684 static noinline int
6685 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
6686 {
6687         struct btrfs_caching_control *caching_ctl;
6688         int ret = 0;
6689
6690         caching_ctl = get_caching_control(cache);
6691         if (!caching_ctl)
6692                 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
6693
6694         wait_event(caching_ctl->wait, block_group_cache_done(cache));
6695         if (cache->cached == BTRFS_CACHE_ERROR)
6696                 ret = -EIO;
6697         put_caching_control(caching_ctl);
6698         return ret;
6699 }
6700
6701 int __get_raid_index(u64 flags)
6702 {
6703         if (flags & BTRFS_BLOCK_GROUP_RAID10)
6704                 return BTRFS_RAID_RAID10;
6705         else if (flags & BTRFS_BLOCK_GROUP_RAID1)
6706                 return BTRFS_RAID_RAID1;
6707         else if (flags & BTRFS_BLOCK_GROUP_DUP)
6708                 return BTRFS_RAID_DUP;
6709         else if (flags & BTRFS_BLOCK_GROUP_RAID0)
6710                 return BTRFS_RAID_RAID0;
6711         else if (flags & BTRFS_BLOCK_GROUP_RAID5)
6712                 return BTRFS_RAID_RAID5;
6713         else if (flags & BTRFS_BLOCK_GROUP_RAID6)
6714                 return BTRFS_RAID_RAID6;
6715
6716         return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
6717 }
6718
6719 int get_block_group_index(struct btrfs_block_group_cache *cache)
6720 {
6721         return __get_raid_index(cache->flags);
6722 }
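
/*
 * Example (added for clarity, not original text): only the replication/
 * striping bits select the index; the DATA/METADATA/SYSTEM type bits fall
 * through every test above:
 *
 *      __get_raid_index(BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_RAID1)
 *              == BTRFS_RAID_RAID1;
 *      __get_raid_index(BTRFS_BLOCK_GROUP_METADATA) == BTRFS_RAID_SINGLE;
 */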
6723
6724 static const char *btrfs_raid_type_names[BTRFS_NR_RAID_TYPES] = {
6725         [BTRFS_RAID_RAID10]     = "raid10",
6726         [BTRFS_RAID_RAID1]      = "raid1",
6727         [BTRFS_RAID_DUP]        = "dup",
6728         [BTRFS_RAID_RAID0]      = "raid0",
6729         [BTRFS_RAID_SINGLE]     = "single",
6730         [BTRFS_RAID_RAID5]      = "raid5",
6731         [BTRFS_RAID_RAID6]      = "raid6",
6732 };
6733
6734 static const char *get_raid_name(enum btrfs_raid_types type)
6735 {
6736         if (type >= BTRFS_NR_RAID_TYPES)
6737                 return NULL;
6738
6739         return btrfs_raid_type_names[type];
6740 }
6741
6742 enum btrfs_loop_type {
6743         LOOP_CACHING_NOWAIT = 0,
6744         LOOP_CACHING_WAIT = 1,
6745         LOOP_ALLOC_CHUNK = 2,
6746         LOOP_NO_EMPTY_SIZE = 3,
6747 };
6748
6749 static inline void
6750 btrfs_lock_block_group(struct btrfs_block_group_cache *cache,
6751                        int delalloc)
6752 {
6753         if (delalloc)
6754                 down_read(&cache->data_rwsem);
6755 }
6756
6757 static inline void
6758 btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
6759                        int delalloc)
6760 {
6761         btrfs_get_block_group(cache);
6762         if (delalloc)
6763                 down_read(&cache->data_rwsem);
6764 }
6765
6766 static struct btrfs_block_group_cache *
6767 btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
6768                    struct btrfs_free_cluster *cluster,
6769                    int delalloc)
6770 {
6771         struct btrfs_block_group_cache *used_bg;
6772         bool locked = false;
6773 again:
6774         spin_lock(&cluster->refill_lock);
6775         if (locked) {
6776                 if (used_bg == cluster->block_group)
6777                         return used_bg;
6778
6779                 up_read(&used_bg->data_rwsem);
6780                 btrfs_put_block_group(used_bg);
6781         }
6782
6783         used_bg = cluster->block_group;
6784         if (!used_bg)
6785                 return NULL;
6786
6787         if (used_bg == block_group)
6788                 return used_bg;
6789
6790         btrfs_get_block_group(used_bg);
6791
6792         if (!delalloc)
6793                 return used_bg;
6794
6795         if (down_read_trylock(&used_bg->data_rwsem))
6796                 return used_bg;
6797
6798         spin_unlock(&cluster->refill_lock);
6799         down_read(&used_bg->data_rwsem);
6800         locked = true;
6801         goto again;
6802 }
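
/*
 * Added note on the retry dance above: refill_lock is a spinlock, so we
 * cannot block on used_bg->data_rwsem while holding it.  When the trylock
 * fails we drop the spinlock, take the rwsem the slow way, then reacquire
 * refill_lock and verify that cluster->block_group still points at the
 * group we locked; if it changed underneath us, we unwind and retry.
 */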
6803
6804 static inline void
6805 btrfs_release_block_group(struct btrfs_block_group_cache *cache,
6806                          int delalloc)
6807 {
6808         if (delalloc)
6809                 up_read(&cache->data_rwsem);
6810         btrfs_put_block_group(cache);
6811 }
6812
6813 /*
6814  * walks the btree of allocated extents and finds a hole of a given size.
6815  * The key ins is changed to record the hole:
6816  * ins->objectid == start position
6817  * ins->flags = BTRFS_EXTENT_ITEM_KEY
6818  * ins->offset == the size of the hole.
6819  * Any available blocks before search_start are skipped.
6820  *
6821  * If there is no suitable free space, we record the size of the largest
6822  * free extent we saw and return it in ins->offset on -ENOSPC.
6823  */
6824 static noinline int find_free_extent(struct btrfs_root *orig_root,
6825                                      u64 num_bytes, u64 empty_size,
6826                                      u64 hint_byte, struct btrfs_key *ins,
6827                                      u64 flags, int delalloc)
6828 {
6829         int ret = 0;
6830         struct btrfs_root *root = orig_root->fs_info->extent_root;
6831         struct btrfs_free_cluster *last_ptr = NULL;
6832         struct btrfs_block_group_cache *block_group = NULL;
6833         u64 search_start = 0;
6834         u64 max_extent_size = 0;
6835         int empty_cluster = 2 * 1024 * 1024;
6836         struct btrfs_space_info *space_info;
6837         int loop = 0;
6838         int index = __get_raid_index(flags);
6839         int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
6840                 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
6841         bool failed_cluster_refill = false;
6842         bool failed_alloc = false;
6843         bool use_cluster = true;
6844         bool have_caching_bg = false;
6845
6846         WARN_ON(num_bytes < root->sectorsize);
6847         ins->type = BTRFS_EXTENT_ITEM_KEY;
6848         ins->objectid = 0;
6849         ins->offset = 0;
6850
6851         trace_find_free_extent(orig_root, num_bytes, empty_size, flags);
6852
6853         space_info = __find_space_info(root->fs_info, flags);
6854         if (!space_info) {
6855                 btrfs_err(root->fs_info, "No space info for %llu", flags);
6856                 return -ENOSPC;
6857         }
6858
6859         /*
6860          * If the space info is for both data and metadata it means we have a
6861          * small filesystem and we can't use the clustering stuff.
6862          */
6863         if (btrfs_mixed_space_info(space_info))
6864                 use_cluster = false;
6865
6866         if (flags & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
6867                 last_ptr = &root->fs_info->meta_alloc_cluster;
6868                 if (!btrfs_test_opt(root, SSD))
6869                         empty_cluster = 64 * 1024;
6870         }
6871
6872         if ((flags & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
6873             btrfs_test_opt(root, SSD)) {
6874                 last_ptr = &root->fs_info->data_alloc_cluster;
6875         }
6876
6877         if (last_ptr) {
6878                 spin_lock(&last_ptr->lock);
6879                 if (last_ptr->block_group)
6880                         hint_byte = last_ptr->window_start;
6881                 spin_unlock(&last_ptr->lock);
6882         }
6883
6884         search_start = max(search_start, first_logical_byte(root, 0));
6885         search_start = max(search_start, hint_byte);
6886
6887         if (!last_ptr)
6888                 empty_cluster = 0;
6889
6890         if (search_start == hint_byte) {
6891                 block_group = btrfs_lookup_block_group(root->fs_info,
6892                                                        search_start);
6893                 /*
6894                  * we don't want to use the block group if it doesn't match our
6895                  * allocation bits, or if it's not cached.
6896                  *
6897                  * However if we are re-searching with an ideal block group
6898                  * picked out then we don't care that the block group is cached.
6899                  */
6900                 if (block_group && block_group_bits(block_group, flags) &&
6901                     block_group->cached != BTRFS_CACHE_NO) {
6902                         down_read(&space_info->groups_sem);
6903                         if (list_empty(&block_group->list) ||
6904                             block_group->ro) {
6905                                 /*
6906                                  * someone is removing this block group,
6907                                  * we can't jump into the have_block_group
6908                                  * target because our list pointers are not
6909                                  * valid
6910                                  */
6911                                 btrfs_put_block_group(block_group);
6912                                 up_read(&space_info->groups_sem);
6913                         } else {
6914                                 index = get_block_group_index(block_group);
6915                                 btrfs_lock_block_group(block_group, delalloc);
6916                                 goto have_block_group;
6917                         }
6918                 } else if (block_group) {
6919                         btrfs_put_block_group(block_group);
6920                 }
6921         }
6922 search:
6923         have_caching_bg = false;
6924         down_read(&space_info->groups_sem);
6925         list_for_each_entry(block_group, &space_info->block_groups[index],
6926                             list) {
6927                 u64 offset;
6928                 int cached;
6929
6930                 btrfs_grab_block_group(block_group, delalloc);
6931                 search_start = block_group->key.objectid;
6932
6933                 /*
6934                  * this can happen if we end up cycling through all the
6935                  * raid types, but we want to make sure we only allocate
6936                  * for the proper type.
6937                  */
6938                 if (!block_group_bits(block_group, flags)) {
6939                         u64 extra = BTRFS_BLOCK_GROUP_DUP |
6940                                     BTRFS_BLOCK_GROUP_RAID1 |
6941                                     BTRFS_BLOCK_GROUP_RAID5 |
6942                                     BTRFS_BLOCK_GROUP_RAID6 |
6943                                     BTRFS_BLOCK_GROUP_RAID10;
6944
6945                         /*
6946                          * if they asked for extra copies and this block group
6947                          * doesn't provide them, bail.  This does allow us to
6948                          * fill raid0 from raid1.
6949                          */
6950                         if ((flags & extra) && !(block_group->flags & extra))
6951                                 goto loop;
6952                 }
6953
6954 have_block_group:
6955                 cached = block_group_cache_done(block_group);
6956                 if (unlikely(!cached)) {
6957                         ret = cache_block_group(block_group, 0);
6958                         BUG_ON(ret < 0);
6959                         ret = 0;
6960                 }
6961
6962                 if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
6963                         goto loop;
6964                 if (unlikely(block_group->ro))
6965                         goto loop;
6966
6967                 /*
6968                  * Ok, we want to try to use the cluster allocator, so
6969                  * let's look there
6970                  */
6971                 if (last_ptr) {
6972                         struct btrfs_block_group_cache *used_block_group;
6973                         unsigned long aligned_cluster;
6974                         /*
6975                          * the refill lock keeps out other
6976                          * people trying to start a new cluster
6977                          */
6978                         used_block_group = btrfs_lock_cluster(block_group,
6979                                                               last_ptr,
6980                                                               delalloc);
6981                         if (!used_block_group)
6982                                 goto refill_cluster;
6983
6984                         if (used_block_group != block_group &&
6985                             (used_block_group->ro ||
6986                              !block_group_bits(used_block_group, flags)))
6987                                 goto release_cluster;
6988
6989                         offset = btrfs_alloc_from_cluster(used_block_group,
6990                                                 last_ptr,
6991                                                 num_bytes,
6992                                                 used_block_group->key.objectid,
6993                                                 &max_extent_size);
6994                         if (offset) {
6995                                 /* we have a block, we're done */
6996                                 spin_unlock(&last_ptr->refill_lock);
6997                                 trace_btrfs_reserve_extent_cluster(root,
6998                                                 used_block_group,
6999                                                 search_start, num_bytes);
7000                                 if (used_block_group != block_group) {
7001                                         btrfs_release_block_group(block_group,
7002                                                                   delalloc);
7003                                         block_group = used_block_group;
7004                                 }
7005                                 goto checks;
7006                         }
7007
7008                         WARN_ON(last_ptr->block_group != used_block_group);
7009 release_cluster:
7010                         /* If we are on LOOP_NO_EMPTY_SIZE, we can't
7011                          * set up a new cluster, so let's just skip it
7012                          * and let the allocator find whatever block
7013                          * it can find.  If we reach this point, we
7014                          * will have tried the cluster allocator
7015                          * plenty of times and not have found
7016                          * anything, so we are likely way too
7017                          * fragmented for the clustering stuff to find
7018                          * anything.
7019                          *
7020                          * However, if the cluster is taken from the
7021                          * current block group, release the cluster
7022                          * first, so that we stand a better chance of
7023                          * succeeding in the unclustered
7024                          * allocation.  */
7025                         if (loop >= LOOP_NO_EMPTY_SIZE &&
7026                             used_block_group != block_group) {
7027                                 spin_unlock(&last_ptr->refill_lock);
7028                                 btrfs_release_block_group(used_block_group,
7029                                                           delalloc);
7030                                 goto unclustered_alloc;
7031                         }
7032
7033                         /*
7034                          * this cluster didn't work out, free it and
7035                          * start over
7036                          */
7037                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
7038
7039                         if (used_block_group != block_group)
7040                                 btrfs_release_block_group(used_block_group,
7041                                                           delalloc);
7042 refill_cluster:
7043                         if (loop >= LOOP_NO_EMPTY_SIZE) {
7044                                 spin_unlock(&last_ptr->refill_lock);
7045                                 goto unclustered_alloc;
7046                         }
7047
7048                         aligned_cluster = max_t(unsigned long,
7049                                                 empty_cluster + empty_size,
7050                                               block_group->full_stripe_len);
7051
7052                         /* allocate a cluster in this block group */
7053                         ret = btrfs_find_space_cluster(root, block_group,
7054                                                        last_ptr, search_start,
7055                                                        num_bytes,
7056                                                        aligned_cluster);
7057                         if (ret == 0) {
7058                                 /*
7059                                  * now pull our allocation out of this
7060                                  * cluster
7061                                  */
7062                                 offset = btrfs_alloc_from_cluster(block_group,
7063                                                         last_ptr,
7064                                                         num_bytes,
7065                                                         search_start,
7066                                                         &max_extent_size);
7067                                 if (offset) {
7068                                         /* we found one, proceed */
7069                                         spin_unlock(&last_ptr->refill_lock);
7070                                         trace_btrfs_reserve_extent_cluster(root,
7071                                                 block_group, search_start,
7072                                                 num_bytes);
7073                                         goto checks;
7074                                 }
7075                         } else if (!cached && loop > LOOP_CACHING_NOWAIT
7076                                    && !failed_cluster_refill) {
7077                                 spin_unlock(&last_ptr->refill_lock);
7078
7079                                 failed_cluster_refill = true;
7080                                 wait_block_group_cache_progress(block_group,
7081                                        num_bytes + empty_cluster + empty_size);
7082                                 goto have_block_group;
7083                         }
7084
7085                         /*
7086                          * at this point we either didn't find a cluster
7087                          * or we weren't able to allocate a block from our
7088                          * cluster.  Free the cluster we've been trying
7089                          * to use, and go to the next block group
7090                          */
7091                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
7092                         spin_unlock(&last_ptr->refill_lock);
7093                         goto loop;
7094                 }
7095
7096 unclustered_alloc:
7097                 spin_lock(&block_group->free_space_ctl->tree_lock);
7098                 if (cached &&
7099                     block_group->free_space_ctl->free_space <
7100                     num_bytes + empty_cluster + empty_size) {
7101                         if (block_group->free_space_ctl->free_space >
7102                             max_extent_size)
7103                                 max_extent_size =
7104                                         block_group->free_space_ctl->free_space;
7105                         spin_unlock(&block_group->free_space_ctl->tree_lock);
7106                         goto loop;
7107                 }
7108                 spin_unlock(&block_group->free_space_ctl->tree_lock);
7109
7110                 offset = btrfs_find_space_for_alloc(block_group, search_start,
7111                                                     num_bytes, empty_size,
7112                                                     &max_extent_size);
7113                 /*
7114                  * If we didn't find a chunk, and we haven't failed on this
7115                  * block group before, and this block group is in the middle of
7116                  * caching and we are ok with waiting, then go ahead and wait
7117                  * for progress to be made, and set failed_alloc to true.
7118                  *
7119                  * If failed_alloc is true then we've already waited on this
7120                  * block group once and should move on to the next block group.
7121                  */
7122                 if (!offset && !failed_alloc && !cached &&
7123                     loop > LOOP_CACHING_NOWAIT) {
7124                         wait_block_group_cache_progress(block_group,
7125                                                 num_bytes + empty_size);
7126                         failed_alloc = true;
7127                         goto have_block_group;
7128                 } else if (!offset) {
7129                         if (!cached)
7130                                 have_caching_bg = true;
7131                         goto loop;
7132                 }
7133 checks:
7134                 search_start = ALIGN(offset, root->stripesize);
7135
7136                 /* move on to the next group */
7137                 if (search_start + num_bytes >
7138                     block_group->key.objectid + block_group->key.offset) {
7139                         btrfs_add_free_space(block_group, offset, num_bytes);
7140                         goto loop;
7141                 }
7142
7143                 if (offset < search_start)
7144                         btrfs_add_free_space(block_group, offset,
7145                                              search_start - offset);
7146                 BUG_ON(offset > search_start);
7147
7148                 ret = btrfs_update_reserved_bytes(block_group, num_bytes,
7149                                                   alloc_type, delalloc);
7150                 if (ret == -EAGAIN) {
7151                         btrfs_add_free_space(block_group, offset, num_bytes);
7152                         goto loop;
7153                 }
7154
7155                 /* we are all good, let's return */
7156                 ins->objectid = search_start;
7157                 ins->offset = num_bytes;
7158
7159                 trace_btrfs_reserve_extent(orig_root, block_group,
7160                                            search_start, num_bytes);
7161                 btrfs_release_block_group(block_group, delalloc);
7162                 break;
7163 loop:
7164                 failed_cluster_refill = false;
7165                 failed_alloc = false;
7166                 BUG_ON(index != get_block_group_index(block_group));
7167                 btrfs_release_block_group(block_group, delalloc);
7168         }
7169         up_read(&space_info->groups_sem);
7170
7171         if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
7172                 goto search;
7173
7174         if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
7175                 goto search;
7176
7177         /*
7178          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
7179          *                      caching kthreads as we move along
7180          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
7181          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
7182          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
7183          *                      again
7184          */
7185         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
7186                 index = 0;
7187                 loop++;
7188                 if (loop == LOOP_ALLOC_CHUNK) {
7189                         struct btrfs_trans_handle *trans;
7190                         int exist = 0;
7191
7192                         trans = current->journal_info;
7193                         if (trans)
7194                                 exist = 1;
7195                         else
7196                                 trans = btrfs_join_transaction(root);
7197
7198                         if (IS_ERR(trans)) {
7199                                 ret = PTR_ERR(trans);
7200                                 goto out;
7201                         }
7202
7203                         ret = do_chunk_alloc(trans, root, flags,
7204                                              CHUNK_ALLOC_FORCE);
7205                         /*
7206                          * Do not bail out on ENOSPC since we
7207                          * can still make progress in later loops.
7208                          */
7209                         if (ret < 0 && ret != -ENOSPC)
7210                                 btrfs_abort_transaction(trans,
7211                                                         root, ret);
7212                         else
7213                                 ret = 0;
7214                         if (!exist)
7215                                 btrfs_end_transaction(trans, root);
7216                         if (ret)
7217                                 goto out;
7218                 }
7219
7220                 if (loop == LOOP_NO_EMPTY_SIZE) {
7221                         empty_size = 0;
7222                         empty_cluster = 0;
7223                 }
7224
7225                 goto search;
7226         } else if (!ins->objectid) {
7227                 ret = -ENOSPC;
7228         } else if (ins->objectid) {
7229                 ret = 0;
7230         }
7231 out:
7232         if (ret == -ENOSPC)
7233                 ins->offset = max_extent_size;
7234         return ret;
7235 }
7236
7237 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
7238                             int dump_block_groups)
7239 {
7240         struct btrfs_block_group_cache *cache;
7241         int index = 0;
7242
7243         spin_lock(&info->lock);
7244         printk(KERN_INFO "BTRFS: space_info %llu has %llu free, is %sfull\n",
7245                info->flags,
7246                info->total_bytes - info->bytes_used - info->bytes_pinned -
7247                info->bytes_reserved - info->bytes_readonly,
7248                (info->full) ? "" : "not ");
7249         printk(KERN_INFO "BTRFS: space_info total=%llu, used=%llu, pinned=%llu, "
7250                "reserved=%llu, may_use=%llu, readonly=%llu\n",
7251                info->total_bytes, info->bytes_used, info->bytes_pinned,
7252                info->bytes_reserved, info->bytes_may_use,
7253                info->bytes_readonly);
7254         spin_unlock(&info->lock);
7255
7256         if (!dump_block_groups)
7257                 return;
7258
7259         down_read(&info->groups_sem);
7260 again:
7261         list_for_each_entry(cache, &info->block_groups[index], list) {
7262                 spin_lock(&cache->lock);
7263                 printk(KERN_INFO "BTRFS: "
7264                            "block group %llu has %llu bytes, "
7265                            "%llu used %llu pinned %llu reserved %s\n",
7266                        cache->key.objectid, cache->key.offset,
7267                        btrfs_block_group_used(&cache->item), cache->pinned,
7268                        cache->reserved, cache->ro ? "[readonly]" : "");
7269                 btrfs_dump_free_space(cache, bytes);
7270                 spin_unlock(&cache->lock);
7271         }
7272         if (++index < BTRFS_NR_RAID_TYPES)
7273                 goto again;
7274         up_read(&info->groups_sem);
7275 }
7276
7277 int btrfs_reserve_extent(struct btrfs_root *root,
7278                          u64 num_bytes, u64 min_alloc_size,
7279                          u64 empty_size, u64 hint_byte,
7280                          struct btrfs_key *ins, int is_data, int delalloc)
7281 {
7282         bool final_tried = false;
7283         u64 flags;
7284         int ret;
7285
7286         flags = btrfs_get_alloc_profile(root, is_data);
7287 again:
7288         WARN_ON(num_bytes < root->sectorsize);
7289         ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins,
7290                                flags, delalloc);
7291
7292         if (ret == -ENOSPC) {
7293                 if (!final_tried && ins->offset) {
7294                         num_bytes = min(num_bytes >> 1, ins->offset);
7295                         num_bytes = round_down(num_bytes, root->sectorsize);
7296                         num_bytes = max(num_bytes, min_alloc_size);
7297                         if (num_bytes == min_alloc_size)
7298                                 final_tried = true;
7299                         goto again;
7300                 } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
7301                         struct btrfs_space_info *sinfo;
7302
7303                         sinfo = __find_space_info(root->fs_info, flags);
7304                         btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
7305                                 flags, num_bytes);
7306                         if (sinfo)
7307                                 dump_space_info(sinfo, num_bytes, 1);
7308                 }
7309         }
7310
7311         return ret;
7312 }
7313
7314 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
7315                                         u64 start, u64 len,
7316                                         int pin, int delalloc)
7317 {
7318         struct btrfs_block_group_cache *cache;
7319         int ret = 0;
7320
7321         cache = btrfs_lookup_block_group(root->fs_info, start);
7322         if (!cache) {
7323                 btrfs_err(root->fs_info, "Unable to find block group for %llu",
7324                         start);
7325                 return -ENOSPC;
7326         }
7327
7328         if (pin)
7329                 pin_down_extent(root, cache, start, len, 1);
7330         else {
7331                 if (btrfs_test_opt(root, DISCARD))
7332                         ret = btrfs_discard_extent(root, start, len, NULL);
7333                 btrfs_add_free_space(cache, start, len);
7334                 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc);
7335         }
7336
7337         btrfs_put_block_group(cache);
7338
7339         trace_btrfs_reserved_extent_free(root, start, len);
7340
7341         return ret;
7342 }
7343
7344 int btrfs_free_reserved_extent(struct btrfs_root *root,
7345                                u64 start, u64 len, int delalloc)
7346 {
7347         return __btrfs_free_reserved_extent(root, start, len, 0, delalloc);
7348 }
7349
7350 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
7351                                        u64 start, u64 len)
7352 {
7353         return __btrfs_free_reserved_extent(root, start, len, 1, 0);
7354 }
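
/*
 * Illustrative usage sketch (added commentary; 'record_extent' is a
 * hypothetical placeholder, not a real helper): btrfs_reserve_extent()
 * returns the reservation in 'ins', and btrfs_free_reserved_extent() is
 * the matching undo on error paths before the extent item is recorded:
 *
 *      struct btrfs_key ins;
 *      int ret;
 *
 *      ret = btrfs_reserve_extent(root, num_bytes, root->sectorsize,
 *                                 0, 0, &ins, 1, 0);
 *      if (ret)
 *              return ret;
 *      ret = record_extent(ins.objectid, ins.offset);
 *      if (ret)
 *              btrfs_free_reserved_extent(root, ins.objectid,
 *                                         ins.offset, 0);
 *      return ret;
 */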
7355
7356 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
7357                                       struct btrfs_root *root,
7358                                       u64 parent, u64 root_objectid,
7359                                       u64 flags, u64 owner, u64 offset,
7360                                       struct btrfs_key *ins, int ref_mod)
7361 {
7362         int ret;
7363         struct btrfs_fs_info *fs_info = root->fs_info;
7364         struct btrfs_extent_item *extent_item;
7365         struct btrfs_extent_inline_ref *iref;
7366         struct btrfs_path *path;
7367         struct extent_buffer *leaf;
7368         int type;
7369         u32 size;
7370
7371         if (parent > 0)
7372                 type = BTRFS_SHARED_DATA_REF_KEY;
7373         else
7374                 type = BTRFS_EXTENT_DATA_REF_KEY;
7375
7376         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
7377
7378         path = btrfs_alloc_path();
7379         if (!path)
7380                 return -ENOMEM;
7381
7382         path->leave_spinning = 1;
7383         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7384                                       ins, size);
7385         if (ret) {
7386                 btrfs_free_path(path);
7387                 return ret;
7388         }
7389
7390         leaf = path->nodes[0];
7391         extent_item = btrfs_item_ptr(leaf, path->slots[0],
7392                                      struct btrfs_extent_item);
7393         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
7394         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7395         btrfs_set_extent_flags(leaf, extent_item,
7396                                flags | BTRFS_EXTENT_FLAG_DATA);
7397
7398         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7399         btrfs_set_extent_inline_ref_type(leaf, iref, type);
7400         if (parent > 0) {
7401                 struct btrfs_shared_data_ref *ref;
7402                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
7403                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7404                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
7405         } else {
7406                 struct btrfs_extent_data_ref *ref;
7407                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
7408                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
7409                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
7410                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
7411                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
7412         }
7413
7414         btrfs_mark_buffer_dirty(path->nodes[0]);
7415         btrfs_free_path(path);
7416
7417         ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
7418         if (ret) { /* -ENOENT, logic error */
7419                 btrfs_err(fs_info, "update block group failed for %llu %llu",
7420                         ins->objectid, ins->offset);
7421                 BUG();
7422         }
7423         trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
7424         return ret;
7425 }
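
/*
 * Layout sketch of the item inserted above (added for clarity, derived
 * from the code): the extent item is followed by exactly one inline
 * backref whose shape depends on whether the ref is shared:
 *
 *      parent > 0 (shared):
 *        [btrfs_extent_item][iref: SHARED_DATA_REF, offset = parent]
 *        [btrfs_shared_data_ref: count]
 *
 *      parent == 0 (keyed):
 *        [btrfs_extent_item][iref: EXTENT_DATA_REF]
 *        [btrfs_extent_data_ref: root, objectid, offset, count]
 */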
7426
7427 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
7428                                      struct btrfs_root *root,
7429                                      u64 parent, u64 root_objectid,
7430                                      u64 flags, struct btrfs_disk_key *key,
7431                                      int level, struct btrfs_key *ins,
7432                                      int no_quota)
7433 {
7434         int ret;
7435         struct btrfs_fs_info *fs_info = root->fs_info;
7436         struct btrfs_extent_item *extent_item;
7437         struct btrfs_tree_block_info *block_info;
7438         struct btrfs_extent_inline_ref *iref;
7439         struct btrfs_path *path;
7440         struct extent_buffer *leaf;
7441         u32 size = sizeof(*extent_item) + sizeof(*iref);
7442         u64 num_bytes = ins->offset;
7443         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7444                                                  SKINNY_METADATA);
7445
7446         if (!skinny_metadata)
7447                 size += sizeof(*block_info);
7448
7449         path = btrfs_alloc_path();
7450         if (!path) {
7451                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7452                                                    root->nodesize);
7453                 return -ENOMEM;
7454         }
7455
7456         path->leave_spinning = 1;
7457         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7458                                       ins, size);
7459         if (ret) {
7460                 btrfs_free_path(path);
7461                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7462                                                    root->nodesize);
7463                 return ret;
7464         }
7465
7466         leaf = path->nodes[0];
7467         extent_item = btrfs_item_ptr(leaf, path->slots[0],
7468                                      struct btrfs_extent_item);
7469         btrfs_set_extent_refs(leaf, extent_item, 1);
7470         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7471         btrfs_set_extent_flags(leaf, extent_item,
7472                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
7473
7474         if (skinny_metadata) {
7475                 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7476                 num_bytes = root->nodesize;
7477         } else {
7478                 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
7479                 btrfs_set_tree_block_key(leaf, block_info, key);
7480                 btrfs_set_tree_block_level(leaf, block_info, level);
7481                 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
7482         }
7483
7484         if (parent > 0) {
7485                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
7486                 btrfs_set_extent_inline_ref_type(leaf, iref,
7487                                                  BTRFS_SHARED_BLOCK_REF_KEY);
7488                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7489         } else {
7490                 btrfs_set_extent_inline_ref_type(leaf, iref,
7491                                                  BTRFS_TREE_BLOCK_REF_KEY);
7492                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
7493         }
7494
7495         btrfs_mark_buffer_dirty(leaf);
7496         btrfs_free_path(path);
7497
7498         ret = update_block_group(trans, root, ins->objectid, root->nodesize,
7499                                  1);
7500         if (ret) { /* -ENOENT, logic error */
7501                 btrfs_err(fs_info, "update block group failed for %llu %llu",
7502                         ins->objectid, ins->offset);
7503                 BUG();
7504         }
7505
7506         trace_btrfs_reserved_extent_alloc(root, ins->objectid, root->nodesize);
7507         return ret;
7508 }
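
/*
 * Layout sketch for the metadata case above (added commentary): with the
 * SKINNY_METADATA incompat flag the key already carries the level in its
 * offset, so btrfs_tree_block_info is omitted:
 *
 *      skinny:     [btrfs_extent_item][iref]
 *      non-skinny: [btrfs_extent_item][tree_block_info: key, level][iref]
 *
 * Either way the iref is SHARED_BLOCK_REF (offset = parent) for full
 * backrefs, or TREE_BLOCK_REF (offset = root_objectid) otherwise.
 */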
7509
7510 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
7511                                      struct btrfs_root *root,
7512                                      u64 root_objectid, u64 owner,
7513                                      u64 offset, struct btrfs_key *ins)
7514 {
7515         int ret;
7516
7517         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
7518
7519         ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
7520                                          ins->offset, 0,
7521                                          root_objectid, owner, offset,
7522                                          BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
7523         return ret;
7524 }
7525
7526 /*
7527  * this is used by the tree logging recovery code.  It records that
7528  * an extent has been allocated and makes sure to clear the free
7529  * space cache bits as well
7530  */
7531 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
7532                                    struct btrfs_root *root,
7533                                    u64 root_objectid, u64 owner, u64 offset,
7534                                    struct btrfs_key *ins)
7535 {
7536         int ret;
7537         struct btrfs_block_group_cache *block_group;
7538
7539         /*
7540          * Mixed block groups will exclude before processing the log so we only
7541          * need to do the exclude dance if this fs isn't mixed.
7542          */
7543         if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
7544                 ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
7545                 if (ret)
7546                         return ret;
7547         }
7548
7549         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
7550         if (!block_group)
7551                 return -EINVAL;
7552
7553         ret = btrfs_update_reserved_bytes(block_group, ins->offset,
7554                                           RESERVE_ALLOC_NO_ACCOUNT, 0);
7555         BUG_ON(ret); /* logic error */
7556         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
7557                                          0, owner, offset, ins, 1);
7558         btrfs_put_block_group(block_group);
7559         return ret;
7560 }
7561
7562 static struct extent_buffer *
7563 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
7564                       u64 bytenr, int level)
7565 {
7566         struct extent_buffer *buf;
7567
7568         buf = btrfs_find_create_tree_block(root, bytenr);
7569         if (!buf)
7570                 return ERR_PTR(-ENOMEM);
7571         btrfs_set_header_generation(buf, trans->transid);
7572         btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
7573         btrfs_tree_lock(buf);
7574         clean_tree_block(trans, root->fs_info, buf);
7575         clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
7576
7577         btrfs_set_lock_blocking(buf);
7578         btrfs_set_buffer_uptodate(buf);
7579
7580         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
7581                 buf->log_index = root->log_transid % 2;
7582                 /*
7583                  * we allow two log transactions at a time, use different
7584                  * EXTENT bits to differentiate dirty pages.
7585                  */
7586                 if (buf->log_index == 0)
7587                         set_extent_dirty(&root->dirty_log_pages, buf->start,
7588                                         buf->start + buf->len - 1, GFP_NOFS);
7589                 else
7590                         set_extent_new(&root->dirty_log_pages, buf->start,
7591                                         buf->start + buf->len - 1, GFP_NOFS);
7592         } else {
7593                 buf->log_index = -1;
7594                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
7595                          buf->start + buf->len - 1, GFP_NOFS);
7596         }
7597         trans->blocks_used++;
7598         /* this returns a buffer locked for blocking */
7599         return buf;
7600 }
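
/*
 * Added note: the buffer returned above comes back locked (blocking lock
 * held), marked uptodate, with its header generation set to the current
 * transaction, and its range tagged dirty in either the root's
 * dirty_log_pages tree (log trees alternate the DIRTY/NEW bits between
 * the two in-flight log transactions) or the transaction's dirty_pages
 * tree (everything else).
 */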
7601
7602 static struct btrfs_block_rsv *
7603 use_block_rsv(struct btrfs_trans_handle *trans,
7604               struct btrfs_root *root, u32 blocksize)
7605 {
7606         struct btrfs_block_rsv *block_rsv;
7607         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
7608         int ret;
7609         bool global_updated = false;
7610
7611         block_rsv = get_block_rsv(trans, root);
7612
7613         if (unlikely(block_rsv->size == 0))
7614                 goto try_reserve;
7615 again:
7616         ret = block_rsv_use_bytes(block_rsv, blocksize);
7617         if (!ret)
7618                 return block_rsv;
7619
7620         if (block_rsv->failfast)
7621                 return ERR_PTR(ret);
7622
7623         if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
7624                 global_updated = true;
7625                 update_global_block_rsv(root->fs_info);
7626                 goto again;
7627         }
7628
7629         if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
7630                 static DEFINE_RATELIMIT_STATE(_rs,
7631                                 DEFAULT_RATELIMIT_INTERVAL * 10,
7632                                 /*DEFAULT_RATELIMIT_BURST*/ 1);
7633                 if (__ratelimit(&_rs))
7634                         WARN(1, KERN_DEBUG
7635                                 "BTRFS: block rsv returned %d\n", ret);
7636         }
7637 try_reserve:
7638         ret = reserve_metadata_bytes(root, block_rsv, blocksize,
7639                                      BTRFS_RESERVE_NO_FLUSH);
7640         if (!ret)
7641                 return block_rsv;
7642         /*
7643          * If we couldn't reserve metadata bytes, try to use some from
7644          * the global reserve, provided it shares its space_info with
7645          * this block rsv.
7646          */
7647         if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
7648             block_rsv->space_info == global_rsv->space_info) {
7649                 ret = block_rsv_use_bytes(global_rsv, blocksize);
7650                 if (!ret)
7651                         return global_rsv;
7652         }
7653         return ERR_PTR(ret);
7654 }
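
/*
 * Summary of the fallback order implemented above (added commentary):
 *   1) consume bytes already held in the transaction's block rsv;
 *   2) if that rsv is the global one and may be stale, refresh it once
 *      via update_global_block_rsv() and retry;
 *   3) reserve fresh metadata space with BTRFS_RESERVE_NO_FLUSH;
 *   4) as a last resort, steal from the global reserve when it shares
 *      the same space_info.
 * failfast reservations return after step 1.
 */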
7655
7656 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
7657                             struct btrfs_block_rsv *block_rsv, u32 blocksize)
7658 {
7659         block_rsv_add_bytes(block_rsv, blocksize, 0);
7660         block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
7661 }
7662
7663 /*
7664  * finds a free extent and does all the dirty work required for allocation.
7665  * Returns the tree buffer or an ERR_PTR on error.
7666  */
7667 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
7668                                         struct btrfs_root *root,
7669                                         u64 parent, u64 root_objectid,
7670                                         struct btrfs_disk_key *key, int level,
7671                                         u64 hint, u64 empty_size)
7672 {
7673         struct btrfs_key ins;
7674         struct btrfs_block_rsv *block_rsv;
7675         struct extent_buffer *buf;
7676         struct btrfs_delayed_extent_op *extent_op;
7677         u64 flags = 0;
7678         int ret;
7679         u32 blocksize = root->nodesize;
7680         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7681                                                  SKINNY_METADATA);
7682
7683         if (btrfs_test_is_dummy_root(root)) {
7684                 buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
7685                                             level);
7686                 if (!IS_ERR(buf))
7687                         root->alloc_bytenr += blocksize;
7688                 return buf;
7689         }
7690
7691         block_rsv = use_block_rsv(trans, root, blocksize);
7692         if (IS_ERR(block_rsv))
7693                 return ERR_CAST(block_rsv);
7694
7695         ret = btrfs_reserve_extent(root, blocksize, blocksize,
7696                                    empty_size, hint, &ins, 0, 0);
7697         if (ret)
7698                 goto out_unuse;
7699
7700         buf = btrfs_init_new_buffer(trans, root, ins.objectid, level);
7701         if (IS_ERR(buf)) {
7702                 ret = PTR_ERR(buf);
7703                 goto out_free_reserved;
7704         }
7705
7706         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
7707                 if (parent == 0)
7708                         parent = ins.objectid;
7709                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
7710         } else
7711                 BUG_ON(parent > 0);
7712
7713         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
7714                 extent_op = btrfs_alloc_delayed_extent_op();
7715                 if (!extent_op) {
7716                         ret = -ENOMEM;
7717                         goto out_free_buf;
7718                 }
7719                 if (key)
7720                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
7721                 else
7722                         memset(&extent_op->key, 0, sizeof(extent_op->key));
7723                 extent_op->flags_to_set = flags;
7724                 if (skinny_metadata)
7725                         extent_op->update_key = 0;
7726                 else
7727                         extent_op->update_key = 1;
7728                 extent_op->update_flags = 1;
7729                 extent_op->is_data = 0;
7730                 extent_op->level = level;
7731
7732                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
7733                                                  ins.objectid, ins.offset,
7734                                                  parent, root_objectid, level,
7735                                                  BTRFS_ADD_DELAYED_EXTENT,
7736                                                  extent_op, 0);
7737                 if (ret)
7738                         goto out_free_delayed;
7739         }
7740         return buf;
7741
7742 out_free_delayed:
7743         btrfs_free_delayed_extent_op(extent_op);
7744 out_free_buf:
7745         free_extent_buffer(buf);
7746 out_free_reserved:
7747         btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 0);
7748 out_unuse:
7749         unuse_block_rsv(root->fs_info, block_rsv, blocksize);
7750         return ERR_PTR(ret);
7751 }
7752
7753 struct walk_control {
7754         u64 refs[BTRFS_MAX_LEVEL];
7755         u64 flags[BTRFS_MAX_LEVEL];
7756         struct btrfs_key update_progress;
7757         int stage;
7758         int level;
7759         int shared_level;
7760         int update_ref;
7761         int keep_locks;
7762         int reada_slot;
7763         int reada_count;
7764         int for_reloc;
7765 };
7766
7767 #define DROP_REFERENCE  1
7768 #define UPDATE_BACKREF  2
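
/*
 * Added summary (hedged, not original text): DROP_REFERENCE walks down
 * dropping our reference on each block and only descends into blocks the
 * tree owns exclusively; UPDATE_BACKREF is entered when wc->update_ref is
 * set, converting implicit backrefs of shared blocks into explicit full
 * backrefs before the references are dropped.
 */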
7769
7770 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
7771                                      struct btrfs_root *root,
7772                                      struct walk_control *wc,
7773                                      struct btrfs_path *path)
7774 {
7775         u64 bytenr;
7776         u64 generation;
7777         u64 refs;
7778         u64 flags;
7779         u32 nritems;
7780         u32 blocksize;
7781         struct btrfs_key key;
7782         struct extent_buffer *eb;
7783         int ret;
7784         int slot;
7785         int nread = 0;
7786
7787         if (path->slots[wc->level] < wc->reada_slot) {
7788                 wc->reada_count = wc->reada_count * 2 / 3;
7789                 wc->reada_count = max(wc->reada_count, 2);
7790         } else {
7791                 wc->reada_count = wc->reada_count * 3 / 2;
7792                 wc->reada_count = min_t(int, wc->reada_count,
7793                                         BTRFS_NODEPTRS_PER_BLOCK(root));
7794         }
7795
7796         eb = path->nodes[wc->level];
7797         nritems = btrfs_header_nritems(eb);
7798         blocksize = root->nodesize;
7799
7800         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
7801                 if (nread >= wc->reada_count)
7802                         break;
7803
7804                 cond_resched();
7805                 bytenr = btrfs_node_blockptr(eb, slot);
7806                 generation = btrfs_node_ptr_generation(eb, slot);
7807
7808                 if (slot == path->slots[wc->level])
7809                         goto reada;
7810
7811                 if (wc->stage == UPDATE_BACKREF &&
7812                     generation <= root->root_key.offset)
7813                         continue;
7814
7815                 /* We don't lock the tree block, it's OK to be racy here */
7816                 ret = btrfs_lookup_extent_info(trans, root, bytenr,
7817                                                wc->level - 1, 1, &refs,
7818                                                &flags);
7819                 /* We don't care about errors in readahead. */
7820                 if (ret < 0)
7821                         continue;
7822                 BUG_ON(refs == 0);
7823
7824                 if (wc->stage == DROP_REFERENCE) {
7825                         if (refs == 1)
7826                                 goto reada;
7827
7828                         if (wc->level == 1 &&
7829                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7830                                 continue;
7831                         if (!wc->update_ref ||
7832                             generation <= root->root_key.offset)
7833                                 continue;
7834                         btrfs_node_key_to_cpu(eb, &key, slot);
7835                         ret = btrfs_comp_cpu_keys(&key,
7836                                                   &wc->update_progress);
7837                         if (ret < 0)
7838                                 continue;
7839                 } else {
7840                         if (wc->level == 1 &&
7841                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7842                                 continue;
7843                 }
7844 reada:
7845                 readahead_tree_block(root, bytenr);
7846                 nread++;
7847         }
7848         wc->reada_slot = slot;
7849 }
7850
7851 /*
7852  * TODO: Modify the related functions to add the node/leaf to dirty_extent_root
7853  * for later qgroup accounting.
7854  *
7855  * Currently, this function does nothing.
7856  */
7857 static int account_leaf_items(struct btrfs_trans_handle *trans,
7858                               struct btrfs_root *root,
7859                               struct extent_buffer *eb)
7860 {
7861         int nr = btrfs_header_nritems(eb);
7862         int i, extent_type;
7863         struct btrfs_key key;
7864         struct btrfs_file_extent_item *fi;
7865         u64 bytenr, num_bytes;
7866
7867         for (i = 0; i < nr; i++) {
7868                 btrfs_item_key_to_cpu(eb, &key, i);
7869
7870                 if (key.type != BTRFS_EXTENT_DATA_KEY)
7871                         continue;
7872
7873                 fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
7874                 /* filter out non-qgroup-accountable extents */
7875                 extent_type = btrfs_file_extent_type(eb, fi);
7876
7877                 if (extent_type == BTRFS_FILE_EXTENT_INLINE)
7878                         continue;
7879
7880                 bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
7881                 if (!bytenr)
7882                         continue;
7883
7884                 num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
7885         }
7886         return 0;
7887 }
7888
7889 /*
7890  * Walk up the tree from the bottom, freeing leaves and any interior
7891  * nodes which have had all slots visited. If a node (leaf or
7892  * interior) is freed, the node above it will have its slot
7893  * incremented. The root node will never be freed.
7894  *
7895  * At the end of this function, we should have a path which has all
7896  * slots incremented to the next position for a search. If we need to
7897  * read a new node it will be NULL and the node above it will have the
7898  * correct slot selected for a later read.
7899  *
7900  * If we increment the root node's slot counter past the number of
7901  * elements, 1 is returned to signal completion of the search.
7902  */
7903 static int adjust_slots_upwards(struct btrfs_root *root,
7904                                 struct btrfs_path *path, int root_level)
7905 {
7906         int level = 0;
7907         int nr, slot;
7908         struct extent_buffer *eb;
7909
7910         if (root_level == 0)
7911                 return 1;
7912
7913         while (level <= root_level) {
7914                 eb = path->nodes[level];
7915                 nr = btrfs_header_nritems(eb);
7916                 path->slots[level]++;
7917                 slot = path->slots[level];
7918                 if (slot >= nr || level == 0) {
7919                         /*
7920                          * Don't free the root -  we will detect this
7921                          * condition after our loop and return a
7922                          * positive value for caller to stop walking the tree.
7923                          */
7924                         if (level != root_level) {
7925                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7926                                 path->locks[level] = 0;
7927
7928                                 free_extent_buffer(eb);
7929                                 path->nodes[level] = NULL;
7930                                 path->slots[level] = 0;
7931                         }
7932                 } else {
7933                         /*
7934                          * We have a valid slot to walk back down
7935                          * from. Stop here so caller can process these
7936                          * new nodes.
7937                          */
7938                         break;
7939                 }
7940
7941                 level++;
7942         }
7943
7944         eb = path->nodes[root_level];
7945         if (path->slots[root_level] >= btrfs_header_nritems(eb))
7946                 return 1;
7947
7948         return 0;
7949 }
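
/*
 * A rough sketch of the walk this helper supports; account_shared_subtree()
 * below is the real caller, with locking and error handling elided here:
 *
 *	walk_down:
 *		descend, reading missing blocks, until a leaf is accounted;
 *		if (adjust_slots_upwards(root, path, root_level))
 *			done;		search ran past the root's nritems
 *		goto walk_down;		restart from the newly selected slots
 */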
7950
7951 /*
7952  * root_eb is the subtree root and is locked before this function is called.
7953  * TODO: Modify this function to mark all (including completely shared)
7954  * nodes dirty in dirty_extent_root so they get accounted in qgroup.
7955  */
7956 static int account_shared_subtree(struct btrfs_trans_handle *trans,
7957                                   struct btrfs_root *root,
7958                                   struct extent_buffer *root_eb,
7959                                   u64 root_gen,
7960                                   int root_level)
7961 {
7962         int ret = 0;
7963         int level;
7964         struct extent_buffer *eb = root_eb;
7965         struct btrfs_path *path = NULL;
7966
7967         BUG_ON(root_level < 0 || root_level > BTRFS_MAX_LEVEL);
7968         BUG_ON(root_eb == NULL);
7969
7970         if (!root->fs_info->quota_enabled)
7971                 return 0;
7972
7973         if (!extent_buffer_uptodate(root_eb)) {
7974                 ret = btrfs_read_buffer(root_eb, root_gen);
7975                 if (ret)
7976                         goto out;
7977         }
7978
7979         if (root_level == 0) {
7980                 ret = account_leaf_items(trans, root, root_eb);
7981                 goto out;
7982         }
7983
7984         path = btrfs_alloc_path();
7985         if (!path)
7986                 return -ENOMEM;
7987
7988         /*
7989          * Walk down the tree.  Missing extent blocks are filled in as
7990          * we go. Metadata is accounted every time we read a new
7991          * extent block.
7992          *
7993          * When we reach a leaf, we account for file extent items in it,
7994          * walk back up the tree (adjusting slot pointers as we go)
7995          * and restart the search process.
7996          */
7997         extent_buffer_get(root_eb); /* For path */
7998         path->nodes[root_level] = root_eb;
7999         path->slots[root_level] = 0;
8000         path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
8001 walk_down:
8002         level = root_level;
8003         while (level >= 0) {
8004                 if (path->nodes[level] == NULL) {
8005                         int parent_slot;
8006                         u64 child_gen;
8007                         u64 child_bytenr;
8008
8009                         /* We need to get child blockptr/gen from
8010                          * parent before we can read it. */
8011                         eb = path->nodes[level + 1];
8012                         parent_slot = path->slots[level + 1];
8013                         child_bytenr = btrfs_node_blockptr(eb, parent_slot);
8014                         child_gen = btrfs_node_ptr_generation(eb, parent_slot);
8015
8016                         eb = read_tree_block(root, child_bytenr, child_gen);
8017                         if (IS_ERR(eb)) {
8018                                 ret = PTR_ERR(eb);
8019                                 goto out;
8020                         } else if (!extent_buffer_uptodate(eb)) {
8021                                 free_extent_buffer(eb);
8022                                 ret = -EIO;
8023                                 goto out;
8024                         }
8025
8026                         path->nodes[level] = eb;
8027                         path->slots[level] = 0;
8028
8029                         btrfs_tree_read_lock(eb);
8030                         btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
8031                         path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
8032                 }
8033
8034                 if (level == 0) {
8035                         ret = account_leaf_items(trans, root, path->nodes[level]);
8036                         if (ret)
8037                                 goto out;
8038
8039                         /* Nonzero return here means we completed our search */
8040                         ret = adjust_slots_upwards(root, path, root_level);
8041                         if (ret)
8042                                 break;
8043
8044                         /* Restart search with new slots */
8045                         goto walk_down;
8046                 }
8047
8048                 level--;
8049         }
8050
8051         ret = 0;
8052 out:
8053         btrfs_free_path(path);
8054
8055         return ret;
8056 }
8057
8058 /*
8059  * helper to process tree block while walking down the tree.
8060  *
8061  * when wc->stage == UPDATE_BACKREF, this function updates
8062  * back refs for pointers in the block.
8063  *
8064  * NOTE: return value 1 means we should stop walking down.
8065  */
8066 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
8067                                    struct btrfs_root *root,
8068                                    struct btrfs_path *path,
8069                                    struct walk_control *wc, int lookup_info)
8070 {
8071         int level = wc->level;
8072         struct extent_buffer *eb = path->nodes[level];
8073         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
8074         int ret;
8075
8076         if (wc->stage == UPDATE_BACKREF &&
8077             btrfs_header_owner(eb) != root->root_key.objectid)
8078                 return 1;
8079
8080         /*
8081          * when the reference count of a tree block is 1, it won't
8082          * increase again. once the full backref flag is set, we never clear it.
8083          */
8084         if (lookup_info &&
8085             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
8086              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
8087                 BUG_ON(!path->locks[level]);
8088                 ret = btrfs_lookup_extent_info(trans, root,
8089                                                eb->start, level, 1,
8090                                                &wc->refs[level],
8091                                                &wc->flags[level]);
8092                 BUG_ON(ret == -ENOMEM);
8093                 if (ret)
8094                         return ret;
8095                 BUG_ON(wc->refs[level] == 0);
8096         }
8097
8098         if (wc->stage == DROP_REFERENCE) {
8099                 if (wc->refs[level] > 1)
8100                         return 1;
8101
8102                 if (path->locks[level] && !wc->keep_locks) {
8103                         btrfs_tree_unlock_rw(eb, path->locks[level]);
8104                         path->locks[level] = 0;
8105                 }
8106                 return 0;
8107         }
8108
8109         /* wc->stage == UPDATE_BACKREF */
8110         if (!(wc->flags[level] & flag)) {
8111                 BUG_ON(!path->locks[level]);
8112                 ret = btrfs_inc_ref(trans, root, eb, 1);
8113                 BUG_ON(ret); /* -ENOMEM */
8114                 ret = btrfs_dec_ref(trans, root, eb, 0);
8115                 BUG_ON(ret); /* -ENOMEM */
8116                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
8117                                                   eb->len, flag,
8118                                                   btrfs_header_level(eb), 0);
8119                 BUG_ON(ret); /* -ENOMEM */
8120                 wc->flags[level] |= flag;
8121         }
8122
8123         /*
8124          * the block is shared by multiple trees, so it's not good to
8125          * keep the tree lock
8126          */
8127         if (path->locks[level] && level > 0) {
8128                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8129                 path->locks[level] = 0;
8130         }
8131         return 0;
8132 }
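
/*
 * The two-stage walk in brief: DROP_REFERENCE descends freeing blocks
 * whose refcount is 1 and stops at shared blocks. When a shared block's
 * backrefs must also be converted, do_walk_down() below switches
 * wc->stage to UPDATE_BACKREF, the subtree is re-walked to add full
 * backrefs, and walk_up_proc() switches the stage back to
 * DROP_REFERENCE once the shared level is passed.
 */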
8133
8134 /*
8135  * helper to process tree block pointer.
8136  *
8137  * when wc->stage == DROP_REFERENCE, this function checks
8138  * reference count of the block pointed to. if the block
8139  * is shared and we need to update back refs for the subtree
8140  * rooted at the block, this function changes wc->stage to
8141  * UPDATE_BACKREF. if the block is shared and there is no
8142  * need to update back refs, this function drops the reference
8143  * to the block.
8144  *
8145  * NOTE: return value 1 means we should stop walking down.
8146  */
8147 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
8148                                  struct btrfs_root *root,
8149                                  struct btrfs_path *path,
8150                                  struct walk_control *wc, int *lookup_info)
8151 {
8152         u64 bytenr;
8153         u64 generation;
8154         u64 parent;
8155         u32 blocksize;
8156         struct btrfs_key key;
8157         struct extent_buffer *next;
8158         int level = wc->level;
8159         int reada = 0;
8160         int ret = 0;
8161         bool need_account = false;
8162
8163         generation = btrfs_node_ptr_generation(path->nodes[level],
8164                                                path->slots[level]);
8165         /*
8166          * if the lower level block was created before the snapshot
8167          * was created, we know there is no need to update back refs
8168          * for the subtree
8169          */
8170         if (wc->stage == UPDATE_BACKREF &&
8171             generation <= root->root_key.offset) {
8172                 *lookup_info = 1;
8173                 return 1;
8174         }
8175
8176         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
8177         blocksize = root->nodesize;
8178
8179         next = btrfs_find_tree_block(root->fs_info, bytenr);
8180         if (!next) {
8181                 next = btrfs_find_create_tree_block(root, bytenr);
8182                 if (!next)
8183                         return -ENOMEM;
8184                 btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
8185                                                level - 1);
8186                 reada = 1;
8187         }
8188         btrfs_tree_lock(next);
8189         btrfs_set_lock_blocking(next);
8190
8191         ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
8192                                        &wc->refs[level - 1],
8193                                        &wc->flags[level - 1]);
8194         if (ret < 0) {
8195                 btrfs_tree_unlock(next);
8196                 return ret;
8197         }
8198
8199         if (unlikely(wc->refs[level - 1] == 0)) {
8200                 btrfs_err(root->fs_info, "Missing references.");
8201                 BUG();
8202         }
8203         *lookup_info = 0;
8204
8205         if (wc->stage == DROP_REFERENCE) {
8206                 if (wc->refs[level - 1] > 1) {
8207                         need_account = true;
8208                         if (level == 1 &&
8209                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8210                                 goto skip;
8211
8212                         if (!wc->update_ref ||
8213                             generation <= root->root_key.offset)
8214                                 goto skip;
8215
8216                         btrfs_node_key_to_cpu(path->nodes[level], &key,
8217                                               path->slots[level]);
8218                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
8219                         if (ret < 0)
8220                                 goto skip;
8221
8222                         wc->stage = UPDATE_BACKREF;
8223                         wc->shared_level = level - 1;
8224                 }
8225         } else {
8226                 if (level == 1 &&
8227                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8228                         goto skip;
8229         }
8230
8231         if (!btrfs_buffer_uptodate(next, generation, 0)) {
8232                 btrfs_tree_unlock(next);
8233                 free_extent_buffer(next);
8234                 next = NULL;
8235                 *lookup_info = 1;
8236         }
8237
8238         if (!next) {
8239                 if (reada && level == 1)
8240                         reada_walk_down(trans, root, wc, path);
8241                 next = read_tree_block(root, bytenr, generation);
8242                 if (IS_ERR(next)) {
8243                         return PTR_ERR(next);
8244                 } else if (!extent_buffer_uptodate(next)) {
8245                         free_extent_buffer(next);
8246                         return -EIO;
8247                 }
8248                 btrfs_tree_lock(next);
8249                 btrfs_set_lock_blocking(next);
8250         }
8251
8252         level--;
8253         BUG_ON(level != btrfs_header_level(next));
8254         path->nodes[level] = next;
8255         path->slots[level] = 0;
8256         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8257         wc->level = level;
8258         if (wc->level == 1)
8259                 wc->reada_slot = 0;
8260         return 0;
8261 skip:
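        /*
         * We are not descending into 'next': if this is a shared subtree
         * being dropped, account it for qgroups and drop one reference,
         * then return 1 so the caller advances to the next slot.
         */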
8262         wc->refs[level - 1] = 0;
8263         wc->flags[level - 1] = 0;
8264         if (wc->stage == DROP_REFERENCE) {
8265                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
8266                         parent = path->nodes[level]->start;
8267                 } else {
8268                         BUG_ON(root->root_key.objectid !=
8269                                btrfs_header_owner(path->nodes[level]));
8270                         parent = 0;
8271                 }
8272
8273                 if (need_account) {
8274                         ret = account_shared_subtree(trans, root, next,
8275                                                      generation, level - 1);
8276                         if (ret) {
8277                                 printk_ratelimited(KERN_ERR "BTRFS: %s Error "
8278                                         "%d accounting shared subtree. Quota "
8279                                         "is out of sync, rescan required.\n",
8280                                         root->fs_info->sb->s_id, ret);
8281                         }
8282                 }
8283                 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
8284                                 root->root_key.objectid, level - 1, 0, 0);
8285                 BUG_ON(ret); /* -ENOMEM */
8286         }
8287         btrfs_tree_unlock(next);
8288         free_extent_buffer(next);
8289         *lookup_info = 1;
8290         return 1;
8291 }
8292
8293 /*
8294  * helper to process tree block while walking up the tree.
8295  *
8296  * when wc->stage == DROP_REFERENCE, this function drops
8297  * reference count on the block.
8298  *
8299  * when wc->stage == UPDATE_BACKREF, this function changes
8300  * wc->stage back to DROP_REFERENCE if we changed wc->stage
8301  * to UPDATE_BACKREF previously while processing the block.
8302  *
8303  * NOTE: return value 1 means we should stop walking up.
8304  */
8305 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
8306                                  struct btrfs_root *root,
8307                                  struct btrfs_path *path,
8308                                  struct walk_control *wc)
8309 {
8310         int ret;
8311         int level = wc->level;
8312         struct extent_buffer *eb = path->nodes[level];
8313         u64 parent = 0;
8314
8315         if (wc->stage == UPDATE_BACKREF) {
8316                 BUG_ON(wc->shared_level < level);
8317                 if (level < wc->shared_level)
8318                         goto out;
8319
8320                 ret = find_next_key(path, level + 1, &wc->update_progress);
8321                 if (ret > 0)
8322                         wc->update_ref = 0;
8323
8324                 wc->stage = DROP_REFERENCE;
8325                 wc->shared_level = -1;
8326                 path->slots[level] = 0;
8327
8328                 /*
8329                  * check reference count again if the block isn't locked.
8330                  * we should start walking down the tree again if reference
8331                  * count is one.
8332                  */
8333                 if (!path->locks[level]) {
8334                         BUG_ON(level == 0);
8335                         btrfs_tree_lock(eb);
8336                         btrfs_set_lock_blocking(eb);
8337                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8338
8339                         ret = btrfs_lookup_extent_info(trans, root,
8340                                                        eb->start, level, 1,
8341                                                        &wc->refs[level],
8342                                                        &wc->flags[level]);
8343                         if (ret < 0) {
8344                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8345                                 path->locks[level] = 0;
8346                                 return ret;
8347                         }
8348                         BUG_ON(wc->refs[level] == 0);
8349                         if (wc->refs[level] == 1) {
8350                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8351                                 path->locks[level] = 0;
8352                                 return 1;
8353                         }
8354                 }
8355         }
8356
8357         /* wc->stage == DROP_REFERENCE */
8358         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
8359
8360         if (wc->refs[level] == 1) {
8361                 if (level == 0) {
8362                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8363                                 ret = btrfs_dec_ref(trans, root, eb, 1);
8364                         else
8365                                 ret = btrfs_dec_ref(trans, root, eb, 0);
8366                         BUG_ON(ret); /* -ENOMEM */
8367                         ret = account_leaf_items(trans, root, eb);
8368                         if (ret) {
8369                                 printk_ratelimited(KERN_ERR "BTRFS: %s Error "
8370                                         "%d accounting leaf items. Quota "
8371                                         "is out of sync, rescan required.\n",
8372                                         root->fs_info->sb->s_id, ret);
8373                         }
8374                 }
8375                 /* make block locked assertion in clean_tree_block happy */
8376                 if (!path->locks[level] &&
8377                     btrfs_header_generation(eb) == trans->transid) {
8378                         btrfs_tree_lock(eb);
8379                         btrfs_set_lock_blocking(eb);
8380                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8381                 }
8382                 clean_tree_block(trans, root->fs_info, eb);
8383         }
8384
8385         if (eb == root->node) {
8386                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8387                         parent = eb->start;
8388                 else
8389                         BUG_ON(root->root_key.objectid !=
8390                                btrfs_header_owner(eb));
8391         } else {
8392                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8393                         parent = path->nodes[level + 1]->start;
8394                 else
8395                         BUG_ON(root->root_key.objectid !=
8396                                btrfs_header_owner(path->nodes[level + 1]));
8397         }
8398
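        /*
         * With FULL_BACKREF set, the extent backref is keyed on the parent
         * block's bytenr; otherwise it is keyed on the owning root and
         * parent stays 0.
         */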
8399         btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
8400 out:
8401         wc->refs[level] = 0;
8402         wc->flags[level] = 0;
8403         return 0;
8404 }
8405
8406 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
8407                                    struct btrfs_root *root,
8408                                    struct btrfs_path *path,
8409                                    struct walk_control *wc)
8410 {
8411         int level = wc->level;
8412         int lookup_info = 1;
8413         int ret;
8414
8415         while (level >= 0) {
8416                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
8417                 if (ret > 0)
8418                         break;
8419
8420                 if (level == 0)
8421                         break;
8422
8423                 if (path->slots[level] >=
8424                     btrfs_header_nritems(path->nodes[level]))
8425                         break;
8426
8427                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
8428                 if (ret > 0) {
8429                         path->slots[level]++;
8430                         continue;
8431                 } else if (ret < 0)
8432                         return ret;
8433                 level = wc->level;
8434         }
8435         return 0;
8436 }
8437
8438 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
8439                                  struct btrfs_root *root,
8440                                  struct btrfs_path *path,
8441                                  struct walk_control *wc, int max_level)
8442 {
8443         int level = wc->level;
8444         int ret;
8445
8446         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
8447         while (level < max_level && path->nodes[level]) {
8448                 wc->level = level;
8449                 if (path->slots[level] + 1 <
8450                     btrfs_header_nritems(path->nodes[level])) {
8451                         path->slots[level]++;
8452                         return 0;
8453                 } else {
8454                         ret = walk_up_proc(trans, root, path, wc);
8455                         if (ret > 0)
8456                                 return 0;
8457
8458                         if (path->locks[level]) {
8459                                 btrfs_tree_unlock_rw(path->nodes[level],
8460                                                      path->locks[level]);
8461                                 path->locks[level] = 0;
8462                         }
8463                         free_extent_buffer(path->nodes[level]);
8464                         path->nodes[level] = NULL;
8465                         level++;
8466                 }
8467         }
8468         return 1;
8469 }
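
/*
 * Both callers below drive the walk with the same simple loop, roughly:
 *
 *	while (1) {
 *		ret = walk_down_tree(trans, root, path, wc);
 *		if (ret < 0)
 *			break;
 *		ret = walk_up_tree(trans, root, path, wc, max_level);
 *		if (ret != 0)
 *			break;	(< 0 is an error, > 0 means walk complete)
 *	}
 */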
8470
8471 /*
8472  * drop a subvolume tree.
8473  *
8474  * this function traverses the tree, freeing any blocks that are only
8475  * referenced by the tree.
8476  *
8477  * when a shared tree block is found, this function decreases its
8478  * reference count by one. if update_ref is true, this function
8479  * also makes sure backrefs for the shared block and all lower level
8480  * blocks are properly updated.
8481  *
8482  * If called with for_reloc == 0, may exit early with -EAGAIN
8483  */
8484 int btrfs_drop_snapshot(struct btrfs_root *root,
8485                          struct btrfs_block_rsv *block_rsv, int update_ref,
8486                          int for_reloc)
8487 {
8488         struct btrfs_path *path;
8489         struct btrfs_trans_handle *trans;
8490         struct btrfs_root *tree_root = root->fs_info->tree_root;
8491         struct btrfs_root_item *root_item = &root->root_item;
8492         struct walk_control *wc;
8493         struct btrfs_key key;
8494         int err = 0;
8495         int ret;
8496         int level;
8497         bool root_dropped = false;
8498
8499         btrfs_debug(root->fs_info, "Drop subvolume %llu", root->objectid);
8500
8501         path = btrfs_alloc_path();
8502         if (!path) {
8503                 err = -ENOMEM;
8504                 goto out;
8505         }
8506
8507         wc = kzalloc(sizeof(*wc), GFP_NOFS);
8508         if (!wc) {
8509                 btrfs_free_path(path);
8510                 err = -ENOMEM;
8511                 goto out;
8512         }
8513
8514         trans = btrfs_start_transaction(tree_root, 0);
8515         if (IS_ERR(trans)) {
8516                 err = PTR_ERR(trans);
8517                 goto out_free;
8518         }
8519
8520         if (block_rsv)
8521                 trans->block_rsv = block_rsv;
8522
8523         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
8524                 level = btrfs_header_level(root->node);
8525                 path->nodes[level] = btrfs_lock_root_node(root);
8526                 btrfs_set_lock_blocking(path->nodes[level]);
8527                 path->slots[level] = 0;
8528                 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8529                 memset(&wc->update_progress, 0,
8530                        sizeof(wc->update_progress));
8531         } else {
8532                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
8533                 memcpy(&wc->update_progress, &key,
8534                        sizeof(wc->update_progress));
8535
8536                 level = root_item->drop_level;
8537                 BUG_ON(level == 0);
8538                 path->lowest_level = level;
8539                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
8540                 path->lowest_level = 0;
8541                 if (ret < 0) {
8542                         err = ret;
8543                         goto out_end_trans;
8544                 }
8545                 WARN_ON(ret > 0);
8546
8547                 /*
8548                  * unlock our path, this is safe because only this
8549                  * function is allowed to delete this snapshot
8550                  */
8551                 btrfs_unlock_up_safe(path, 0);
8552
8553                 level = btrfs_header_level(root->node);
8554                 while (1) {
8555                         btrfs_tree_lock(path->nodes[level]);
8556                         btrfs_set_lock_blocking(path->nodes[level]);
8557                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8558
8559                         ret = btrfs_lookup_extent_info(trans, root,
8560                                                 path->nodes[level]->start,
8561                                                 level, 1, &wc->refs[level],
8562                                                 &wc->flags[level]);
8563                         if (ret < 0) {
8564                                 err = ret;
8565                                 goto out_end_trans;
8566                         }
8567                         BUG_ON(wc->refs[level] == 0);
8568
8569                         if (level == root_item->drop_level)
8570                                 break;
8571
8572                         btrfs_tree_unlock(path->nodes[level]);
8573                         path->locks[level] = 0;
8574                         WARN_ON(wc->refs[level] != 1);
8575                         level--;
8576                 }
8577         }
8578
8579         wc->level = level;
8580         wc->shared_level = -1;
8581         wc->stage = DROP_REFERENCE;
8582         wc->update_ref = update_ref;
8583         wc->keep_locks = 0;
8584         wc->for_reloc = for_reloc;
8585         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
8586
8587         while (1) {
8588
8589                 ret = walk_down_tree(trans, root, path, wc);
8590                 if (ret < 0) {
8591                         err = ret;
8592                         break;
8593                 }
8594
8595                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
8596                 if (ret < 0) {
8597                         err = ret;
8598                         break;
8599                 }
8600
8601                 if (ret > 0) {
8602                         BUG_ON(wc->stage != DROP_REFERENCE);
8603                         break;
8604                 }
8605
8606                 if (wc->stage == DROP_REFERENCE) {
8607                         level = wc->level;
8608                         btrfs_node_key(path->nodes[level],
8609                                        &root_item->drop_progress,
8610                                        path->slots[level]);
8611                         root_item->drop_level = level;
8612                 }
8613
8614                 BUG_ON(wc->level == 0);
8615                 if (btrfs_should_end_transaction(trans, tree_root) ||
8616                     (!for_reloc && btrfs_need_cleaner_sleep(root))) {
8617                         ret = btrfs_update_root(trans, tree_root,
8618                                                 &root->root_key,
8619                                                 root_item);
8620                         if (ret) {
8621                                 btrfs_abort_transaction(trans, tree_root, ret);
8622                                 err = ret;
8623                                 goto out_end_trans;
8624                         }
8625
8626                         btrfs_end_transaction_throttle(trans, tree_root);
8627                         if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
8628                                 pr_debug("BTRFS: drop snapshot early exit\n");
8629                                 err = -EAGAIN;
8630                                 goto out_free;
8631                         }
8632
8633                         trans = btrfs_start_transaction(tree_root, 0);
8634                         if (IS_ERR(trans)) {
8635                                 err = PTR_ERR(trans);
8636                                 goto out_free;
8637                         }
8638                         if (block_rsv)
8639                                 trans->block_rsv = block_rsv;
8640                 }
8641         }
8642         btrfs_release_path(path);
8643         if (err)
8644                 goto out_end_trans;
8645
8646         ret = btrfs_del_root(trans, tree_root, &root->root_key);
8647         if (ret) {
8648                 btrfs_abort_transaction(trans, tree_root, ret);
8649                 goto out_end_trans;
8650         }
8651
8652         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
8653                 ret = btrfs_find_root(tree_root, &root->root_key, path,
8654                                       NULL, NULL);
8655                 if (ret < 0) {
8656                         btrfs_abort_transaction(trans, tree_root, ret);
8657                         err = ret;
8658                         goto out_end_trans;
8659                 } else if (ret > 0) {
8660                         /* if we fail to delete the orphan item this time
8661                          * around, it'll get picked up the next time.
8662                          *
8663                          * The most common failure here is just -ENOENT.
8664                          */
8665                         btrfs_del_orphan_item(trans, tree_root,
8666                                               root->root_key.objectid);
8667                 }
8668         }
8669
8670         if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) {
8671                 btrfs_drop_and_free_fs_root(tree_root->fs_info, root);
8672         } else {
8673                 free_extent_buffer(root->node);
8674                 free_extent_buffer(root->commit_root);
8675                 btrfs_put_fs_root(root);
8676         }
8677         root_dropped = true;
8678 out_end_trans:
8679         btrfs_end_transaction_throttle(trans, tree_root);
8680 out_free:
8681         kfree(wc);
8682         btrfs_free_path(path);
8683 out:
8684         /*
8685          * So if we need to stop dropping the snapshot for whatever reason, we
8686          * need to make sure to add it back to the dead root list so that we
8687          * keep trying to do the work later.  This also cleans up roots if we
8688          * don't have it in the radix (like when we recover after a power fail
8689          * or unmount) so we don't leak memory.
8690          */
8691         if (!for_reloc && root_dropped == false)
8692                 btrfs_add_dead_root(root);
8693         if (err && err != -EAGAIN)
8694                 btrfs_std_error(root->fs_info, err);
8695         return err;
8696 }
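
/*
 * A sketch of a typical call (the snapshot cleaner is the main caller):
 * drop a dead subvolume with no preallocated block_rsv, retrying later
 * if the cleaner should yield:
 *
 *	err = btrfs_drop_snapshot(root, NULL, update_ref, 0);
 *	if (err == -EAGAIN)
 *		... the root was re-queued via btrfs_add_dead_root() ...
 */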
8697
8698 /*
8699  * drop subtree rooted at tree block 'node'.
8700  *
8701  * NOTE: this function will unlock and release tree block 'node'.
8702  * only used by relocation code.
8703  */
8704 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
8705                         struct btrfs_root *root,
8706                         struct extent_buffer *node,
8707                         struct extent_buffer *parent)
8708 {
8709         struct btrfs_path *path;
8710         struct walk_control *wc;
8711         int level;
8712         int parent_level;
8713         int ret = 0;
8714         int wret;
8715
8716         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
8717
8718         path = btrfs_alloc_path();
8719         if (!path)
8720                 return -ENOMEM;
8721
8722         wc = kzalloc(sizeof(*wc), GFP_NOFS);
8723         if (!wc) {
8724                 btrfs_free_path(path);
8725                 return -ENOMEM;
8726         }
8727
8728         btrfs_assert_tree_locked(parent);
8729         parent_level = btrfs_header_level(parent);
8730         extent_buffer_get(parent);
8731         path->nodes[parent_level] = parent;
8732         path->slots[parent_level] = btrfs_header_nritems(parent);
8733
8734         btrfs_assert_tree_locked(node);
8735         level = btrfs_header_level(node);
8736         path->nodes[level] = node;
8737         path->slots[level] = 0;
8738         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8739
8740         wc->refs[parent_level] = 1;
8741         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
8742         wc->level = level;
8743         wc->shared_level = -1;
8744         wc->stage = DROP_REFERENCE;
8745         wc->update_ref = 0;
8746         wc->keep_locks = 1;
8747         wc->for_reloc = 1;
8748         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
8749
8750         while (1) {
8751                 wret = walk_down_tree(trans, root, path, wc);
8752                 if (wret < 0) {
8753                         ret = wret;
8754                         break;
8755                 }
8756
8757                 wret = walk_up_tree(trans, root, path, wc, parent_level);
8758                 if (wret < 0)
8759                         ret = wret;
8760                 if (wret != 0)
8761                         break;
8762         }
8763
8764         kfree(wc);
8765         btrfs_free_path(path);
8766         return ret;
8767 }
8768
8769 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
8770 {
8771         u64 num_devices;
8772         u64 stripped;
8773
8774         /*
8775          * if restripe for this chunk_type is on, pick the target profile
8776          * and return; otherwise do the usual balance
8777          */
8778         stripped = get_restripe_target(root->fs_info, flags);
8779         if (stripped)
8780                 return extended_to_chunk(stripped);
8781
8782         num_devices = root->fs_info->fs_devices->rw_devices;
8783
8784         stripped = BTRFS_BLOCK_GROUP_RAID0 |
8785                 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
8786                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
8787
8788         if (num_devices == 1) {
8789                 stripped |= BTRFS_BLOCK_GROUP_DUP;
8790                 stripped = flags & ~stripped;
8791
8792                 /* turn raid0 into single device chunks */
8793                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
8794                         return stripped;
8795
8796                 /* turn mirroring into duplication */
8797                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
8798                              BTRFS_BLOCK_GROUP_RAID10))
8799                         return stripped | BTRFS_BLOCK_GROUP_DUP;
8800         } else {
8801                 /* they already had raid on here, just return */
8802                 if (flags & stripped)
8803                         return flags;
8804
8805                 stripped |= BTRFS_BLOCK_GROUP_DUP;
8806                 stripped = flags & ~stripped;
8807
8808                 /* switch duplicated blocks with raid1 */
8809                 if (flags & BTRFS_BLOCK_GROUP_DUP)
8810                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
8811
8812                 /* this is drive concat, leave it alone */
8813         }
8814
8815         return flags;
8816 }
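
/*
 * Examples of the mapping above: with a single rw device, RAID1/RAID10
 * collapse to DUP and RAID0 collapses to single; with multiple devices,
 * DUP is upgraded to RAID1, and plain single (drive concat) is left
 * alone.
 */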
8817
8818 static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
8819 {
8820         struct btrfs_space_info *sinfo = cache->space_info;
8821         u64 num_bytes;
8822         u64 min_allocable_bytes;
8823         int ret = -ENOSPC;
8824
8825         /*
8826          * We need some metadata space and system metadata space for
8827          * allocating chunks in some corner cases, so keep a minimum
8828          * headroom unless we are forced to set the group read-only.
8829          */
8830         if ((sinfo->flags &
8831              (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
8832             !force)
8833                 min_allocable_bytes = 1 * 1024 * 1024;
8834         else
8835                 min_allocable_bytes = 0;
8836
8837         spin_lock(&sinfo->lock);
8838         spin_lock(&cache->lock);
8839
8840         if (cache->ro) {
8841                 cache->ro++;
8842                 ret = 0;
8843                 goto out;
8844         }
8845
8846         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
8847                     cache->bytes_super - btrfs_block_group_used(&cache->item);
8848
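        /*
         * num_bytes is the free space that would become unusable once this
         * group is read-only; only proceed if the space_info keeps at least
         * min_allocable_bytes of headroom after it moves to bytes_readonly.
         */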
8849         if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
8850             sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
8851             min_allocable_bytes <= sinfo->total_bytes) {
8852                 sinfo->bytes_readonly += num_bytes;
8853                 cache->ro++;
8854                 list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
8855                 ret = 0;
8856         }
8857 out:
8858         spin_unlock(&cache->lock);
8859         spin_unlock(&sinfo->lock);
8860         return ret;
8861 }
8862
8863 int btrfs_inc_block_group_ro(struct btrfs_root *root,
8864                              struct btrfs_block_group_cache *cache)
8865
8866 {
8867         struct btrfs_trans_handle *trans;
8868         u64 alloc_flags;
8869         int ret;
8870
8871 again:
8872         trans = btrfs_join_transaction(root);
8873         if (IS_ERR(trans))
8874                 return PTR_ERR(trans);
8875
8876         /*
8877          * we're not allowed to set block groups readonly after the dirty
8878          * block groups cache has started writing.  If it already started,
8879          * back off and let this transaction commit
8880          */
8881         mutex_lock(&root->fs_info->ro_block_group_mutex);
8882         if (trans->transaction->dirty_bg_run) {
8883                 u64 transid = trans->transid;
8884
8885                 mutex_unlock(&root->fs_info->ro_block_group_mutex);
8886                 btrfs_end_transaction(trans, root);
8887
8888                 ret = btrfs_wait_for_commit(root, transid);
8889                 if (ret)
8890                         return ret;
8891                 goto again;
8892         }
8893
8894         /*
8895          * if we are changing raid levels, try to allocate a corresponding
8896          * block group with the new raid level.
8897          */
8898         alloc_flags = update_block_group_flags(root, cache->flags);
8899         if (alloc_flags != cache->flags) {
8900                 ret = do_chunk_alloc(trans, root, alloc_flags,
8901                                      CHUNK_ALLOC_FORCE);
8902                 /*
8903                  * ENOSPC is allowed here, we may have enough space
8904                  * already allocated at the new raid level to
8905                  * carry on
8906                  */
8907                 if (ret == -ENOSPC)
8908                         ret = 0;
8909                 if (ret < 0)
8910                         goto out;
8911         }
8912
8913         ret = inc_block_group_ro(cache, 0);
8914         if (!ret)
8915                 goto out;
8916         alloc_flags = get_alloc_profile(root, cache->space_info->flags);
8917         ret = do_chunk_alloc(trans, root, alloc_flags,
8918                              CHUNK_ALLOC_FORCE);
8919         if (ret < 0)
8920                 goto out;
8921         ret = inc_block_group_ro(cache, 0);
8922 out:
8923         if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
8924                 alloc_flags = update_block_group_flags(root, cache->flags);
8925                 lock_chunks(root->fs_info->chunk_root);
8926                 check_system_chunk(trans, root, alloc_flags);
8927                 unlock_chunks(root->fs_info->chunk_root);
8928         }
8929         mutex_unlock(&root->fs_info->ro_block_group_mutex);
8930
8931         btrfs_end_transaction(trans, root);
8932         return ret;
8933 }
8934
8935 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
8936                             struct btrfs_root *root, u64 type)
8937 {
8938         u64 alloc_flags = get_alloc_profile(root, type);
8939         return do_chunk_alloc(trans, root, alloc_flags,
8940                               CHUNK_ALLOC_FORCE);
8941 }
8942
8943 /*
8944  * helper to account the unused space of all the readonly block groups
8945  * in the space_info. takes mirrors into account.
8946  */
8947 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
8948 {
8949         struct btrfs_block_group_cache *block_group;
8950         u64 free_bytes = 0;
8951         int factor;
8952
8953         /* It's df, we don't care if it's racy */
8954         if (list_empty(&sinfo->ro_bgs))
8955                 return 0;
8956
8957         spin_lock(&sinfo->lock);
8958         list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
8959                 spin_lock(&block_group->lock);
8960
8961                 if (!block_group->ro) {
8962                         spin_unlock(&block_group->lock);
8963                         continue;
8964                 }
8965
8966                 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
8967                                           BTRFS_BLOCK_GROUP_RAID10 |
8968                                           BTRFS_BLOCK_GROUP_DUP))
8969                         factor = 2;
8970                 else
8971                         factor = 1;
8972
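                /*
                 * e.g. a read-only 1GiB RAID1 block group with 256MiB used
                 * contributes (1GiB - 256MiB) * 2 of raw free space.
                 */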
8973                 free_bytes += (block_group->key.offset -
8974                                btrfs_block_group_used(&block_group->item)) *
8975                                factor;
8976
8977                 spin_unlock(&block_group->lock);
8978         }
8979         spin_unlock(&sinfo->lock);
8980
8981         return free_bytes;
8982 }
8983
8984 void btrfs_dec_block_group_ro(struct btrfs_root *root,
8985                               struct btrfs_block_group_cache *cache)
8986 {
8987         struct btrfs_space_info *sinfo = cache->space_info;
8988         u64 num_bytes;
8989
8990         BUG_ON(!cache->ro);
8991
8992         spin_lock(&sinfo->lock);
8993         spin_lock(&cache->lock);
8994         if (!--cache->ro) {
8995                 num_bytes = cache->key.offset - cache->reserved -
8996                             cache->pinned - cache->bytes_super -
8997                             btrfs_block_group_used(&cache->item);
8998                 sinfo->bytes_readonly -= num_bytes;
8999                 list_del_init(&cache->ro_list);
9000         }
9001         spin_unlock(&cache->lock);
9002         spin_unlock(&sinfo->lock);
9003 }
9004
9005 /*
9006  * checks to see if it's even possible to relocate this block group.
9007  *
9008  * @return - -1 if it's not a good idea to relocate this block group, 0 if
9009  * it's ok to go ahead and try.
9010  */
9011 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
9012 {
9013         struct btrfs_block_group_cache *block_group;
9014         struct btrfs_space_info *space_info;
9015         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
9016         struct btrfs_device *device;
9017         struct btrfs_trans_handle *trans;
9018         u64 min_free;
9019         u64 dev_min = 1;
9020         u64 dev_nr = 0;
9021         u64 target;
9022         int index;
9023         int full = 0;
9024         int ret = 0;
9025
9026         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
9027
9028         /* odd, couldn't find the block group, leave it alone */
9029         if (!block_group)
9030                 return -1;
9031
9032         min_free = btrfs_block_group_used(&block_group->item);
9033
9034         /* no bytes used, we're good */
9035         if (!min_free)
9036                 goto out;
9037
9038         space_info = block_group->space_info;
9039         spin_lock(&space_info->lock);
9040
9041         full = space_info->full;
9042
9043         /*
9044          * if this is the last block group we have in this space, we can't
9045          * relocate it unless we're able to allocate a new chunk below.
9046          *
9047          * Otherwise, we need to make sure we have room in the space to handle
9048          * all of the extents from this block group.  If we can, we're good
9049          */
9050         if ((space_info->total_bytes != block_group->key.offset) &&
9051             (space_info->bytes_used + space_info->bytes_reserved +
9052              space_info->bytes_pinned + space_info->bytes_readonly +
9053              min_free < space_info->total_bytes)) {
9054                 spin_unlock(&space_info->lock);
9055                 goto out;
9056         }
9057         spin_unlock(&space_info->lock);
9058
9059         /*
9060          * ok we don't have enough space, but maybe we have free space on our
9061          * devices to allocate new chunks for relocation, so loop through our
9062          * alloc devices and guess if we have enough space.  if this block
9063          * group is going to be restriped, run checks against the target
9064          * profile instead of the current one.
9065          */
9066         ret = -1;
9067
9068         /*
9069          * index:
9070          *      0: raid10
9071          *      1: raid1
9072          *      2: dup
9073          *      3: raid0
9074          *      4: single
9075          */
9076         target = get_restripe_target(root->fs_info, block_group->flags);
9077         if (target) {
9078                 index = __get_raid_index(extended_to_chunk(target));
9079         } else {
9080                 /*
9081                  * this is just a balance, so if we were marked as full
9082                  * we know there is no space for a new chunk
9083                  */
9084                 if (full)
9085                         goto out;
9086
9087                 index = get_block_group_index(block_group);
9088         }
9089
9090         if (index == BTRFS_RAID_RAID10) {
9091                 dev_min = 4;
9092                 /* Divide by 2 */
9093                 min_free >>= 1;
9094         } else if (index == BTRFS_RAID_RAID1) {
9095                 dev_min = 2;
9096         } else if (index == BTRFS_RAID_DUP) {
9097                 /* Multiply by 2 */
9098                 min_free <<= 1;
9099         } else if (index == BTRFS_RAID_RAID0) {
9100                 dev_min = fs_devices->rw_devices;
9101                 min_free = div64_u64(min_free, dev_min);
9102         }
9103
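        /*
         * e.g. relocating 1GiB of RAID10 data needs at least 512MiB free
         * on each of 4 devices, while DUP needs 2GiB of free space on a
         * single device.
         */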
9104         /* We need to do this so that we can look at pending chunks */
9105         trans = btrfs_join_transaction(root);
9106         if (IS_ERR(trans)) {
9107                 ret = PTR_ERR(trans);
9108                 goto out;
9109         }
9110
9111         mutex_lock(&root->fs_info->chunk_mutex);
9112         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
9113                 u64 dev_offset;
9114
9115                 /*
9116                  * check to make sure we can actually find a chunk with enough
9117                  * space to fit our block group in.
9118                  */
9119                 if (device->total_bytes > device->bytes_used + min_free &&
9120                     !device->is_tgtdev_for_dev_replace) {
9121                         ret = find_free_dev_extent(trans, device, min_free,
9122                                                    &dev_offset, NULL);
9123                         if (!ret)
9124                                 dev_nr++;
9125
9126                         if (dev_nr >= dev_min)
9127                                 break;
9128
9129                         ret = -1;
9130                 }
9131         }
9132         mutex_unlock(&root->fs_info->chunk_mutex);
9133         btrfs_end_transaction(trans, root);
9134 out:
9135         btrfs_put_block_group(block_group);
9136         return ret;
9137 }
9138
9139 static int find_first_block_group(struct btrfs_root *root,
9140                 struct btrfs_path *path, struct btrfs_key *key)
9141 {
9142         int ret = 0;
9143         struct btrfs_key found_key;
9144         struct extent_buffer *leaf;
9145         int slot;
9146
9147         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
9148         if (ret < 0)
9149                 goto out;
9150
9151         while (1) {
9152                 slot = path->slots[0];
9153                 leaf = path->nodes[0];
9154                 if (slot >= btrfs_header_nritems(leaf)) {
9155                         ret = btrfs_next_leaf(root, path);
9156                         if (ret == 0)
9157                                 continue;
9158                         if (ret < 0)
9159                                 goto out;
9160                         break;
9161                 }
9162                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
9163
9164                 if (found_key.objectid >= key->objectid &&
9165                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
9166                         ret = 0;
9167                         goto out;
9168                 }
9169                 path->slots[0]++;
9170         }
9171 out:
9172         return ret;
9173 }
9174
9175 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
9176 {
9177         struct btrfs_block_group_cache *block_group;
9178         u64 last = 0;
9179
9180         while (1) {
9181                 struct inode *inode;
9182
9183                 block_group = btrfs_lookup_first_block_group(info, last);
9184                 while (block_group) {
9185                         spin_lock(&block_group->lock);
9186                         if (block_group->iref)
9187                                 break;
9188                         spin_unlock(&block_group->lock);
9189                         block_group = next_block_group(info->tree_root,
9190                                                        block_group);
9191                 }
9192                 if (!block_group) {
9193                         if (last == 0)
9194                                 break;
9195                         last = 0;
9196                         continue;
9197                 }
9198
9199                 inode = block_group->inode;
9200                 block_group->iref = 0;
9201                 block_group->inode = NULL;
9202                 spin_unlock(&block_group->lock);
9203                 iput(inode);
9204                 last = block_group->key.objectid + block_group->key.offset;
9205                 btrfs_put_block_group(block_group);
9206         }
9207 }
9208
9209 int btrfs_free_block_groups(struct btrfs_fs_info *info)
9210 {
9211         struct btrfs_block_group_cache *block_group;
9212         struct btrfs_space_info *space_info;
9213         struct btrfs_caching_control *caching_ctl;
9214         struct rb_node *n;
9215
9216         down_write(&info->commit_root_sem);
9217         while (!list_empty(&info->caching_block_groups)) {
9218                 caching_ctl = list_entry(info->caching_block_groups.next,
9219                                          struct btrfs_caching_control, list);
9220                 list_del(&caching_ctl->list);
9221                 put_caching_control(caching_ctl);
9222         }
9223         up_write(&info->commit_root_sem);
9224
9225         spin_lock(&info->unused_bgs_lock);
9226         while (!list_empty(&info->unused_bgs)) {
9227                 block_group = list_first_entry(&info->unused_bgs,
9228                                                struct btrfs_block_group_cache,
9229                                                bg_list);
9230                 list_del_init(&block_group->bg_list);
9231                 btrfs_put_block_group(block_group);
9232         }
9233         spin_unlock(&info->unused_bgs_lock);
9234
9235         spin_lock(&info->block_group_cache_lock);
9236         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
9237                 block_group = rb_entry(n, struct btrfs_block_group_cache,
9238                                        cache_node);
9239                 rb_erase(&block_group->cache_node,
9240                          &info->block_group_cache_tree);
9241                 RB_CLEAR_NODE(&block_group->cache_node);
9242                 spin_unlock(&info->block_group_cache_lock);
9243
9244                 down_write(&block_group->space_info->groups_sem);
9245                 list_del(&block_group->list);
9246                 up_write(&block_group->space_info->groups_sem);
9247
9248                 if (block_group->cached == BTRFS_CACHE_STARTED)
9249                         wait_block_group_cache_done(block_group);
9250
9251                 /*
9252                  * We haven't cached this block group, which means we could
9253                  * possibly have excluded extents on this block group.
9254                  */
9255                 if (block_group->cached == BTRFS_CACHE_NO ||
9256                     block_group->cached == BTRFS_CACHE_ERROR)
9257                         free_excluded_extents(info->extent_root, block_group);
9258
9259                 btrfs_remove_free_space_cache(block_group);
9260                 btrfs_put_block_group(block_group);
9261
9262                 spin_lock(&info->block_group_cache_lock);
9263         }
9264         spin_unlock(&info->block_group_cache_lock);
9265
9266         /* now that all the block groups are freed, go through and
9267          * free all the space_info structs.  This is only called during
9268          * the final stages of unmount, and so we know nobody is
9269          * using them.  We call synchronize_rcu() once before we start,
9270          * just to be on the safe side.
9271          */
9272         synchronize_rcu();
9273
9274         release_global_block_rsv(info);
9275
9276         while (!list_empty(&info->space_info)) {
9277                 int i;
9278
9279                 space_info = list_entry(info->space_info.next,
9280                                         struct btrfs_space_info,
9281                                         list);
9282                 if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
9283                         if (WARN_ON(space_info->bytes_pinned > 0 ||
9284                             space_info->bytes_reserved > 0 ||
9285                             space_info->bytes_may_use > 0)) {
9286                                 dump_space_info(space_info, 0, 0);
9287                         }
9288                 }
9289                 list_del(&space_info->list);
9290                 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
9291                         struct kobject *kobj;
9292                         kobj = space_info->block_group_kobjs[i];
9293                         space_info->block_group_kobjs[i] = NULL;
9294                         if (kobj) {
9295                                 kobject_del(kobj);
9296                                 kobject_put(kobj);
9297                         }
9298                 }
9299                 kobject_del(&space_info->kobj);
9300                 kobject_put(&space_info->kobj);
9301         }
9302         return 0;
9303 }
9304
9305 static void __link_block_group(struct btrfs_space_info *space_info,
9306                                struct btrfs_block_group_cache *cache)
9307 {
9308         int index = get_block_group_index(cache);
9309         bool first = false;
9310
9311         down_write(&space_info->groups_sem);
9312         if (list_empty(&space_info->block_groups[index]))
9313                 first = true;
9314         list_add_tail(&cache->list, &space_info->block_groups[index]);
9315         up_write(&space_info->groups_sem);
9316
9317         if (first) {
9318                 struct raid_kobject *rkobj;
9319                 int ret;
9320
9321                 rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
9322                 if (!rkobj)
9323                         goto out_err;
9324                 rkobj->raid_type = index;
9325                 kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
9326                 ret = kobject_add(&rkobj->kobj, &space_info->kobj,
9327                                   "%s", get_raid_name(index));
9328                 if (ret) {
9329                         kobject_put(&rkobj->kobj);
9330                         goto out_err;
9331                 }
9332                 space_info->block_group_kobjs[index] = &rkobj->kobj;
9333         }
9334
9335         return;
9336 out_err:
9337         pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
9338 }
9339
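/*
 * When the first block group of a raid type is linked in, the kobject
 * added above becomes visible in sysfs, roughly at a path such as
 *
 *     /sys/fs/btrfs/<fsid>/allocation/<data|metadata|system>/<raid name>/
 *
 * The exact layout depends on btrfs_raid_ktype and the parent
 * space_info->kobj, so the path above is a sketch, not a guarantee.
 */
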
9340 static struct btrfs_block_group_cache *
9341 btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
9342 {
9343         struct btrfs_block_group_cache *cache;
9344
9345         cache = kzalloc(sizeof(*cache), GFP_NOFS);
9346         if (!cache)
9347                 return NULL;
9348
9349         cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
9350                                         GFP_NOFS);
9351         if (!cache->free_space_ctl) {
9352                 kfree(cache);
9353                 return NULL;
9354         }
9355
9356         cache->key.objectid = start;
9357         cache->key.offset = size;
9358         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9359
9360         cache->sectorsize = root->sectorsize;
9361         cache->fs_info = root->fs_info;
9362         cache->full_stripe_len = btrfs_full_stripe_len(root,
9363                                                &root->fs_info->mapping_tree,
9364                                                start);
9365         atomic_set(&cache->count, 1);
9366         spin_lock_init(&cache->lock);
9367         init_rwsem(&cache->data_rwsem);
9368         INIT_LIST_HEAD(&cache->list);
9369         INIT_LIST_HEAD(&cache->cluster_list);
9370         INIT_LIST_HEAD(&cache->bg_list);
9371         INIT_LIST_HEAD(&cache->ro_list);
9372         INIT_LIST_HEAD(&cache->dirty_list);
9373         INIT_LIST_HEAD(&cache->io_list);
9374         btrfs_init_free_space_ctl(cache);
9375         atomic_set(&cache->trimming, 0);
9376
9377         return cache;
9378 }
9379
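/*
 * A minimal usage sketch, mirroring the two callers later in this file
 * (btrfs_read_block_groups() and btrfs_make_block_group()); illustrative
 * only:
 *
 *     struct btrfs_block_group_cache *cache;
 *
 *     cache = btrfs_create_block_group_cache(root, start, size);
 *     if (!cache)
 *             return -ENOMEM;
 *     ... fill in cache->item, exclude super stripes, link it ...
 *     btrfs_put_block_group(cache);  (drops the initial reference
 *                                     taken via atomic_set above)
 */
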
9380 int btrfs_read_block_groups(struct btrfs_root *root)
9381 {
9382         struct btrfs_path *path;
9383         int ret;
9384         struct btrfs_block_group_cache *cache;
9385         struct btrfs_fs_info *info = root->fs_info;
9386         struct btrfs_space_info *space_info;
9387         struct btrfs_key key;
9388         struct btrfs_key found_key;
9389         struct extent_buffer *leaf;
9390         int need_clear = 0;
9391         u64 cache_gen;
9392
9393         root = info->extent_root;
9394         key.objectid = 0;
9395         key.offset = 0;
9396         key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9397         path = btrfs_alloc_path();
9398         if (!path)
9399                 return -ENOMEM;
9400         path->reada = 1;
9401
9402         cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
9403         if (btrfs_test_opt(root, SPACE_CACHE) &&
9404             btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
9405                 need_clear = 1;
9406         if (btrfs_test_opt(root, CLEAR_CACHE))
9407                 need_clear = 1;
9408
9409         while (1) {
9410                 ret = find_first_block_group(root, path, &key);
9411                 if (ret > 0)
9412                         break;
9413                 if (ret != 0)
9414                         goto error;
9415
9416                 leaf = path->nodes[0];
9417                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
9418
9419                 cache = btrfs_create_block_group_cache(root, found_key.objectid,
9420                                                        found_key.offset);
9421                 if (!cache) {
9422                         ret = -ENOMEM;
9423                         goto error;
9424                 }
9425
9426                 if (need_clear) {
9427                         /*
9428                          * When we mount with an old space cache, we need to
9429                          * set BTRFS_DC_CLEAR and the dirty flag.
9430                          *
9431                          * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
9432                          *    truncate the old free space cache inode and
9433                          *    setup a new one.
9434                          * b) Setting 'dirty flag' makes sure that we flush
9435                          *    the new space cache info onto disk.
9436                          */
9437                         if (btrfs_test_opt(root, SPACE_CACHE))
9438                                 cache->disk_cache_state = BTRFS_DC_CLEAR;
9439                 }
9440
9441                 read_extent_buffer(leaf, &cache->item,
9442                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
9443                                    sizeof(cache->item));
9444                 cache->flags = btrfs_block_group_flags(&cache->item);
9445
9446                 key.objectid = found_key.objectid + found_key.offset;
9447                 btrfs_release_path(path);
9448
9449                 /*
9450                  * We need to exclude the super stripes now so that the space
9451                  * info has super bytes accounted for, otherwise we'll think
9452                  * we have more space than we actually do.
9453                  */
9454                 ret = exclude_super_stripes(root, cache);
9455                 if (ret) {
9456                         /*
9457                          * We may have excluded something, so call this just in
9458                          * case.
9459                          */
9460                         free_excluded_extents(root, cache);
9461                         btrfs_put_block_group(cache);
9462                         goto error;
9463                 }
9464
9465                 /*
9466                  * Check for two cases: either we are full, and therefore
9467                  * don't need to bother with the caching work since we won't
9468                  * find any space; or we are empty, and we can just add all
9469                  * the space in and be done with it.  This saves us a lot of
9470                  * time, particularly in the full case.
9471                  */
9472                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
9473                         cache->last_byte_to_unpin = (u64)-1;
9474                         cache->cached = BTRFS_CACHE_FINISHED;
9475                         free_excluded_extents(root, cache);
9476                 } else if (btrfs_block_group_used(&cache->item) == 0) {
9477                         cache->last_byte_to_unpin = (u64)-1;
9478                         cache->cached = BTRFS_CACHE_FINISHED;
9479                         add_new_free_space(cache, root->fs_info,
9480                                            found_key.objectid,
9481                                            found_key.objectid +
9482                                            found_key.offset);
9483                         free_excluded_extents(root, cache);
9484                 }
9485
9486                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
9487                 if (ret) {
9488                         btrfs_remove_free_space_cache(cache);
9489                         btrfs_put_block_group(cache);
9490                         goto error;
9491                 }
9492
9493                 ret = update_space_info(info, cache->flags, found_key.offset,
9494                                         btrfs_block_group_used(&cache->item),
9495                                         &space_info);
9496                 if (ret) {
9497                         btrfs_remove_free_space_cache(cache);
9498                         spin_lock(&info->block_group_cache_lock);
9499                         rb_erase(&cache->cache_node,
9500                                  &info->block_group_cache_tree);
9501                         RB_CLEAR_NODE(&cache->cache_node);
9502                         spin_unlock(&info->block_group_cache_lock);
9503                         btrfs_put_block_group(cache);
9504                         goto error;
9505                 }
9506
9507                 cache->space_info = space_info;
9508                 spin_lock(&cache->space_info->lock);
9509                 cache->space_info->bytes_readonly += cache->bytes_super;
9510                 spin_unlock(&cache->space_info->lock);
9511
9512                 __link_block_group(space_info, cache);
9513
9514                 set_avail_alloc_bits(root->fs_info, cache->flags);
9515                 if (btrfs_chunk_readonly(root, cache->key.objectid)) {
9516                         inc_block_group_ro(cache, 1);
9517                 } else if (btrfs_block_group_used(&cache->item) == 0) {
9518                         spin_lock(&info->unused_bgs_lock);
9519                         /* Should always be true but just in case. */
9520                         if (list_empty(&cache->bg_list)) {
9521                                 btrfs_get_block_group(cache);
9522                                 list_add_tail(&cache->bg_list,
9523                                               &info->unused_bgs);
9524                         }
9525                         spin_unlock(&info->unused_bgs_lock);
9526                 }
9527         }
9528
9529         list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
9530                 if (!(get_alloc_profile(root, space_info->flags) &
9531                       (BTRFS_BLOCK_GROUP_RAID10 |
9532                        BTRFS_BLOCK_GROUP_RAID1 |
9533                        BTRFS_BLOCK_GROUP_RAID5 |
9534                        BTRFS_BLOCK_GROUP_RAID6 |
9535                        BTRFS_BLOCK_GROUP_DUP)))
9536                         continue;
9537                 /*
9538                  * avoid allocating from un-mirrored block group if there are
9539                  * mirrored block groups.
9540                  */
9541                 list_for_each_entry(cache,
9542                                 &space_info->block_groups[BTRFS_RAID_RAID0],
9543                                 list)
9544                         inc_block_group_ro(cache, 1);
9545                 list_for_each_entry(cache,
9546                                 &space_info->block_groups[BTRFS_RAID_SINGLE],
9547                                 list)
9548                         inc_block_group_ro(cache, 1);
9549         }
9550
9551         init_global_block_rsv(info);
9552         ret = 0;
9553 error:
9554         btrfs_free_path(path);
9555         return ret;
9556 }
9557
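/*
 * Summary of the per-block-group work done above at mount time:
 *
 *   1) btrfs_create_block_group_cache()  allocate the in-memory cache
 *   2) exclude_super_stripes()           account superblock stripes
 *   3) btrfs_add_block_group_cache()     insert into the rbtree
 *   4) update_space_info()               update space accounting
 *   5) __link_block_group()              link into the per-raid lists
 *
 * Fully used and completely empty block groups skip the caching work,
 * and empty ones are also queued on fs_info->unused_bgs so that
 * btrfs_delete_unused_bgs() can reclaim them later.
 */
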
9558 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
9559                                        struct btrfs_root *root)
9560 {
9561         struct btrfs_block_group_cache *block_group, *tmp;
9562         struct btrfs_root *extent_root = root->fs_info->extent_root;
9563         struct btrfs_block_group_item item;
9564         struct btrfs_key key;
9565         int ret = 0;
9566
9567         list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
9568                 if (ret)
9569                         goto next;
9570
9571                 spin_lock(&block_group->lock);
9572                 memcpy(&item, &block_group->item, sizeof(item));
9573                 memcpy(&key, &block_group->key, sizeof(key));
9574                 spin_unlock(&block_group->lock);
9575
9576                 ret = btrfs_insert_item(trans, extent_root, &key, &item,
9577                                         sizeof(item));
9578                 if (ret)
9579                         btrfs_abort_transaction(trans, extent_root, ret);
9580                 ret = btrfs_finish_chunk_alloc(trans, extent_root,
9581                                                key.objectid, key.offset);
9582                 if (ret)
9583                         btrfs_abort_transaction(trans, extent_root, ret);
9584 next:
9585                 list_del_init(&block_group->bg_list);
9586         }
9587 }
9588
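/*
 * Note: trans->new_bgs is populated by btrfs_make_block_group() below;
 * the function above is what finally inserts those pending block group
 * items at transaction commit time. On error the transaction is
 * aborted, but the list is still fully drained via list_del_init().
 */
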
9589 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
9590                            struct btrfs_root *root, u64 bytes_used,
9591                            u64 type, u64 chunk_objectid, u64 chunk_offset,
9592                            u64 size)
9593 {
9594         int ret;
9595         struct btrfs_root *extent_root;
9596         struct btrfs_block_group_cache *cache;
9597
9598         extent_root = root->fs_info->extent_root;
9599
9600         btrfs_set_log_full_commit(root->fs_info, trans);
9601
9602         cache = btrfs_create_block_group_cache(root, chunk_offset, size);
9603         if (!cache)
9604                 return -ENOMEM;
9605
9606         btrfs_set_block_group_used(&cache->item, bytes_used);
9607         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
9608         btrfs_set_block_group_flags(&cache->item, type);
9609
9610         cache->flags = type;
9611         cache->last_byte_to_unpin = (u64)-1;
9612         cache->cached = BTRFS_CACHE_FINISHED;
9613         ret = exclude_super_stripes(root, cache);
9614         if (ret) {
9615                 /*
9616                  * We may have excluded something, so call this just in
9617                  * case.
9618                  */
9619                 free_excluded_extents(root, cache);
9620                 btrfs_put_block_group(cache);
9621                 return ret;
9622         }
9623
9624         add_new_free_space(cache, root->fs_info, chunk_offset,
9625                            chunk_offset + size);
9626
9627         free_excluded_extents(root, cache);
9628
9629         /*
9630          * Call to ensure the corresponding space_info object is created and
9631          * assigned to our block group, but don't update its counters just yet.
9632          * We want our bg to be added to the rbtree with its ->space_info set.
9633          */
9634         ret = update_space_info(root->fs_info, cache->flags, 0, 0,
9635                                 &cache->space_info);
9636         if (ret) {
9637                 btrfs_remove_free_space_cache(cache);
9638                 btrfs_put_block_group(cache);
9639                 return ret;
9640         }
9641
9642         ret = btrfs_add_block_group_cache(root->fs_info, cache);
9643         if (ret) {
9644                 btrfs_remove_free_space_cache(cache);
9645                 btrfs_put_block_group(cache);
9646                 return ret;
9647         }
9648
9649         /*
9650          * Now that our block group has its ->space_info set and is inserted in
9651          * the rbtree, update the space info's counters.
9652          */
9653         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
9654                                 &cache->space_info);
9655         if (ret) {
9656                 btrfs_remove_free_space_cache(cache);
9657                 spin_lock(&root->fs_info->block_group_cache_lock);
9658                 rb_erase(&cache->cache_node,
9659                          &root->fs_info->block_group_cache_tree);
9660                 RB_CLEAR_NODE(&cache->cache_node);
9661                 spin_unlock(&root->fs_info->block_group_cache_lock);
9662                 btrfs_put_block_group(cache);
9663                 return ret;
9664         }
9665         update_global_block_rsv(root->fs_info);
9666
9667         spin_lock(&cache->space_info->lock);
9668         cache->space_info->bytes_readonly += cache->bytes_super;
9669         spin_unlock(&cache->space_info->lock);
9670
9671         __link_block_group(cache->space_info, cache);
9672
9673         list_add_tail(&cache->bg_list, &trans->new_bgs);
9674
9675         set_avail_alloc_bits(extent_root->fs_info, type);
9676
9677         return 0;
9678 }
9679
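/*
 * An illustrative call sketch. The assumption (the caller lives in
 * volumes.c, outside this file) is that chunk allocation invokes this
 * roughly as:
 *
 *     ret = btrfs_make_block_group(trans, extent_root, 0, type,
 *                                  BTRFS_FIRST_CHUNK_TREE_OBJECTID,
 *                                  chunk_offset, chunk_size);
 *
 * The block group item is not written here; it is queued on
 * trans->new_bgs and persisted by btrfs_create_pending_block_groups().
 */
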
9680 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
9681 {
9682         u64 extra_flags = chunk_to_extended(flags) &
9683                                 BTRFS_EXTENDED_PROFILE_MASK;
9684
9685         write_seqlock(&fs_info->profiles_lock);
9686         if (flags & BTRFS_BLOCK_GROUP_DATA)
9687                 fs_info->avail_data_alloc_bits &= ~extra_flags;
9688         if (flags & BTRFS_BLOCK_GROUP_METADATA)
9689                 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
9690         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
9691                 fs_info->avail_system_alloc_bits &= ~extra_flags;
9692         write_sequnlock(&fs_info->profiles_lock);
9693 }
9694
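/*
 * The profiles_lock seqlock written above is read elsewhere with the
 * usual seqlock retry loop; a minimal reader sketch (illustrative only):
 *
 *     unsigned int seq;
 *     u64 avail;
 *
 *     do {
 *             seq = read_seqbegin(&fs_info->profiles_lock);
 *             avail = fs_info->avail_data_alloc_bits;
 *     } while (read_seqretry(&fs_info->profiles_lock, seq));
 */
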
9695 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
9696                              struct btrfs_root *root, u64 group_start,
9697                              struct extent_map *em)
9698 {
9699         struct btrfs_path *path;
9700         struct btrfs_block_group_cache *block_group;
9701         struct btrfs_free_cluster *cluster;
9702         struct btrfs_root *tree_root = root->fs_info->tree_root;
9703         struct btrfs_key key;
9704         struct inode *inode;
9705         struct kobject *kobj = NULL;
9706         int ret;
9707         int index;
9708         int factor;
9709         struct btrfs_caching_control *caching_ctl = NULL;
9710         bool remove_em;
9711
9712         root = root->fs_info->extent_root;
9713
9714         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
9715         BUG_ON(!block_group);
9716         BUG_ON(!block_group->ro);
9717
9718         /*
9719          * Free the reserved super bytes from this block group before
9720          * removing it.
9721          */
9722         free_excluded_extents(root, block_group);
9723
9724         memcpy(&key, &block_group->key, sizeof(key));
9725         index = get_block_group_index(block_group);
9726         if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
9727                                   BTRFS_BLOCK_GROUP_RAID1 |
9728                                   BTRFS_BLOCK_GROUP_RAID10))
9729                 factor = 2;
9730         else
9731                 factor = 1;
9732
9733         /* make sure this block group isn't part of an allocation cluster */
9734         cluster = &root->fs_info->data_alloc_cluster;
9735         spin_lock(&cluster->refill_lock);
9736         btrfs_return_cluster_to_free_space(block_group, cluster);
9737         spin_unlock(&cluster->refill_lock);
9738
9739         /*
9740          * make sure this block group isn't part of a metadata
9741          * allocation cluster
9742          */
9743         cluster = &root->fs_info->meta_alloc_cluster;
9744         spin_lock(&cluster->refill_lock);
9745         btrfs_return_cluster_to_free_space(block_group, cluster);
9746         spin_unlock(&cluster->refill_lock);
9747
9748         path = btrfs_alloc_path();
9749         if (!path) {
9750                 ret = -ENOMEM;
9751                 goto out;
9752         }
9753
9754         /*
9755          * get the inode first so any iput calls done for the io_list
9756          * aren't the final iput (no unlinks allowed now)
9757          */
9758         inode = lookup_free_space_inode(tree_root, block_group, path);
9759
9760         mutex_lock(&trans->transaction->cache_write_mutex);
9761         /*
9762          * Make sure our free space cache IO is done before removing the
9763          * free space inode.
9764          */
9765         spin_lock(&trans->transaction->dirty_bgs_lock);
9766         if (!list_empty(&block_group->io_list)) {
9767                 list_del_init(&block_group->io_list);
9768
9769                 WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);
9770
9771                 spin_unlock(&trans->transaction->dirty_bgs_lock);
9772                 btrfs_wait_cache_io(root, trans, block_group,
9773                                     &block_group->io_ctl, path,
9774                                     block_group->key.objectid);
9775                 btrfs_put_block_group(block_group);
9776                 spin_lock(&trans->transaction->dirty_bgs_lock);
9777         }
9778
9779         if (!list_empty(&block_group->dirty_list)) {
9780                 list_del_init(&block_group->dirty_list);
9781                 btrfs_put_block_group(block_group);
9782         }
9783         spin_unlock(&trans->transaction->dirty_bgs_lock);
9784         mutex_unlock(&trans->transaction->cache_write_mutex);
9785
9786         if (!IS_ERR(inode)) {
9787                 ret = btrfs_orphan_add(trans, inode);
9788                 if (ret) {
9789                         btrfs_add_delayed_iput(inode);
9790                         goto out;
9791                 }
9792                 clear_nlink(inode);
9793                 /* One for the block groups ref */
9794                 spin_lock(&block_group->lock);
9795                 if (block_group->iref) {
9796                         block_group->iref = 0;
9797                         block_group->inode = NULL;
9798                         spin_unlock(&block_group->lock);
9799                         iput(inode);
9800                 } else {
9801                         spin_unlock(&block_group->lock);
9802                 }
9803                 /* One for our lookup ref */
9804                 btrfs_add_delayed_iput(inode);
9805         }
9806
9807         key.objectid = BTRFS_FREE_SPACE_OBJECTID;
9808         key.offset = block_group->key.objectid;
9809         key.type = 0;
9810
9811         ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
9812         if (ret < 0)
9813                 goto out;
9814         if (ret > 0)
9815                 btrfs_release_path(path);
9816         if (ret == 0) {
9817                 ret = btrfs_del_item(trans, tree_root, path);
9818                 if (ret)
9819                         goto out;
9820                 btrfs_release_path(path);
9821         }
9822
9823         spin_lock(&root->fs_info->block_group_cache_lock);
9824         rb_erase(&block_group->cache_node,
9825                  &root->fs_info->block_group_cache_tree);
9826         RB_CLEAR_NODE(&block_group->cache_node);
9827
9828         if (root->fs_info->first_logical_byte == block_group->key.objectid)
9829                 root->fs_info->first_logical_byte = (u64)-1;
9830         spin_unlock(&root->fs_info->block_group_cache_lock);
9831
9832         down_write(&block_group->space_info->groups_sem);
9833         /*
9834          * we must use list_del_init so people can check to see if they
9835          * are still on the list after taking the semaphore
9836          */
9837         list_del_init(&block_group->list);
9838         if (list_empty(&block_group->space_info->block_groups[index])) {
9839                 kobj = block_group->space_info->block_group_kobjs[index];
9840                 block_group->space_info->block_group_kobjs[index] = NULL;
9841                 clear_avail_alloc_bits(root->fs_info, block_group->flags);
9842         }
9843         up_write(&block_group->space_info->groups_sem);
9844         if (kobj) {
9845                 kobject_del(kobj);
9846                 kobject_put(kobj);
9847         }
9848
9849         if (block_group->has_caching_ctl)
9850                 caching_ctl = get_caching_control(block_group);
9851         if (block_group->cached == BTRFS_CACHE_STARTED)
9852                 wait_block_group_cache_done(block_group);
9853         if (block_group->has_caching_ctl) {
9854                 down_write(&root->fs_info->commit_root_sem);
9855                 if (!caching_ctl) {
9856                         struct btrfs_caching_control *ctl;
9857
9858                         list_for_each_entry(ctl,
9859                                     &root->fs_info->caching_block_groups, list)
9860                                 if (ctl->block_group == block_group) {
9861                                         caching_ctl = ctl;
9862                                         atomic_inc(&caching_ctl->count);
9863                                         break;
9864                                 }
9865                 }
9866                 if (caching_ctl)
9867                         list_del_init(&caching_ctl->list);
9868                 up_write(&root->fs_info->commit_root_sem);
9869                 if (caching_ctl) {
9870                         /* Once for the caching bgs list and once for us. */
9871                         put_caching_control(caching_ctl);
9872                         put_caching_control(caching_ctl);
9873                 }
9874         }
9875
9876         spin_lock(&trans->transaction->dirty_bgs_lock);
9877         WARN_ON(!list_empty(&block_group->dirty_list));
9878         WARN_ON(!list_empty(&block_group->io_list));
9883         spin_unlock(&trans->transaction->dirty_bgs_lock);
9884         btrfs_remove_free_space_cache(block_group);
9885
9886         spin_lock(&block_group->space_info->lock);
9887         list_del_init(&block_group->ro_list);
9888
9889         if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
9890                 WARN_ON(block_group->space_info->total_bytes
9891                         < block_group->key.offset);
9892                 WARN_ON(block_group->space_info->bytes_readonly
9893                         < block_group->key.offset);
9894                 WARN_ON(block_group->space_info->disk_total
9895                         < block_group->key.offset * factor);
9896         }
9897         block_group->space_info->total_bytes -= block_group->key.offset;
9898         block_group->space_info->bytes_readonly -= block_group->key.offset;
9899         block_group->space_info->disk_total -= block_group->key.offset * factor;
9900
9901         spin_unlock(&block_group->space_info->lock);
9902
9903         memcpy(&key, &block_group->key, sizeof(key));
9904
9905         lock_chunks(root);
9906         if (!list_empty(&em->list)) {
9907                 /* We're in the transaction->pending_chunks list. */
9908                 free_extent_map(em);
9909         }
9910         spin_lock(&block_group->lock);
9911         block_group->removed = 1;
9912         /*
9913          * At this point trimming can't start on this block group, because we
9914          * removed the block group from the rbtree fs_info->block_group_cache_tree,
9915          * so no one can find it anymore, and even if someone already got this
9916          * block group before we removed it from the rbtree, they have already
9917          * incremented block_group->trimming - if they didn't, they won't find
9918          * any free space entries because we already removed them all when we
9919          * called btrfs_remove_free_space_cache().
9920          *
9921          * And we must not remove the extent map from the fs_info->mapping_tree
9922          * to prevent the same logical address range and physical device space
9923          * ranges from being reused for a new block group. This is because our
9924          * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
9925          * completely transactionless, so while it is trimming a range the
9926          * currently running transaction might finish and a new one start,
9927          * allowing for new block groups to be created that can reuse the same
9928          * physical device locations unless we take this special care.
9929          *
9930          * There may also be an implicit trim operation if the file system
9931          * is mounted with -odiscard. The same protections must remain
9932          * in place until the extents have been discarded completely when
9933          * the transaction commit has completed.
9934          */
9935         remove_em = (atomic_read(&block_group->trimming) == 0);
9936         /*
9937          * Make sure a trimmer task always sees the em in the pinned_chunks list
9938          * if it sees block_group->removed == 1 (needs to lock block_group->lock
9939          * before checking block_group->removed).
9940          */
9941         if (!remove_em) {
9942                 /*
9943                  * Our em might be in trans->transaction->pending_chunks which
9944                  * is protected by fs_info->chunk_mutex ([lock|unlock]_chunks),
9945                  * and so is the fs_info->pinned_chunks list.
9946                  *
9947                  * So at this point we must be holding the chunk_mutex to avoid
9948                  * any races with chunk allocation (more specifically at
9949                  * volumes.c:contains_pending_extent()), to ensure it always
9950                  * sees the em, either in the pending_chunks list or in the
9951                  * pinned_chunks list.
9952                  */
9953                 list_move_tail(&em->list, &root->fs_info->pinned_chunks);
9954         }
9955         spin_unlock(&block_group->lock);
9956
9957         if (remove_em) {
9958                 struct extent_map_tree *em_tree;
9959
9960                 em_tree = &root->fs_info->mapping_tree.map_tree;
9961                 write_lock(&em_tree->lock);
9962                 /*
9963                  * The em might be in the pending_chunks list, so make sure the
9964                  * chunk mutex is locked, since remove_extent_mapping() will
9965                  * delete us from that list.
9966                  */
9967                 remove_extent_mapping(em_tree, em);
9968                 write_unlock(&em_tree->lock);
9969                 /* once for the tree */
9970                 free_extent_map(em);
9971         }
9972
9973         unlock_chunks(root);
9974
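        /*
         * Drop the two remaining references: one taken by
         * btrfs_lookup_block_group() at the top of this function, and
         * (assumption, based on the rb_erase() pairing above) the one
         * that belonged to the block group rbtree entry.
         */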
9975         btrfs_put_block_group(block_group);
9976         btrfs_put_block_group(block_group);
9977
9978         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
9979         if (ret > 0)
9980                 ret = -EIO;
9981         if (ret < 0)
9982                 goto out;
9983
9984         ret = btrfs_del_item(trans, root, path);
9985 out:
9986         btrfs_free_path(path);
9987         return ret;
9988 }
9989
9990 /*
9991  * Process the unused_bgs list and remove any that don't have any allocated
9992  * space inside of them.
9993  */
9994 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
9995 {
9996         struct btrfs_block_group_cache *block_group;
9997         struct btrfs_space_info *space_info;
9998         struct btrfs_root *root = fs_info->extent_root;
9999         struct btrfs_trans_handle *trans;
10000         int ret = 0;
10001
10002         if (!fs_info->open)
10003                 return;
10004
10005         spin_lock(&fs_info->unused_bgs_lock);
10006         while (!list_empty(&fs_info->unused_bgs)) {
10007                 u64 start, end;
10008                 int trimming;
10009
10010                 block_group = list_first_entry(&fs_info->unused_bgs,
10011                                                struct btrfs_block_group_cache,
10012                                                bg_list);
10013                 space_info = block_group->space_info;
10014                 list_del_init(&block_group->bg_list);
10015                 if (ret || btrfs_mixed_space_info(space_info)) {
10016                         btrfs_put_block_group(block_group);
10017                         continue;
10018                 }
10019                 spin_unlock(&fs_info->unused_bgs_lock);
10020
10021                 mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
10022
10023                 /* Don't want to race with allocators so take the groups_sem */
10024                 down_write(&space_info->groups_sem);
10025                 spin_lock(&block_group->lock);
10026                 if (block_group->reserved ||
10027                     btrfs_block_group_used(&block_group->item) ||
10028                     block_group->ro) {
10029                         /*
10030                          * We want to bail if we made new allocations or have
10031                          * outstanding allocations in this block group.  We do
10032                          * the ro check in case balance is currently acting on
10033                          * this block group.
10034                          */
10035                         spin_unlock(&block_group->lock);
10036                         up_write(&space_info->groups_sem);
10037                         goto next;
10038                 }
10039                 spin_unlock(&block_group->lock);
10040
10041                 /* We don't want to force the issue, only flip if it's ok. */
10042                 ret = inc_block_group_ro(block_group, 0);
10043                 up_write(&space_info->groups_sem);
10044                 if (ret < 0) {
10045                         ret = 0;
10046                         goto next;
10047                 }
10048
10049                 /*
10050                  * Want to do this before we do anything else so we can recover
10051                  * properly if we fail to join the transaction.
10052                  */
10053                 /* 1 for btrfs_orphan_reserve_metadata() */
10054                 trans = btrfs_start_transaction(root, 1);
10055                 if (IS_ERR(trans)) {
10056                         btrfs_dec_block_group_ro(root, block_group);
10057                         ret = PTR_ERR(trans);
10058                         goto next;
10059                 }
10060
10061                 /*
10062                  * We could have pending pinned extents for this block group,
10063                  * just delete them, we don't care about them anymore.
10064                  */
10065                 start = block_group->key.objectid;
10066                 end = start + block_group->key.offset - 1;
10067                 /*
10068                  * Hold the unused_bg_unpin_mutex lock to avoid racing with
10069                  * btrfs_finish_extent_commit(). If we are at transaction N,
10070                  * another task might be running finish_extent_commit() for the
10071                  * previous transaction N - 1, and have seen a range belonging
10072                  * to the block group in freed_extents[] before we were able to
10073                  * clear the whole block group range from freed_extents[]. This
10074                  * means that task can look up the block group after we
10075                  * unpinned it from freed_extents[] and removed it, leading to
10076                  * a BUG_ON() at btrfs_unpin_extent_range().
10077                  */
10078                 mutex_lock(&fs_info->unused_bg_unpin_mutex);
10079                 ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
10080                                   EXTENT_DIRTY, GFP_NOFS);
10081                 if (ret) {
10082                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10083                         btrfs_dec_block_group_ro(root, block_group);
10084                         goto end_trans;
10085                 }
10086                 ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
10087                                   EXTENT_DIRTY, GFP_NOFS);
10088                 if (ret) {
10089                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10090                         btrfs_dec_block_group_ro(root, block_group);
10091                         goto end_trans;
10092                 }
10093                 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10094
10095                 /* Reset pinned so btrfs_put_block_group doesn't complain */
10096                 spin_lock(&space_info->lock);
10097                 spin_lock(&block_group->lock);
10098
10099                 space_info->bytes_pinned -= block_group->pinned;
10100                 space_info->bytes_readonly += block_group->pinned;
10101                 percpu_counter_add(&space_info->total_bytes_pinned,
10102                                    -block_group->pinned);
10103                 block_group->pinned = 0;
10104
10105                 spin_unlock(&block_group->lock);
10106                 spin_unlock(&space_info->lock);
10107
10108                 /* DISCARD can flip during remount */
10109                 trimming = btrfs_test_opt(root, DISCARD);
10110
10111                 /* Implicit trim during transaction commit. */
10112                 if (trimming)
10113                         btrfs_get_block_group_trimming(block_group);
10114
10115                 /*
10116                  * btrfs_remove_chunk() will abort the transaction if things go
10117                  * horribly wrong.
10118                  */
10119                 ret = btrfs_remove_chunk(trans, root,
10120                                          block_group->key.objectid);
10121
10122                 if (ret) {
10123                         if (trimming)
10124                                 btrfs_put_block_group_trimming(block_group);
10125                         goto end_trans;
10126                 }
10127
10128                 /*
10129                  * If we're not mounted with -odiscard, we can just forget
10130                  * about this block group. Otherwise we'll need to wait
10131                  * until transaction commit to do the actual discard.
10132                  */
10133                 if (trimming) {
10134                         WARN_ON(!list_empty(&block_group->bg_list));
10135                         spin_lock(&trans->transaction->deleted_bgs_lock);
10136                         list_move(&block_group->bg_list,
10137                                   &trans->transaction->deleted_bgs);
10138                         spin_unlock(&trans->transaction->deleted_bgs_lock);
10139                         btrfs_get_block_group(block_group);
10140                 }
10141 end_trans:
10142                 btrfs_end_transaction(trans, root);
10143 next:
10144                 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
10145                 btrfs_put_block_group(block_group);
10146                 spin_lock(&fs_info->unused_bgs_lock);
10147         }
10148         spin_unlock(&fs_info->unused_bgs_lock);
10149 }
10150
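/*
 * Block groups land on fs_info->unused_bgs in two places: when
 * btrfs_read_block_groups() finds a block group with zero bytes used at
 * mount time (see above), and (assumption: this hook is in the extent
 * accounting code earlier in this file) when the last allocated byte in
 * a block group is freed. Entries race with allocators, hence the
 * ro/reserved/used re-checks under block_group->lock above.
 */
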
10151 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
10152 {
10153         struct btrfs_space_info *space_info;
10154         struct btrfs_super_block *disk_super;
10155         u64 features;
10156         u64 flags;
10157         int mixed = 0;
10158         int ret;
10159
10160         disk_super = fs_info->super_copy;
10161         if (!btrfs_super_root(disk_super))
10162                 return 1;
10163
10164         features = btrfs_super_incompat_flags(disk_super);
10165         if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
10166                 mixed = 1;
10167
10168         flags = BTRFS_BLOCK_GROUP_SYSTEM;
10169         ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10170         if (ret)
10171                 goto out;
10172
10173         if (mixed) {
10174                 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
10175                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10176         } else {
10177                 flags = BTRFS_BLOCK_GROUP_METADATA;
10178                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10179                 if (ret)
10180                         goto out;
10181
10182                 flags = BTRFS_BLOCK_GROUP_DATA;
10183                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10184         }
10185 out:
10186         return ret;
10187 }
10188
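/*
 * Concretely: on a filesystem created with "mkfs.btrfs --mixed" (the
 * MIXED_GROUPS incompat bit), data and metadata share one space_info,
 * so two space_infos exist (SYSTEM plus the mixed one); otherwise there
 * are three: SYSTEM, METADATA and DATA.
 */
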
10189 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
10190 {
10191         return unpin_extent_range(root, start, end, false);
10192 }
10193
10194 /*
10195  * It used to be that old block groups would be left around forever.
10196  * Iterating over them would be enough to trim unused space.  Since we
10197  * now automatically remove them, we also need to iterate over unallocated
10198  * space.
10199  *
10200  * We don't want a transaction for this since the discard may take a
10201  * substantial amount of time.  We don't require that a transaction be
10202  * running, but we do need to take a running transaction into account
10203  * to ensure that we're not discarding chunks that were released in
10204  * the current transaction.
10205  *
10206  * Holding the chunks lock will prevent other threads from allocating
10207  * or releasing chunks, but it won't prevent a running transaction
10208  * from committing and releasing the memory that the pending chunks
10209  * list head uses.  For that, we need to take a reference to the
10210  * transaction.
10211  */
10212 static int btrfs_trim_free_extents(struct btrfs_device *device,
10213                                    u64 minlen, u64 *trimmed)
10214 {
10215         u64 start = 0, len = 0;
10216         int ret;
10217
10218         *trimmed = 0;
10219
10220         /* Not writeable = nothing to do. */
10221         if (!device->writeable)
10222                 return 0;
10223
10224         /* No free space = nothing to do. */
10225         if (device->total_bytes <= device->bytes_used)
10226                 return 0;
10227
10228         ret = 0;
10229
10230         while (1) {
10231                 struct btrfs_fs_info *fs_info = device->dev_root->fs_info;
10232                 struct btrfs_transaction *trans;
10233                 u64 bytes;
10234
10235                 ret = mutex_lock_interruptible(&fs_info->chunk_mutex);
10236                 if (ret)
10237                         return ret;
10238
10239                 down_read(&fs_info->commit_root_sem);
10240
10241                 spin_lock(&fs_info->trans_lock);
10242                 trans = fs_info->running_transaction;
10243                 if (trans)
10244                         atomic_inc(&trans->use_count);
10245                 spin_unlock(&fs_info->trans_lock);
10246
10247                 ret = find_free_dev_extent_start(trans, device, minlen, start,
10248                                                  &start, &len);
10249                 if (trans)
10250                         btrfs_put_transaction(trans);
10251
10252                 if (ret) {
10253                         up_read(&fs_info->commit_root_sem);
10254                         mutex_unlock(&fs_info->chunk_mutex);
10255                         if (ret == -ENOSPC)
10256                                 ret = 0;
10257                         break;
10258                 }
10259
10260                 ret = btrfs_issue_discard(device->bdev, start, len, &bytes);
10261                 up_read(&fs_info->commit_root_sem);
10262                 mutex_unlock(&fs_info->chunk_mutex);
10263
10264                 if (ret)
10265                         break;
10266
10267                 start += len;
10268                 *trimmed += bytes;
10269
10270                 if (fatal_signal_pending(current)) {
10271                         ret = -ERESTARTSYS;
10272                         break;
10273                 }
10274
10275                 cond_resched();
10276         }
10277
10278         return ret;
10279 }
10280
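/*
 * The loop above walks the device's unallocated space one hole at a
 * time: find_free_dev_extent_start() returns the next gap of at least
 * minlen bytes, btrfs_issue_discard() trims it, and start is advanced
 * past it. Holding chunk_mutex plus a reference on the running
 * transaction keeps a concurrent commit from handing the same range to
 * a new chunk mid-discard.
 */
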
10281 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
10282 {
10283         struct btrfs_fs_info *fs_info = root->fs_info;
10284         struct btrfs_block_group_cache *cache = NULL;
10285         struct btrfs_device *device;
10286         struct list_head *devices;
10287         u64 group_trimmed;
10288         u64 start;
10289         u64 end;
10290         u64 trimmed = 0;
10291         u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
10292         int ret = 0;
10293
10294         /*
10295          * Try to trim all FS space; our block groups may start at a non-zero offset.
10296          */
10297         if (range->len == total_bytes)
10298                 cache = btrfs_lookup_first_block_group(fs_info, range->start);
10299         else
10300                 cache = btrfs_lookup_block_group(fs_info, range->start);
10301
10302         while (cache) {
10303                 if (cache->key.objectid >= (range->start + range->len)) {
10304                         btrfs_put_block_group(cache);
10305                         break;
10306                 }
10307
10308                 start = max(range->start, cache->key.objectid);
10309                 end = min(range->start + range->len,
10310                                 cache->key.objectid + cache->key.offset);
10311
10312                 if (end - start >= range->minlen) {
10313                         if (!block_group_cache_done(cache)) {
10314                                 ret = cache_block_group(cache, 0);
10315                                 if (ret) {
10316                                         btrfs_put_block_group(cache);
10317                                         break;
10318                                 }
10319                                 ret = wait_block_group_cache_done(cache);
10320                                 if (ret) {
10321                                         btrfs_put_block_group(cache);
10322                                         break;
10323                                 }
10324                         }
10325                         ret = btrfs_trim_block_group(cache,
10326                                                      &group_trimmed,
10327                                                      start,
10328                                                      end,
10329                                                      range->minlen);
10330
10331                         trimmed += group_trimmed;
10332                         if (ret) {
10333                                 btrfs_put_block_group(cache);
10334                                 break;
10335                         }
10336                 }
10337
10338                 cache = next_block_group(fs_info->tree_root, cache);
10339         }
10340
10341         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
10342         devices = &root->fs_info->fs_devices->alloc_list;
10343         list_for_each_entry(device, devices, dev_alloc_list) {
10344                 ret = btrfs_trim_free_extents(device, range->minlen,
10345                                               &group_trimmed);
10346                 if (ret)
10347                         break;
10348
10349                 trimmed += group_trimmed;
10350         }
10351         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
10352
10353         range->len = trimmed;
10354         return ret;
10355 }
10356
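/*
 * Userspace reaches this through the FITRIM ioctl; a minimal caller
 * sketch (illustrative only, error handling elided; fd is any open file
 * descriptor on the mounted filesystem):
 *
 *     #include <linux/fs.h>
 *     #include <sys/ioctl.h>
 *
 *     struct fstrim_range range = {
 *             .start  = 0,
 *             .len    = ULLONG_MAX,
 *             .minlen = 0,
 *     };
 *     ioctl(fd, FITRIM, &range);
 *
 * On return, range.len holds the number of bytes trimmed, matching
 * range->len being rewritten just before the return above.
 */
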
10357 /*
10358  * btrfs_{start,end}_write_no_snapshoting() are similar to
10359  * mnt_{want,drop}_write(): they are used to prevent some tasks from writing
10360  * data into the page cache through nocow right before the subvolume is
10361  * snapshotted and only flushing it to disk after the snapshot creation, and
10362  * to prevent operations, while a snapshot is ongoing, that could make the
10363  * snapshot inconsistent (writes followed by expanding truncates, for example).
10364  */
10365 void btrfs_end_write_no_snapshoting(struct btrfs_root *root)
10366 {
10367         percpu_counter_dec(&root->subv_writers->counter);
10368         /*
10369          * Make sure counter is updated before we wake up
10370          * waiters.
10371          */
10372         smp_mb();
10373         if (waitqueue_active(&root->subv_writers->wait))
10374                 wake_up(&root->subv_writers->wait);
10375 }
10376
10377 int btrfs_start_write_no_snapshoting(struct btrfs_root *root)
10378 {
10379         if (atomic_read(&root->will_be_snapshoted))
10380                 return 0;
10381
10382         percpu_counter_inc(&root->subv_writers->counter);
10383         /*
10384          * Make sure counter is updated before we check for snapshot creation.
10385          */
10386         smp_mb();
10387         if (atomic_read(&root->will_be_snapshoted)) {
10388                 btrfs_end_write_no_snapshoting(root);
10389                 return 0;
10390         }
10391         return 1;
10392 }
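/*
 * A typical pairing of the two helpers above (illustrative sketch only):
 *
 *     if (!btrfs_start_write_no_snapshoting(root))
 *             return -EAGAIN;   (a snapshot is pending, do not enter
 *                                the nocow write path)
 *     ... do the nocow write work ...
 *     btrfs_end_write_no_snapshoting(root);
 *
 * A return value of 0 from the start helper means a snapshot is being
 * created and the caller must not proceed through the nocow path.
 */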