linux-drm-fsl-dcu.git: fs/btrfs/extent-tree.c (fa57965f60a33c20670e555bb1dc6b28439143a3)
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18 #include <linux/sched.h>
19 #include <linux/pagemap.h>
20 #include <linux/writeback.h>
21 #include <linux/blkdev.h>
22 #include <linux/sort.h>
23 #include <linux/rcupdate.h>
24 #include <linux/kthread.h>
25 #include <linux/slab.h>
26 #include <linux/ratelimit.h>
27 #include "compat.h"
28 #include "hash.h"
29 #include "ctree.h"
30 #include "disk-io.h"
31 #include "print-tree.h"
32 #include "transaction.h"
33 #include "volumes.h"
34 #include "raid56.h"
35 #include "locking.h"
36 #include "free-space-cache.h"
37 #include "math.h"
38
39 #undef SCRAMBLE_DELAYED_REFS
40
41 /*
42  * control flags for do_chunk_alloc's force field
43  * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
44  * if we really need one.
45  *
46  * CHUNK_ALLOC_LIMITED means to only try to allocate one
47  * if we have very few chunks already allocated.  This is
48  * used as part of the clustering code to help make sure
49  * we have a good pool of storage to cluster in, without
50  * filling the FS with empty chunks.
51  *
52  * CHUNK_ALLOC_FORCE means it must try to allocate one.
53  *
54  */
55 enum {
56         CHUNK_ALLOC_NO_FORCE = 0,
57         CHUNK_ALLOC_LIMITED = 1,
58         CHUNK_ALLOC_FORCE = 2,
59 };
60
61 /*
62  * Control how reservations are dealt with.
63  *
64  * RESERVE_FREE - freeing a reservation.
65  * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
66  *   ENOSPC accounting
67  * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
68  *   bytes_may_use as the ENOSPC accounting is done elsewhere
69  */
70 enum {
71         RESERVE_FREE = 0,
72         RESERVE_ALLOC = 1,
73         RESERVE_ALLOC_NO_ACCOUNT = 2,
74 };
75
76 static int update_block_group(struct btrfs_root *root,
77                               u64 bytenr, u64 num_bytes, int alloc);
78 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
79                                 struct btrfs_root *root,
80                                 u64 bytenr, u64 num_bytes, u64 parent,
81                                 u64 root_objectid, u64 owner_objectid,
82                                 u64 owner_offset, int refs_to_drop,
83                                 struct btrfs_delayed_extent_op *extra_op);
84 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
85                                     struct extent_buffer *leaf,
86                                     struct btrfs_extent_item *ei);
87 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
88                                       struct btrfs_root *root,
89                                       u64 parent, u64 root_objectid,
90                                       u64 flags, u64 owner, u64 offset,
91                                       struct btrfs_key *ins, int ref_mod);
92 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
93                                      struct btrfs_root *root,
94                                      u64 parent, u64 root_objectid,
95                                      u64 flags, struct btrfs_disk_key *key,
96                                      int level, struct btrfs_key *ins);
97 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
98                           struct btrfs_root *extent_root, u64 flags,
99                           int force);
100 static int find_next_key(struct btrfs_path *path, int level,
101                          struct btrfs_key *key);
102 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
103                             int dump_block_groups);
104 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
105                                        u64 num_bytes, int reserve);
106 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
107                                u64 num_bytes);
108
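/*
 * Small block group cache helpers: block_group_cache_done() reports whether
 * free space caching has finished for a block group, block_group_bits()
 * tests whether a block group carries all of the requested type bits, and
 * the get/put pair below manages the block group's reference count, freeing
 * the group (and its free_space_ctl) on the final put.
 */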
109 static noinline int
110 block_group_cache_done(struct btrfs_block_group_cache *cache)
111 {
112         smp_mb();
113         return cache->cached == BTRFS_CACHE_FINISHED;
114 }
115
116 static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
117 {
118         return (cache->flags & bits) == bits;
119 }
120
121 static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
122 {
123         atomic_inc(&cache->count);
124 }
125
126 void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
127 {
128         if (atomic_dec_and_test(&cache->count)) {
129                 WARN_ON(cache->pinned > 0);
130                 WARN_ON(cache->reserved > 0);
131                 kfree(cache->free_space_ctl);
132                 kfree(cache);
133         }
134 }
135
136 /*
137  * this adds the block group to the fs_info rb tree for the block group
138  * cache
139  */
140 static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
141                                 struct btrfs_block_group_cache *block_group)
142 {
143         struct rb_node **p;
144         struct rb_node *parent = NULL;
145         struct btrfs_block_group_cache *cache;
146
147         spin_lock(&info->block_group_cache_lock);
148         p = &info->block_group_cache_tree.rb_node;
149
150         while (*p) {
151                 parent = *p;
152                 cache = rb_entry(parent, struct btrfs_block_group_cache,
153                                  cache_node);
154                 if (block_group->key.objectid < cache->key.objectid) {
155                         p = &(*p)->rb_left;
156                 } else if (block_group->key.objectid > cache->key.objectid) {
157                         p = &(*p)->rb_right;
158                 } else {
159                         spin_unlock(&info->block_group_cache_lock);
160                         return -EEXIST;
161                 }
162         }
163
164         rb_link_node(&block_group->cache_node, parent, p);
165         rb_insert_color(&block_group->cache_node,
166                         &info->block_group_cache_tree);
167
168         if (info->first_logical_byte > block_group->key.objectid)
169                 info->first_logical_byte = block_group->key.objectid;
170
171         spin_unlock(&info->block_group_cache_lock);
172
173         return 0;
174 }
175
176 /*
177  * This will return the block group at or after bytenr if contains is 0, else
178  * it will return the block group that contains the bytenr
179  */
180 static struct btrfs_block_group_cache *
181 block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
182                               int contains)
183 {
184         struct btrfs_block_group_cache *cache, *ret = NULL;
185         struct rb_node *n;
186         u64 end, start;
187
188         spin_lock(&info->block_group_cache_lock);
189         n = info->block_group_cache_tree.rb_node;
190
191         while (n) {
192                 cache = rb_entry(n, struct btrfs_block_group_cache,
193                                  cache_node);
194                 end = cache->key.objectid + cache->key.offset - 1;
195                 start = cache->key.objectid;
196
197                 if (bytenr < start) {
198                         if (!contains && (!ret || start < ret->key.objectid))
199                                 ret = cache;
200                         n = n->rb_left;
201                 } else if (bytenr > start) {
202                         if (contains && bytenr <= end) {
203                                 ret = cache;
204                                 break;
205                         }
206                         n = n->rb_right;
207                 } else {
208                         ret = cache;
209                         break;
210                 }
211         }
212         if (ret) {
213                 btrfs_get_block_group(ret);
214                 if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
215                         info->first_logical_byte = ret->key.objectid;
216         }
217         spin_unlock(&info->block_group_cache_lock);
218
219         return ret;
220 }
221
222 static int add_excluded_extent(struct btrfs_root *root,
223                                u64 start, u64 num_bytes)
224 {
225         u64 end = start + num_bytes - 1;
226         set_extent_bits(&root->fs_info->freed_extents[0],
227                         start, end, EXTENT_UPTODATE, GFP_NOFS);
228         set_extent_bits(&root->fs_info->freed_extents[1],
229                         start, end, EXTENT_UPTODATE, GFP_NOFS);
230         return 0;
231 }
232
233 static void free_excluded_extents(struct btrfs_root *root,
234                                   struct btrfs_block_group_cache *cache)
235 {
236         u64 start, end;
237
238         start = cache->key.objectid;
239         end = start + cache->key.offset - 1;
240
241         clear_extent_bits(&root->fs_info->freed_extents[0],
242                           start, end, EXTENT_UPTODATE, GFP_NOFS);
243         clear_extent_bits(&root->fs_info->freed_extents[1],
244                           start, end, EXTENT_UPTODATE, GFP_NOFS);
245 }
246
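/*
 * Mark the superblock mirrors (and anything below BTRFS_SUPER_INFO_OFFSET)
 * that fall inside this block group as excluded, so the free space caching
 * code never hands those ranges out as allocatable space.  The excluded
 * bytes are accounted in cache->bytes_super.
 */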
247 static int exclude_super_stripes(struct btrfs_root *root,
248                                  struct btrfs_block_group_cache *cache)
249 {
250         u64 bytenr;
251         u64 *logical;
252         int stripe_len;
253         int i, nr, ret;
254
255         if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
256                 stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
257                 cache->bytes_super += stripe_len;
258                 ret = add_excluded_extent(root, cache->key.objectid,
259                                           stripe_len);
260                 if (ret)
261                         return ret;
262         }
263
264         for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
265                 bytenr = btrfs_sb_offset(i);
266                 ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
267                                        cache->key.objectid, bytenr,
268                                        0, &logical, &nr, &stripe_len);
269                 if (ret)
270                         return ret;
271
272                 while (nr--) {
273                         cache->bytes_super += stripe_len;
274                         ret = add_excluded_extent(root, logical[nr],
275                                                   stripe_len);
276                         if (ret) {
277                                 kfree(logical);
278                                 return ret;
279                         }
280                 }
281
282                 kfree(logical);
283         }
284         return 0;
285 }
286
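/*
 * Return the caching control for a block group whose caching is currently
 * in progress (BTRFS_CACHE_STARTED), holding an extra reference that the
 * caller must drop with put_caching_control().  Returns NULL if caching has
 * not started or there is no caching_ctl (fast load).
 */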
287 static struct btrfs_caching_control *
288 get_caching_control(struct btrfs_block_group_cache *cache)
289 {
290         struct btrfs_caching_control *ctl;
291
292         spin_lock(&cache->lock);
293         if (cache->cached != BTRFS_CACHE_STARTED) {
294                 spin_unlock(&cache->lock);
295                 return NULL;
296         }
297
298         /* We're loading it the fast way, so we don't have a caching_ctl. */
299         if (!cache->caching_ctl) {
300                 spin_unlock(&cache->lock);
301                 return NULL;
302         }
303
304         ctl = cache->caching_ctl;
305         atomic_inc(&ctl->count);
306         spin_unlock(&cache->lock);
307         return ctl;
308 }
309
310 static void put_caching_control(struct btrfs_caching_control *ctl)
311 {
312         if (atomic_dec_and_test(&ctl->count))
313                 kfree(ctl);
314 }
315
316 /*
317  * This is only called by cache_block_group.  Since we could have freed extents,
318  * we need to check the pinned_extents for any extents that can't be used yet;
319  * their free space will be released as soon as the transaction commits.
320  */
321 static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
322                               struct btrfs_fs_info *info, u64 start, u64 end)
323 {
324         u64 extent_start, extent_end, size, total_added = 0;
325         int ret;
326
327         while (start < end) {
328                 ret = find_first_extent_bit(info->pinned_extents, start,
329                                             &extent_start, &extent_end,
330                                             EXTENT_DIRTY | EXTENT_UPTODATE,
331                                             NULL);
332                 if (ret)
333                         break;
334
335                 if (extent_start <= start) {
336                         start = extent_end + 1;
337                 } else if (extent_start > start && extent_start < end) {
338                         size = extent_start - start;
339                         total_added += size;
340                         ret = btrfs_add_free_space(block_group, start,
341                                                    size);
342                         BUG_ON(ret); /* -ENOMEM or logic error */
343                         start = extent_end + 1;
344                 } else {
345                         break;
346                 }
347         }
348
349         if (start < end) {
350                 size = end - start;
351                 total_added += size;
352                 ret = btrfs_add_free_space(block_group, start, size);
353                 BUG_ON(ret); /* -ENOMEM or logic error */
354         }
355
356         return total_added;
357 }
358
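/*
 * Slow path for populating a block group's free space cache.  The worker
 * walks the extent tree through the commit root (locking skipped), feeding
 * every gap between extent items inside the block group to
 * add_new_free_space().  It periodically drops its locks to reschedule and
 * wakes waiters on caching_ctl->wait as free space is found, marking the
 * group BTRFS_CACHE_FINISHED when the scan completes.
 */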
359 static noinline void caching_thread(struct btrfs_work *work)
360 {
361         struct btrfs_block_group_cache *block_group;
362         struct btrfs_fs_info *fs_info;
363         struct btrfs_caching_control *caching_ctl;
364         struct btrfs_root *extent_root;
365         struct btrfs_path *path;
366         struct extent_buffer *leaf;
367         struct btrfs_key key;
368         u64 total_found = 0;
369         u64 last = 0;
370         u32 nritems;
371         int ret = 0;
372
373         caching_ctl = container_of(work, struct btrfs_caching_control, work);
374         block_group = caching_ctl->block_group;
375         fs_info = block_group->fs_info;
376         extent_root = fs_info->extent_root;
377
378         path = btrfs_alloc_path();
379         if (!path)
380                 goto out;
381
382         last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
383
384         /*
385          * We don't want to deadlock with somebody trying to allocate a new
386          * extent for the extent root while also trying to search the extent
387          * root to add free space.  So we skip locking and search the commit
388          * root, since it's read-only
389          */
390         path->skip_locking = 1;
391         path->search_commit_root = 1;
392         path->reada = 1;
393
394         key.objectid = last;
395         key.offset = 0;
396         key.type = BTRFS_EXTENT_ITEM_KEY;
397 again:
398         mutex_lock(&caching_ctl->mutex);
399         /* need to make sure the commit_root doesn't disappear */
400         down_read(&fs_info->extent_commit_sem);
401
402         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
403         if (ret < 0)
404                 goto err;
405
406         leaf = path->nodes[0];
407         nritems = btrfs_header_nritems(leaf);
408
409         while (1) {
410                 if (btrfs_fs_closing(fs_info) > 1) {
411                         last = (u64)-1;
412                         break;
413                 }
414
415                 if (path->slots[0] < nritems) {
416                         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
417                 } else {
418                         ret = find_next_key(path, 0, &key);
419                         if (ret)
420                                 break;
421
422                         if (need_resched()) {
423                                 caching_ctl->progress = last;
424                                 btrfs_release_path(path);
425                                 up_read(&fs_info->extent_commit_sem);
426                                 mutex_unlock(&caching_ctl->mutex);
427                                 cond_resched();
428                                 goto again;
429                         }
430
431                         ret = btrfs_next_leaf(extent_root, path);
432                         if (ret < 0)
433                                 goto err;
434                         if (ret)
435                                 break;
436                         leaf = path->nodes[0];
437                         nritems = btrfs_header_nritems(leaf);
438                         continue;
439                 }
440
441                 if (key.objectid < block_group->key.objectid) {
442                         path->slots[0]++;
443                         continue;
444                 }
445
446                 if (key.objectid >= block_group->key.objectid +
447                     block_group->key.offset)
448                         break;
449
450                 if (key.type == BTRFS_EXTENT_ITEM_KEY ||
451                     key.type == BTRFS_METADATA_ITEM_KEY) {
452                         total_found += add_new_free_space(block_group,
453                                                           fs_info, last,
454                                                           key.objectid);
455                         if (key.type == BTRFS_METADATA_ITEM_KEY)
456                                 last = key.objectid +
457                                         fs_info->tree_root->leafsize;
458                         else
459                                 last = key.objectid + key.offset;
460
461                         if (total_found > (1024 * 1024 * 2)) {
462                                 total_found = 0;
463                                 wake_up(&caching_ctl->wait);
464                         }
465                 }
466                 path->slots[0]++;
467         }
468         ret = 0;
469
470         total_found += add_new_free_space(block_group, fs_info, last,
471                                           block_group->key.objectid +
472                                           block_group->key.offset);
473         caching_ctl->progress = (u64)-1;
474
475         spin_lock(&block_group->lock);
476         block_group->caching_ctl = NULL;
477         block_group->cached = BTRFS_CACHE_FINISHED;
478         spin_unlock(&block_group->lock);
479
480 err:
481         btrfs_free_path(path);
482         up_read(&fs_info->extent_commit_sem);
483
484         free_excluded_extents(extent_root, block_group);
485
486         mutex_unlock(&caching_ctl->mutex);
487 out:
488         wake_up(&caching_ctl->wait);
489
490         put_caching_control(caching_ctl);
491         btrfs_put_block_group(block_group);
492 }
493
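/*
 * Start free space caching for a block group.  When the space cache mount
 * option is enabled we first try the fast path and load the on-disk free
 * space cache; if that succeeds the group is immediately marked finished.
 * Otherwise, unless load_cache_only was requested, the group is marked
 * BTRFS_CACHE_STARTED and caching_thread() is queued to scan the extent
 * tree in the background.
 */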
494 static int cache_block_group(struct btrfs_block_group_cache *cache,
495                              int load_cache_only)
496 {
497         DEFINE_WAIT(wait);
498         struct btrfs_fs_info *fs_info = cache->fs_info;
499         struct btrfs_caching_control *caching_ctl;
500         int ret = 0;
501
502         caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
503         if (!caching_ctl)
504                 return -ENOMEM;
505
506         INIT_LIST_HEAD(&caching_ctl->list);
507         mutex_init(&caching_ctl->mutex);
508         init_waitqueue_head(&caching_ctl->wait);
509         caching_ctl->block_group = cache;
510         caching_ctl->progress = cache->key.objectid;
511         atomic_set(&caching_ctl->count, 1);
512         caching_ctl->work.func = caching_thread;
513
514         spin_lock(&cache->lock);
515         /*
516          * This should be a rare occasion, but this could happen I think in the
517          * case where one thread starts to load the space cache info, and then
518          * some other thread starts a transaction commit which tries to do an
519          * allocation while the other thread is still loading the space cache
520          * info.  The previous loop should have kept us from choosing this block
521          * group, but if we've moved to the state where we will wait on caching
522          * block groups we need to first check if we're doing a fast load here,
523          * so we can wait for it to finish, otherwise we could end up allocating
524          * from a block group whose cache gets evicted for one reason or
525          * another.
526          */
527         while (cache->cached == BTRFS_CACHE_FAST) {
528                 struct btrfs_caching_control *ctl;
529
530                 ctl = cache->caching_ctl;
531                 atomic_inc(&ctl->count);
532                 prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
533                 spin_unlock(&cache->lock);
534
535                 schedule();
536
537                 finish_wait(&ctl->wait, &wait);
538                 put_caching_control(ctl);
539                 spin_lock(&cache->lock);
540         }
541
542         if (cache->cached != BTRFS_CACHE_NO) {
543                 spin_unlock(&cache->lock);
544                 kfree(caching_ctl);
545                 return 0;
546         }
547         WARN_ON(cache->caching_ctl);
548         cache->caching_ctl = caching_ctl;
549         cache->cached = BTRFS_CACHE_FAST;
550         spin_unlock(&cache->lock);
551
552         if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
553                 ret = load_free_space_cache(fs_info, cache);
554
555                 spin_lock(&cache->lock);
556                 if (ret == 1) {
557                         cache->caching_ctl = NULL;
558                         cache->cached = BTRFS_CACHE_FINISHED;
559                         cache->last_byte_to_unpin = (u64)-1;
560                 } else {
561                         if (load_cache_only) {
562                                 cache->caching_ctl = NULL;
563                                 cache->cached = BTRFS_CACHE_NO;
564                         } else {
565                                 cache->cached = BTRFS_CACHE_STARTED;
566                         }
567                 }
568                 spin_unlock(&cache->lock);
569                 wake_up(&caching_ctl->wait);
570                 if (ret == 1) {
571                         put_caching_control(caching_ctl);
572                         free_excluded_extents(fs_info->extent_root, cache);
573                         return 0;
574                 }
575         } else {
576                 /*
577                  * We are not going to do the fast caching, set cached to the
578                  * appropriate value and wake up any waiters.
579                  */
580                 spin_lock(&cache->lock);
581                 if (load_cache_only) {
582                         cache->caching_ctl = NULL;
583                         cache->cached = BTRFS_CACHE_NO;
584                 } else {
585                         cache->cached = BTRFS_CACHE_STARTED;
586                 }
587                 spin_unlock(&cache->lock);
588                 wake_up(&caching_ctl->wait);
589         }
590
591         if (load_cache_only) {
592                 put_caching_control(caching_ctl);
593                 return 0;
594         }
595
596         down_write(&fs_info->extent_commit_sem);
597         atomic_inc(&caching_ctl->count);
598         list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
599         up_write(&fs_info->extent_commit_sem);
600
601         btrfs_get_block_group(cache);
602
603         btrfs_queue_worker(&fs_info->caching_workers, &caching_ctl->work);
604
605         return ret;
606 }
607
608 /*
609  * return the block group that starts at or after bytenr
610  */
611 static struct btrfs_block_group_cache *
612 btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
613 {
614         struct btrfs_block_group_cache *cache;
615
616         cache = block_group_cache_tree_search(info, bytenr, 0);
617
618         return cache;
619 }
620
621 /*
622  * return the block group that contains the given bytenr
623  */
624 struct btrfs_block_group_cache *btrfs_lookup_block_group(
625                                                  struct btrfs_fs_info *info,
626                                                  u64 bytenr)
627 {
628         struct btrfs_block_group_cache *cache;
629
630         cache = block_group_cache_tree_search(info, bytenr, 1);
631
632         return cache;
633 }
634
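/*
 * Find the space_info whose type flags (data/metadata/system) intersect the
 * given flags, or return NULL if no such space_info has been created yet.
 */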
635 static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
636                                                   u64 flags)
637 {
638         struct list_head *head = &info->space_info;
639         struct btrfs_space_info *found;
640
641         flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;
642
643         rcu_read_lock();
644         list_for_each_entry_rcu(found, head, list) {
645                 if (found->flags & flags) {
646                         rcu_read_unlock();
647                         return found;
648                 }
649         }
650         rcu_read_unlock();
651         return NULL;
652 }
653
654 /*
655  * after adding space to the filesystem, we need to clear the full flags
656  * on all the space infos.
657  */
658 void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
659 {
660         struct list_head *head = &info->space_info;
661         struct btrfs_space_info *found;
662
663         rcu_read_lock();
664         list_for_each_entry_rcu(found, head, list)
665                 found->full = 0;
666         rcu_read_unlock();
667 }
668
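/*
 * Scan for a metadata block group that still has room: one whose used +
 * pinned + reserved bytes stay below a fraction of the group's size.  The
 * search starts at the hint, wraps around to search_start, and finally
 * retries with read-only groups included and the full group size as the
 * threshold.  Returns the start of the group found, or 0 if none qualifies.
 */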
669 u64 btrfs_find_block_group(struct btrfs_root *root,
670                            u64 search_start, u64 search_hint, int owner)
671 {
672         struct btrfs_block_group_cache *cache;
673         u64 used;
674         u64 last = max(search_hint, search_start);
675         u64 group_start = 0;
676         int full_search = 0;
677         int factor = 9;
678         int wrapped = 0;
679 again:
680         while (1) {
681                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
682                 if (!cache)
683                         break;
684
685                 spin_lock(&cache->lock);
686                 last = cache->key.objectid + cache->key.offset;
687                 used = btrfs_block_group_used(&cache->item);
688
689                 if ((full_search || !cache->ro) &&
690                     block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
691                         if (used + cache->pinned + cache->reserved <
692                             div_factor(cache->key.offset, factor)) {
693                                 group_start = cache->key.objectid;
694                                 spin_unlock(&cache->lock);
695                                 btrfs_put_block_group(cache);
696                                 goto found;
697                         }
698                 }
699                 spin_unlock(&cache->lock);
700                 btrfs_put_block_group(cache);
701                 cond_resched();
702         }
703         if (!wrapped) {
704                 last = search_start;
705                 wrapped = 1;
706                 goto again;
707         }
708         if (!full_search && factor < 10) {
709                 last = search_start;
710                 full_search = 1;
711                 factor = 10;
712                 goto again;
713         }
714 found:
715         return group_start;
716 }
717
718 /* simple helper to search for an existing extent at a given offset */
719 int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
720 {
721         int ret;
722         struct btrfs_key key;
723         struct btrfs_path *path;
724
725         path = btrfs_alloc_path();
726         if (!path)
727                 return -ENOMEM;
728
729         key.objectid = start;
730         key.offset = len;
731         key.type = BTRFS_EXTENT_ITEM_KEY;
732         ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
733                                 0, 0);
734         if (ret > 0) {
735                 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
736                 if (key.objectid == start &&
737                     key.type == BTRFS_METADATA_ITEM_KEY)
738                         ret = 0;
739         }
740         btrfs_free_path(path);
741         return ret;
742 }
743
744 /*
745  * helper function to lookup reference count and flags of a tree block.
746  *
747  * The head node for a delayed ref is used to store the sum of all the
748  * reference count modifications queued up in the rbtree. The head
749  * node may also store the extent flags to set. This way you can check
750  * to see what the reference count and extent flags would be once all of
751  * the delayed refs have been processed.
752  */
753 int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
754                              struct btrfs_root *root, u64 bytenr,
755                              u64 offset, int metadata, u64 *refs, u64 *flags)
756 {
757         struct btrfs_delayed_ref_head *head;
758         struct btrfs_delayed_ref_root *delayed_refs;
759         struct btrfs_path *path;
760         struct btrfs_extent_item *ei;
761         struct extent_buffer *leaf;
762         struct btrfs_key key;
763         u32 item_size;
764         u64 num_refs;
765         u64 extent_flags;
766         int ret;
767
768         /*
769          * If we don't have skinny metadata, don't bother doing anything
770          * different
771          */
772         if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
773                 offset = root->leafsize;
774                 metadata = 0;
775         }
776
777         path = btrfs_alloc_path();
778         if (!path)
779                 return -ENOMEM;
780
781         if (metadata) {
782                 key.objectid = bytenr;
783                 key.type = BTRFS_METADATA_ITEM_KEY;
784                 key.offset = offset;
785         } else {
786                 key.objectid = bytenr;
787                 key.type = BTRFS_EXTENT_ITEM_KEY;
788                 key.offset = offset;
789         }
790
791         if (!trans) {
792                 path->skip_locking = 1;
793                 path->search_commit_root = 1;
794         }
795 again:
796         ret = btrfs_search_slot(trans, root->fs_info->extent_root,
797                                 &key, path, 0, 0);
798         if (ret < 0)
799                 goto out_free;
800
801         if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
802                 key.type = BTRFS_EXTENT_ITEM_KEY;
803                 key.offset = root->leafsize;
804                 btrfs_release_path(path);
805                 goto again;
806         }
807
808         if (ret == 0) {
809                 leaf = path->nodes[0];
810                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
811                 if (item_size >= sizeof(*ei)) {
812                         ei = btrfs_item_ptr(leaf, path->slots[0],
813                                             struct btrfs_extent_item);
814                         num_refs = btrfs_extent_refs(leaf, ei);
815                         extent_flags = btrfs_extent_flags(leaf, ei);
816                 } else {
817 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
818                         struct btrfs_extent_item_v0 *ei0;
819                         BUG_ON(item_size != sizeof(*ei0));
820                         ei0 = btrfs_item_ptr(leaf, path->slots[0],
821                                              struct btrfs_extent_item_v0);
822                         num_refs = btrfs_extent_refs_v0(leaf, ei0);
823                         /* FIXME: this isn't correct for data */
824                         extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
825 #else
826                         BUG();
827 #endif
828                 }
829                 BUG_ON(num_refs == 0);
830         } else {
831                 num_refs = 0;
832                 extent_flags = 0;
833                 ret = 0;
834         }
835
836         if (!trans)
837                 goto out;
838
839         delayed_refs = &trans->transaction->delayed_refs;
840         spin_lock(&delayed_refs->lock);
841         head = btrfs_find_delayed_ref_head(trans, bytenr);
842         if (head) {
843                 if (!mutex_trylock(&head->mutex)) {
844                         atomic_inc(&head->node.refs);
845                         spin_unlock(&delayed_refs->lock);
846
847                         btrfs_release_path(path);
848
849                         /*
850                          * Mutex was contended, block until it's released and try
851                          * again
852                          */
853                         mutex_lock(&head->mutex);
854                         mutex_unlock(&head->mutex);
855                         btrfs_put_delayed_ref(&head->node);
856                         goto again;
857                 }
858                 if (head->extent_op && head->extent_op->update_flags)
859                         extent_flags |= head->extent_op->flags_to_set;
860                 else
861                         BUG_ON(num_refs == 0);
862
863                 num_refs += head->node.ref_mod;
864                 mutex_unlock(&head->mutex);
865         }
866         spin_unlock(&delayed_refs->lock);
867 out:
868         WARN_ON(num_refs == 0);
869         if (refs)
870                 *refs = num_refs;
871         if (flags)
872                 *flags = extent_flags;
873 out_free:
874         btrfs_free_path(path);
875         return ret;
876 }
877
878 /*
879  * Back reference rules.  Back refs have three main goals:
880  *
881  * 1) differentiate between all holders of references to an extent so that
882  *    when a reference is dropped we can make sure it was a valid reference
883  *    before freeing the extent.
884  *
885  * 2) Provide enough information to quickly find the holders of an extent
886  *    if we notice a given block is corrupted or bad.
887  *
888  * 3) Make it easy to migrate blocks for FS shrinking or storage pool
889  *    maintenance.  This is actually the same as #2, but with a slightly
890  *    different use case.
891  *
892  * There are two kinds of back refs. Implicit back refs are optimized
893  * for pointers in non-shared tree blocks. For a given pointer in a block,
894  * back refs of this kind provide information about the block's owner tree
895  * and the pointer's key. This information allows us to find the block by
896  * b-tree searching. Full back refs are for pointers in tree blocks not
897  * referenced by their owner trees; the location of the tree block is
898  * recorded in the back ref. Full back refs are actually generic and can
899  * be used wherever implicit back refs are used. The major shortcoming
900  * of full back refs is their overhead: every time a tree block gets
901  * COWed, we have to update the back ref entries for all pointers in it.
902  *
903  * For a newly allocated tree block, we use implicit back refs for
904  * pointers in it. This means most tree related operations only involve
905  * implicit back refs. For a tree block created in an old transaction, the
906  * only way to drop a reference to it is to COW it. So we can detect the
907  * event that a tree block loses its owner tree's reference and do the
908  * back refs conversion.
909  *
910  * When a tree block is COW'd through a tree, there are four cases:
911  *
912  * The reference count of the block is one and the tree is the block's
913  * owner tree. Nothing to do in this case.
914  *
915  * The reference count of the block is one and the tree is not the
916  * block's owner tree. In this case, full back refs are used for pointers
917  * in the block. Remove these full back refs and add implicit back refs for
918  * every pointer in the new block.
919  *
920  * The reference count of the block is greater than one and the tree is
921  * the block's owner tree. In this case, implicit back refs are used for
922  * pointers in the block. Add full back refs for every pointer in the
923  * block and increase the lower level extents' reference counts. The
924  * original implicit back refs are carried over to the new block.
925  *
926  * The reference count of the block is greater than one and the tree is
927  * not the block's owner tree. Add implicit back refs for every pointer in
928  * the new block and increase the lower level extents' reference counts.
929  *
930  * Back Reference Key composing:
931  *
932  * The key objectid corresponds to the first byte in the extent,
933  * The key type is used to differentiate between types of back refs.
934  * There are different meanings of the key offset for different types
935  * of back refs.
936  *
937  * File extents can be referenced by:
938  *
939  * - multiple snapshots, subvolumes, or different generations in one subvol
940  * - different files inside a single subvolume
941  * - different offsets inside a file (bookend extents in file.c)
942  *
943  * The extent ref structure for the implicit back refs has fields for:
944  *
945  * - Objectid of the subvolume root
946  * - objectid of the file holding the reference
947  * - original offset in the file
948  * - how many bookend extents
949  *
950  * The key offset for the implicit back refs is hash of the first
951  * three fields.
952  *
953  * The extent ref structure for the full back refs has a field for:
954  *
955  * - number of pointers in the tree leaf
956  *
957  * The key offset for the full back refs is the first byte of
958  * the tree leaf.
959  *
960  * When a file extent is allocated, implicit back refs are used and
961  * the fields are filled in:
962  *
963  *     (root_key.objectid, inode objectid, offset in file, 1)
964  *
965  * When a file extent is removed by file truncation, we find the
966  * corresponding implicit back refs and check the following fields:
967  *
968  *     (btrfs_header_owner(leaf), inode objectid, offset in file)
969  *
970  * Btree extents can be referenced by:
971  *
972  * - Different subvolumes
973  *
974  * Both the implicit back refs and the full back refs for tree blocks
975  * only consist of the key. The key offset for the implicit back refs is
976  * the objectid of the block's owner tree. The key offset for the full
977  * back refs is the first byte of the parent block.
978  *
979  * When implicit back refs are used, information about the lowest key and
980  * level of the tree block is required. This information is stored in the
981  * tree block info structure.
982  */
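/*
 * For illustration, the back ref keys this scheme translates to on disk
 * (matching the lookup/insert helpers below) are roughly:
 *
 *   data, implicit:  (extent bytenr, EXTENT_DATA_REF_KEY,
 *                     hash(root objectid, inode objectid, file offset))
 *   data, full:      (extent bytenr, SHARED_DATA_REF_KEY, parent bytenr)
 *   tree, implicit:  (block bytenr, TREE_BLOCK_REF_KEY, owner root objectid)
 *   tree, full:      (block bytenr, SHARED_BLOCK_REF_KEY, parent bytenr)
 */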
983
984 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
985 static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
986                                   struct btrfs_root *root,
987                                   struct btrfs_path *path,
988                                   u64 owner, u32 extra_size)
989 {
990         struct btrfs_extent_item *item;
991         struct btrfs_extent_item_v0 *ei0;
992         struct btrfs_extent_ref_v0 *ref0;
993         struct btrfs_tree_block_info *bi;
994         struct extent_buffer *leaf;
995         struct btrfs_key key;
996         struct btrfs_key found_key;
997         u32 new_size = sizeof(*item);
998         u64 refs;
999         int ret;
1000
1001         leaf = path->nodes[0];
1002         BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));
1003
1004         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1005         ei0 = btrfs_item_ptr(leaf, path->slots[0],
1006                              struct btrfs_extent_item_v0);
1007         refs = btrfs_extent_refs_v0(leaf, ei0);
1008
1009         if (owner == (u64)-1) {
1010                 while (1) {
1011                         if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1012                                 ret = btrfs_next_leaf(root, path);
1013                                 if (ret < 0)
1014                                         return ret;
1015                                 BUG_ON(ret > 0); /* Corruption */
1016                                 leaf = path->nodes[0];
1017                         }
1018                         btrfs_item_key_to_cpu(leaf, &found_key,
1019                                               path->slots[0]);
1020                         BUG_ON(key.objectid != found_key.objectid);
1021                         if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
1022                                 path->slots[0]++;
1023                                 continue;
1024                         }
1025                         ref0 = btrfs_item_ptr(leaf, path->slots[0],
1026                                               struct btrfs_extent_ref_v0);
1027                         owner = btrfs_ref_objectid_v0(leaf, ref0);
1028                         break;
1029                 }
1030         }
1031         btrfs_release_path(path);
1032
1033         if (owner < BTRFS_FIRST_FREE_OBJECTID)
1034                 new_size += sizeof(*bi);
1035
1036         new_size -= sizeof(*ei0);
1037         ret = btrfs_search_slot(trans, root, &key, path,
1038                                 new_size + extra_size, 1);
1039         if (ret < 0)
1040                 return ret;
1041         BUG_ON(ret); /* Corruption */
1042
1043         btrfs_extend_item(root, path, new_size);
1044
1045         leaf = path->nodes[0];
1046         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1047         btrfs_set_extent_refs(leaf, item, refs);
1048         /* FIXME: get real generation */
1049         btrfs_set_extent_generation(leaf, item, 0);
1050         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1051                 btrfs_set_extent_flags(leaf, item,
1052                                        BTRFS_EXTENT_FLAG_TREE_BLOCK |
1053                                        BTRFS_BLOCK_FLAG_FULL_BACKREF);
1054                 bi = (struct btrfs_tree_block_info *)(item + 1);
1055                 /* FIXME: get first key of the block */
1056                 memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
1057                 btrfs_set_tree_block_level(leaf, bi, (int)owner);
1058         } else {
1059                 btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
1060         }
1061         btrfs_mark_buffer_dirty(leaf);
1062         return 0;
1063 }
1064 #endif
1065
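/*
 * Hash the (root objectid, inode objectid, file offset) triple into the key
 * offset used for implicit data back refs: the root objectid feeds one
 * crc32c, the owner and offset feed a second, and the two results are
 * combined (one shifted up by 31 bits and xor'ed with the other).
 * hash_extent_data_ref_item() recomputes the same hash from an on-disk ref.
 */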
1066 static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
1067 {
1068         u32 high_crc = ~(u32)0;
1069         u32 low_crc = ~(u32)0;
1070         __le64 lenum;
1071
1072         lenum = cpu_to_le64(root_objectid);
1073         high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
1074         lenum = cpu_to_le64(owner);
1075         low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
1076         lenum = cpu_to_le64(offset);
1077         low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
1078
1079         return ((u64)high_crc << 31) ^ (u64)low_crc;
1080 }
1081
1082 static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
1083                                      struct btrfs_extent_data_ref *ref)
1084 {
1085         return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
1086                                     btrfs_extent_data_ref_objectid(leaf, ref),
1087                                     btrfs_extent_data_ref_offset(leaf, ref));
1088 }
1089
1090 static int match_extent_data_ref(struct extent_buffer *leaf,
1091                                  struct btrfs_extent_data_ref *ref,
1092                                  u64 root_objectid, u64 owner, u64 offset)
1093 {
1094         if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
1095             btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
1096             btrfs_extent_data_ref_offset(leaf, ref) != offset)
1097                 return 0;
1098         return 1;
1099 }
1100
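/*
 * Find an existing data back ref item.  Shared (full) refs are keyed
 * directly by the parent block, so a single search is enough; implicit refs
 * are keyed by hash, so on a collision we walk forward through the leaf
 * until a ref matching (root, owner, offset) turns up.  Returns 0 if found,
 * -ENOENT otherwise.
 */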
1101 static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
1102                                            struct btrfs_root *root,
1103                                            struct btrfs_path *path,
1104                                            u64 bytenr, u64 parent,
1105                                            u64 root_objectid,
1106                                            u64 owner, u64 offset)
1107 {
1108         struct btrfs_key key;
1109         struct btrfs_extent_data_ref *ref;
1110         struct extent_buffer *leaf;
1111         u32 nritems;
1112         int ret;
1113         int recow;
1114         int err = -ENOENT;
1115
1116         key.objectid = bytenr;
1117         if (parent) {
1118                 key.type = BTRFS_SHARED_DATA_REF_KEY;
1119                 key.offset = parent;
1120         } else {
1121                 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1122                 key.offset = hash_extent_data_ref(root_objectid,
1123                                                   owner, offset);
1124         }
1125 again:
1126         recow = 0;
1127         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1128         if (ret < 0) {
1129                 err = ret;
1130                 goto fail;
1131         }
1132
1133         if (parent) {
1134                 if (!ret)
1135                         return 0;
1136 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1137                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1138                 btrfs_release_path(path);
1139                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1140                 if (ret < 0) {
1141                         err = ret;
1142                         goto fail;
1143                 }
1144                 if (!ret)
1145                         return 0;
1146 #endif
1147                 goto fail;
1148         }
1149
1150         leaf = path->nodes[0];
1151         nritems = btrfs_header_nritems(leaf);
1152         while (1) {
1153                 if (path->slots[0] >= nritems) {
1154                         ret = btrfs_next_leaf(root, path);
1155                         if (ret < 0)
1156                                 err = ret;
1157                         if (ret)
1158                                 goto fail;
1159
1160                         leaf = path->nodes[0];
1161                         nritems = btrfs_header_nritems(leaf);
1162                         recow = 1;
1163                 }
1164
1165                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1166                 if (key.objectid != bytenr ||
1167                     key.type != BTRFS_EXTENT_DATA_REF_KEY)
1168                         goto fail;
1169
1170                 ref = btrfs_item_ptr(leaf, path->slots[0],
1171                                      struct btrfs_extent_data_ref);
1172
1173                 if (match_extent_data_ref(leaf, ref, root_objectid,
1174                                           owner, offset)) {
1175                         if (recow) {
1176                                 btrfs_release_path(path);
1177                                 goto again;
1178                         }
1179                         err = 0;
1180                         break;
1181                 }
1182                 path->slots[0]++;
1183         }
1184 fail:
1185         return err;
1186 }
1187
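/*
 * Add refs_to_add data references.  If a matching ref item already exists
 * its count is bumped; for implicit refs a hash collision is resolved by
 * incrementing the key offset and retrying until either a free slot or the
 * matching item is found.
 */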
1188 static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
1189                                            struct btrfs_root *root,
1190                                            struct btrfs_path *path,
1191                                            u64 bytenr, u64 parent,
1192                                            u64 root_objectid, u64 owner,
1193                                            u64 offset, int refs_to_add)
1194 {
1195         struct btrfs_key key;
1196         struct extent_buffer *leaf;
1197         u32 size;
1198         u32 num_refs;
1199         int ret;
1200
1201         key.objectid = bytenr;
1202         if (parent) {
1203                 key.type = BTRFS_SHARED_DATA_REF_KEY;
1204                 key.offset = parent;
1205                 size = sizeof(struct btrfs_shared_data_ref);
1206         } else {
1207                 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1208                 key.offset = hash_extent_data_ref(root_objectid,
1209                                                   owner, offset);
1210                 size = sizeof(struct btrfs_extent_data_ref);
1211         }
1212
1213         ret = btrfs_insert_empty_item(trans, root, path, &key, size);
1214         if (ret && ret != -EEXIST)
1215                 goto fail;
1216
1217         leaf = path->nodes[0];
1218         if (parent) {
1219                 struct btrfs_shared_data_ref *ref;
1220                 ref = btrfs_item_ptr(leaf, path->slots[0],
1221                                      struct btrfs_shared_data_ref);
1222                 if (ret == 0) {
1223                         btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
1224                 } else {
1225                         num_refs = btrfs_shared_data_ref_count(leaf, ref);
1226                         num_refs += refs_to_add;
1227                         btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
1228                 }
1229         } else {
1230                 struct btrfs_extent_data_ref *ref;
1231                 while (ret == -EEXIST) {
1232                         ref = btrfs_item_ptr(leaf, path->slots[0],
1233                                              struct btrfs_extent_data_ref);
1234                         if (match_extent_data_ref(leaf, ref, root_objectid,
1235                                                   owner, offset))
1236                                 break;
1237                         btrfs_release_path(path);
1238                         key.offset++;
1239                         ret = btrfs_insert_empty_item(trans, root, path, &key,
1240                                                       size);
1241                         if (ret && ret != -EEXIST)
1242                                 goto fail;
1243
1244                         leaf = path->nodes[0];
1245                 }
1246                 ref = btrfs_item_ptr(leaf, path->slots[0],
1247                                      struct btrfs_extent_data_ref);
1248                 if (ret == 0) {
1249                         btrfs_set_extent_data_ref_root(leaf, ref,
1250                                                        root_objectid);
1251                         btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
1252                         btrfs_set_extent_data_ref_offset(leaf, ref, offset);
1253                         btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
1254                 } else {
1255                         num_refs = btrfs_extent_data_ref_count(leaf, ref);
1256                         num_refs += refs_to_add;
1257                         btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
1258                 }
1259         }
1260         btrfs_mark_buffer_dirty(leaf);
1261         ret = 0;
1262 fail:
1263         btrfs_release_path(path);
1264         return ret;
1265 }
1266
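/*
 * Drop refs_to_drop references from the data ref item the path currently
 * points at, deleting the item once its count reaches zero.
 */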
1267 static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
1268                                            struct btrfs_root *root,
1269                                            struct btrfs_path *path,
1270                                            int refs_to_drop)
1271 {
1272         struct btrfs_key key;
1273         struct btrfs_extent_data_ref *ref1 = NULL;
1274         struct btrfs_shared_data_ref *ref2 = NULL;
1275         struct extent_buffer *leaf;
1276         u32 num_refs = 0;
1277         int ret = 0;
1278
1279         leaf = path->nodes[0];
1280         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1281
1282         if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1283                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1284                                       struct btrfs_extent_data_ref);
1285                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1286         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1287                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1288                                       struct btrfs_shared_data_ref);
1289                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1290 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1291         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1292                 struct btrfs_extent_ref_v0 *ref0;
1293                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1294                                       struct btrfs_extent_ref_v0);
1295                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1296 #endif
1297         } else {
1298                 BUG();
1299         }
1300
1301         BUG_ON(num_refs < refs_to_drop);
1302         num_refs -= refs_to_drop;
1303
1304         if (num_refs == 0) {
1305                 ret = btrfs_del_item(trans, root, path);
1306         } else {
1307                 if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
1308                         btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
1309                 else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
1310                         btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
1311 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1312                 else {
1313                         struct btrfs_extent_ref_v0 *ref0;
1314                         ref0 = btrfs_item_ptr(leaf, path->slots[0],
1315                                         struct btrfs_extent_ref_v0);
1316                         btrfs_set_ref_count_v0(leaf, ref0, num_refs);
1317                 }
1318 #endif
1319                 btrfs_mark_buffer_dirty(leaf);
1320         }
1321         return ret;
1322 }
1323
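/*
 * Read the reference count out of whichever data ref representation the
 * path (or the given inline ref) points at: keyed or inline, shared or
 * implicit, including the v0 compat format when it is built in.
 */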
1324 static noinline u32 extent_data_ref_count(struct btrfs_root *root,
1325                                           struct btrfs_path *path,
1326                                           struct btrfs_extent_inline_ref *iref)
1327 {
1328         struct btrfs_key key;
1329         struct extent_buffer *leaf;
1330         struct btrfs_extent_data_ref *ref1;
1331         struct btrfs_shared_data_ref *ref2;
1332         u32 num_refs = 0;
1333
1334         leaf = path->nodes[0];
1335         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1336         if (iref) {
1337                 if (btrfs_extent_inline_ref_type(leaf, iref) ==
1338                     BTRFS_EXTENT_DATA_REF_KEY) {
1339                         ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1340                         num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1341                 } else {
1342                         ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1343                         num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1344                 }
1345         } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1346                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1347                                       struct btrfs_extent_data_ref);
1348                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1349         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1350                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1351                                       struct btrfs_shared_data_ref);
1352                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1353 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1354         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1355                 struct btrfs_extent_ref_v0 *ref0;
1356                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1357                                       struct btrfs_extent_ref_v0);
1358                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1359 #endif
1360         } else {
1361                 WARN_ON(1);
1362         }
1363         return num_refs;
1364 }
1365
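/*
 * Tree block back refs carry no payload beyond the key itself, so the two
 * helpers below only need to search for (or insert) an empty item keyed by
 * either the owning root (implicit ref) or the parent block (full ref).
 */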
1366 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1367                                           struct btrfs_root *root,
1368                                           struct btrfs_path *path,
1369                                           u64 bytenr, u64 parent,
1370                                           u64 root_objectid)
1371 {
1372         struct btrfs_key key;
1373         int ret;
1374
1375         key.objectid = bytenr;
1376         if (parent) {
1377                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1378                 key.offset = parent;
1379         } else {
1380                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1381                 key.offset = root_objectid;
1382         }
1383
1384         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1385         if (ret > 0)
1386                 ret = -ENOENT;
1387 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1388         if (ret == -ENOENT && parent) {
1389                 btrfs_release_path(path);
1390                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1391                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1392                 if (ret > 0)
1393                         ret = -ENOENT;
1394         }
1395 #endif
1396         return ret;
1397 }
1398
1399 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1400                                           struct btrfs_root *root,
1401                                           struct btrfs_path *path,
1402                                           u64 bytenr, u64 parent,
1403                                           u64 root_objectid)
1404 {
1405         struct btrfs_key key;
1406         int ret;
1407
1408         key.objectid = bytenr;
1409         if (parent) {
1410                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1411                 key.offset = parent;
1412         } else {
1413                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1414                 key.offset = root_objectid;
1415         }
1416
1417         ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1418         btrfs_release_path(path);
1419         return ret;
1420 }
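
/*
 * Keyed tree block backrefs carry no payload: the key alone (a
 * SHARED_BLOCK_REF keyed on the parent block, or a TREE_BLOCK_REF keyed
 * on the owning root) encodes the whole reference, which is why
 * insert_tree_block_ref() inserts an empty item of size zero.
 */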
1421
1422 static inline int extent_ref_type(u64 parent, u64 owner)
1423 {
1424         int type;
1425         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1426                 if (parent > 0)
1427                         type = BTRFS_SHARED_BLOCK_REF_KEY;
1428                 else
1429                         type = BTRFS_TREE_BLOCK_REF_KEY;
1430         } else {
1431                 if (parent > 0)
1432                         type = BTRFS_SHARED_DATA_REF_KEY;
1433                 else
1434                         type = BTRFS_EXTENT_DATA_REF_KEY;
1435         }
1436         return type;
1437 }
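
/*
 * The mapping implemented by extent_ref_type(), summarized:
 *
 *   owner < BTRFS_FIRST_FREE_OBJECTID  (tree block)
 *       parent != 0  ->  BTRFS_SHARED_BLOCK_REF_KEY
 *       parent == 0  ->  BTRFS_TREE_BLOCK_REF_KEY
 *   owner >= BTRFS_FIRST_FREE_OBJECTID (file data)
 *       parent != 0  ->  BTRFS_SHARED_DATA_REF_KEY
 *       parent == 0  ->  BTRFS_EXTENT_DATA_REF_KEY
 *
 * A non-zero parent means the reference is recorded against a specific
 * parent block (a "shared" backref) rather than against a root.
 */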
1438
1439 static int find_next_key(struct btrfs_path *path, int level,
1440                          struct btrfs_key *key)
1441
1442 {
1443         for (; level < BTRFS_MAX_LEVEL; level++) {
1444                 if (!path->nodes[level])
1445                         break;
1446                 if (path->slots[level] + 1 >=
1447                     btrfs_header_nritems(path->nodes[level]))
1448                         continue;
1449                 if (level == 0)
1450                         btrfs_item_key_to_cpu(path->nodes[level], key,
1451                                               path->slots[level] + 1);
1452                 else
1453                         btrfs_node_key_to_cpu(path->nodes[level], key,
1454                                               path->slots[level] + 1);
1455                 return 0;
1456         }
1457         return 1;
1458 }
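
/*
 * find_next_key() reports the key immediately after the current path
 * position, walking up the levels until it finds a slot with a right
 * neighbour; it returns 1 only when the path already points at the last
 * key.  The inline backref code below uses it to check whether another
 * item for the same bytenr follows the extent item.
 */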
1459
1460 /*
1461  * look for inline back ref. if back ref is found, *ref_ret is set
1462  * to the address of inline back ref, and 0 is returned.
1463  *
1464  * if back ref isn't found, *ref_ret is set to the address where it
1465  * should be inserted, and -ENOENT is returned.
1466  *
1467  * if insert is true and there are too many inline back refs, the path
1468  * points to the extent item, and -EAGAIN is returned.
1469  *
1470  * NOTE: inline back refs are ordered in the same way that back ref
1471  *       items in the tree are ordered.
1472  */
1473 static noinline_for_stack
1474 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1475                                  struct btrfs_root *root,
1476                                  struct btrfs_path *path,
1477                                  struct btrfs_extent_inline_ref **ref_ret,
1478                                  u64 bytenr, u64 num_bytes,
1479                                  u64 parent, u64 root_objectid,
1480                                  u64 owner, u64 offset, int insert)
1481 {
1482         struct btrfs_key key;
1483         struct extent_buffer *leaf;
1484         struct btrfs_extent_item *ei;
1485         struct btrfs_extent_inline_ref *iref;
1486         u64 flags;
1487         u64 item_size;
1488         unsigned long ptr;
1489         unsigned long end;
1490         int extra_size;
1491         int type;
1492         int want;
1493         int ret;
1494         int err = 0;
1495         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
1496                                                  SKINNY_METADATA);
1497
1498         key.objectid = bytenr;
1499         key.type = BTRFS_EXTENT_ITEM_KEY;
1500         key.offset = num_bytes;
1501
1502         want = extent_ref_type(parent, owner);
1503         if (insert) {
1504                 extra_size = btrfs_extent_inline_ref_size(want);
1505                 path->keep_locks = 1;
1506         } else
1507                 extra_size = -1;
1508
1509         /*
1510          * Owner is our parent level, so we can just add one to get the level
1511          * for the block we are interested in.
1512          */
1513         if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
1514                 key.type = BTRFS_METADATA_ITEM_KEY;
1515                 key.offset = owner;
1516         }
1517
1518 again:
1519         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1520         if (ret < 0) {
1521                 err = ret;
1522                 goto out;
1523         }
1524
1525         /*
1526          * We may be a newly converted file system which still has the old fat
1527          * extent entries for metadata, so try and see if we have one of those.
1528          */
1529         if (ret > 0 && skinny_metadata) {
1530                 skinny_metadata = false;
1531                 if (path->slots[0]) {
1532                         path->slots[0]--;
1533                         btrfs_item_key_to_cpu(path->nodes[0], &key,
1534                                               path->slots[0]);
1535                         if (key.objectid == bytenr &&
1536                             key.type == BTRFS_EXTENT_ITEM_KEY &&
1537                             key.offset == num_bytes)
1538                                 ret = 0;
1539                 }
1540                 if (ret) {
1541                         key.type = BTRFS_EXTENT_ITEM_KEY;
1542                         key.offset = num_bytes;
1543                         btrfs_release_path(path);
1544                         goto again;
1545                 }
1546         }
1547
1548         if (ret && !insert) {
1549                 err = -ENOENT;
1550                 goto out;
1551         } else if (ret) {
1552                 err = -EIO;
1553                 WARN_ON(1);
1554                 goto out;
1555         }
1556
1557         leaf = path->nodes[0];
1558         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1559 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1560         if (item_size < sizeof(*ei)) {
1561                 if (!insert) {
1562                         err = -ENOENT;
1563                         goto out;
1564                 }
1565                 ret = convert_extent_item_v0(trans, root, path, owner,
1566                                              extra_size);
1567                 if (ret < 0) {
1568                         err = ret;
1569                         goto out;
1570                 }
1571                 leaf = path->nodes[0];
1572                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1573         }
1574 #endif
1575         BUG_ON(item_size < sizeof(*ei));
1576
1577         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1578         flags = btrfs_extent_flags(leaf, ei);
1579
1580         ptr = (unsigned long)(ei + 1);
1581         end = (unsigned long)ei + item_size;
1582
1583         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
1584                 ptr += sizeof(struct btrfs_tree_block_info);
1585                 BUG_ON(ptr > end);
1586         }
1587
1588         err = -ENOENT;
1589         while (1) {
1590                 if (ptr >= end) {
1591                         WARN_ON(ptr > end);
1592                         break;
1593                 }
1594                 iref = (struct btrfs_extent_inline_ref *)ptr;
1595                 type = btrfs_extent_inline_ref_type(leaf, iref);
1596                 if (want < type)
1597                         break;
1598                 if (want > type) {
1599                         ptr += btrfs_extent_inline_ref_size(type);
1600                         continue;
1601                 }
1602
1603                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1604                         struct btrfs_extent_data_ref *dref;
1605                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1606                         if (match_extent_data_ref(leaf, dref, root_objectid,
1607                                                   owner, offset)) {
1608                                 err = 0;
1609                                 break;
1610                         }
1611                         if (hash_extent_data_ref_item(leaf, dref) <
1612                             hash_extent_data_ref(root_objectid, owner, offset))
1613                                 break;
1614                 } else {
1615                         u64 ref_offset;
1616                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1617                         if (parent > 0) {
1618                                 if (parent == ref_offset) {
1619                                         err = 0;
1620                                         break;
1621                                 }
1622                                 if (ref_offset < parent)
1623                                         break;
1624                         } else {
1625                                 if (root_objectid == ref_offset) {
1626                                         err = 0;
1627                                         break;
1628                                 }
1629                                 if (ref_offset < root_objectid)
1630                                         break;
1631                         }
1632                 }
1633                 ptr += btrfs_extent_inline_ref_size(type);
1634         }
1635         if (err == -ENOENT && insert) {
1636                 if (item_size + extra_size >=
1637                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1638                         err = -EAGAIN;
1639                         goto out;
1640                 }
1641                 /*
1642                  * To add a new inline back ref, we have to make sure
1643                  * there is no corresponding back ref item.
1644                  * For simplicity, we just do not add a new inline back
1645                  * ref if there is any kind of item for this block.
1646                  */
1647                 if (find_next_key(path, 0, &key) == 0 &&
1648                     key.objectid == bytenr &&
1649                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1650                         err = -EAGAIN;
1651                         goto out;
1652                 }
1653         }
1654         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1655 out:
1656         if (insert) {
1657                 path->keep_locks = 0;
1658                 btrfs_unlock_up_safe(path, 1);
1659         }
1660         return err;
1661 }
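
/*
 * A caller is expected to handle the three documented return values
 * roughly like this (illustrative sketch only; insert_inline_extent_backref()
 * below is the real in-tree user of the pattern):
 *
 *     ret = lookup_inline_extent_backref(trans, root, path, &iref,
 *                                        bytenr, num_bytes, parent,
 *                                        root_objectid, owner, offset, 1);
 *     if (ret == 0)
 *             update_inline_extent_backref(...);   ref already exists, bump it
 *     else if (ret == -ENOENT)
 *             setup_inline_extent_backref(...);    add a new inline ref at *iref
 *     else if (ret == -EAGAIN)
 *             insert a keyed backref item;         no room left inline
 */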
1662
1663 /*
1664  * helper to add new inline back ref
1665  */
1666 static noinline_for_stack
1667 void setup_inline_extent_backref(struct btrfs_root *root,
1668                                  struct btrfs_path *path,
1669                                  struct btrfs_extent_inline_ref *iref,
1670                                  u64 parent, u64 root_objectid,
1671                                  u64 owner, u64 offset, int refs_to_add,
1672                                  struct btrfs_delayed_extent_op *extent_op)
1673 {
1674         struct extent_buffer *leaf;
1675         struct btrfs_extent_item *ei;
1676         unsigned long ptr;
1677         unsigned long end;
1678         unsigned long item_offset;
1679         u64 refs;
1680         int size;
1681         int type;
1682
1683         leaf = path->nodes[0];
1684         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1685         item_offset = (unsigned long)iref - (unsigned long)ei;
1686
1687         type = extent_ref_type(parent, owner);
1688         size = btrfs_extent_inline_ref_size(type);
1689
1690         btrfs_extend_item(root, path, size);
1691
1692         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1693         refs = btrfs_extent_refs(leaf, ei);
1694         refs += refs_to_add;
1695         btrfs_set_extent_refs(leaf, ei, refs);
1696         if (extent_op)
1697                 __run_delayed_extent_op(extent_op, leaf, ei);
1698
1699         ptr = (unsigned long)ei + item_offset;
1700         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1701         if (ptr < end - size)
1702                 memmove_extent_buffer(leaf, ptr + size, ptr,
1703                                       end - size - ptr);
1704
1705         iref = (struct btrfs_extent_inline_ref *)ptr;
1706         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1707         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1708                 struct btrfs_extent_data_ref *dref;
1709                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1710                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1711                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1712                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1713                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1714         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1715                 struct btrfs_shared_data_ref *sref;
1716                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1717                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1718                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1719         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1720                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1721         } else {
1722                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1723         }
1724         btrfs_mark_buffer_dirty(leaf);
1725 }
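
/*
 * setup_inline_extent_backref() grows the extent item by the size of one
 * inline ref with btrfs_extend_item(), shifts the refs that sort after
 * the insertion point toward the end of the item with
 * memmove_extent_buffer(), and writes the new ref into the gap, keeping
 * inline refs in the same order as keyed backref items.
 */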
1726
1727 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1728                                  struct btrfs_root *root,
1729                                  struct btrfs_path *path,
1730                                  struct btrfs_extent_inline_ref **ref_ret,
1731                                  u64 bytenr, u64 num_bytes, u64 parent,
1732                                  u64 root_objectid, u64 owner, u64 offset)
1733 {
1734         int ret;
1735
1736         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1737                                            bytenr, num_bytes, parent,
1738                                            root_objectid, owner, offset, 0);
1739         if (ret != -ENOENT)
1740                 return ret;
1741
1742         btrfs_release_path(path);
1743         *ref_ret = NULL;
1744
1745         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1746                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1747                                             root_objectid);
1748         } else {
1749                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1750                                              root_objectid, owner, offset);
1751         }
1752         return ret;
1753 }
1754
1755 /*
1756  * helper to update/remove inline back ref
1757  */
1758 static noinline_for_stack
1759 void update_inline_extent_backref(struct btrfs_root *root,
1760                                   struct btrfs_path *path,
1761                                   struct btrfs_extent_inline_ref *iref,
1762                                   int refs_to_mod,
1763                                   struct btrfs_delayed_extent_op *extent_op)
1764 {
1765         struct extent_buffer *leaf;
1766         struct btrfs_extent_item *ei;
1767         struct btrfs_extent_data_ref *dref = NULL;
1768         struct btrfs_shared_data_ref *sref = NULL;
1769         unsigned long ptr;
1770         unsigned long end;
1771         u32 item_size;
1772         int size;
1773         int type;
1774         u64 refs;
1775
1776         leaf = path->nodes[0];
1777         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1778         refs = btrfs_extent_refs(leaf, ei);
1779         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1780         refs += refs_to_mod;
1781         btrfs_set_extent_refs(leaf, ei, refs);
1782         if (extent_op)
1783                 __run_delayed_extent_op(extent_op, leaf, ei);
1784
1785         type = btrfs_extent_inline_ref_type(leaf, iref);
1786
1787         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1788                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1789                 refs = btrfs_extent_data_ref_count(leaf, dref);
1790         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1791                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1792                 refs = btrfs_shared_data_ref_count(leaf, sref);
1793         } else {
1794                 refs = 1;
1795                 BUG_ON(refs_to_mod != -1);
1796         }
1797
1798         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1799         refs += refs_to_mod;
1800
1801         if (refs > 0) {
1802                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1803                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1804                 else
1805                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1806         } else {
1807                 size =  btrfs_extent_inline_ref_size(type);
1808                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1809                 ptr = (unsigned long)iref;
1810                 end = (unsigned long)ei + item_size;
1811                 if (ptr + size < end)
1812                         memmove_extent_buffer(leaf, ptr, ptr + size,
1813                                               end - ptr - size);
1814                 item_size -= size;
1815                 btrfs_truncate_item(root, path, item_size, 1);
1816         }
1817         btrfs_mark_buffer_dirty(leaf);
1818 }
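
/*
 * update_inline_extent_backref() is the inverse of the setup helper
 * above: the extent item's total ref count is adjusted first, and when
 * the individual inline ref's count drops to zero the ref is cut out of
 * the item with memmove_extent_buffer() plus btrfs_truncate_item()
 * rather than by deleting a separate item.
 */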
1819
1820 static noinline_for_stack
1821 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1822                                  struct btrfs_root *root,
1823                                  struct btrfs_path *path,
1824                                  u64 bytenr, u64 num_bytes, u64 parent,
1825                                  u64 root_objectid, u64 owner,
1826                                  u64 offset, int refs_to_add,
1827                                  struct btrfs_delayed_extent_op *extent_op)
1828 {
1829         struct btrfs_extent_inline_ref *iref;
1830         int ret;
1831
1832         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1833                                            bytenr, num_bytes, parent,
1834                                            root_objectid, owner, offset, 1);
1835         if (ret == 0) {
1836                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1837                 update_inline_extent_backref(root, path, iref,
1838                                              refs_to_add, extent_op);
1839         } else if (ret == -ENOENT) {
1840                 setup_inline_extent_backref(root, path, iref, parent,
1841                                             root_objectid, owner, offset,
1842                                             refs_to_add, extent_op);
1843                 ret = 0;
1844         }
1845         return ret;
1846 }
1847
1848 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1849                                  struct btrfs_root *root,
1850                                  struct btrfs_path *path,
1851                                  u64 bytenr, u64 parent, u64 root_objectid,
1852                                  u64 owner, u64 offset, int refs_to_add)
1853 {
1854         int ret;
1855         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1856                 BUG_ON(refs_to_add != 1);
1857                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1858                                             parent, root_objectid);
1859         } else {
1860                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1861                                              parent, root_objectid,
1862                                              owner, offset, refs_to_add);
1863         }
1864         return ret;
1865 }
1866
1867 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1868                                  struct btrfs_root *root,
1869                                  struct btrfs_path *path,
1870                                  struct btrfs_extent_inline_ref *iref,
1871                                  int refs_to_drop, int is_data)
1872 {
1873         int ret = 0;
1874
1875         BUG_ON(!is_data && refs_to_drop != 1);
1876         if (iref) {
1877                 update_inline_extent_backref(root, path, iref,
1878                                              -refs_to_drop, NULL);
1879         } else if (is_data) {
1880                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
1881         } else {
1882                 ret = btrfs_del_item(trans, root, path);
1883         }
1884         return ret;
1885 }
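
/*
 * remove_extent_backref() mirrors insert_extent_backref(): an inline ref
 * is shrunk in place via update_inline_extent_backref(), a keyed data
 * ref goes through remove_extent_data_ref(), and a keyed tree block ref
 * is simply deleted.  Keyed tree block refs carry no count, hence the
 * BUG_ON(!is_data && refs_to_drop != 1).
 */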
1886
1887 static int btrfs_issue_discard(struct block_device *bdev,
1888                                 u64 start, u64 len)
1889 {
1890         return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
1891 }
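
/*
 * blkdev_issue_discard() takes 512-byte sectors, so the byte offsets are
 * shifted right by 9 above.  For example (hypothetical numbers), start =
 * 1MiB and len = 64KiB become sector 2048 and nr_sects 128.
 */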
1892
1893 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1894                                 u64 num_bytes, u64 *actual_bytes)
1895 {
1896         int ret;
1897         u64 discarded_bytes = 0;
1898         struct btrfs_bio *bbio = NULL;
1899
1900
1901         /* Tell the block device(s) that the sectors can be discarded */
1902         ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
1903                               bytenr, &num_bytes, &bbio, 0);
1904         /* Error condition is -ENOMEM */
1905         if (!ret) {
1906                 struct btrfs_bio_stripe *stripe = bbio->stripes;
1907                 int i;
1908
1909
1910                 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
1911                         if (!stripe->dev->can_discard)
1912                                 continue;
1913
1914                         ret = btrfs_issue_discard(stripe->dev->bdev,
1915                                                   stripe->physical,
1916                                                   stripe->length);
1917                         if (!ret)
1918                                 discarded_bytes += stripe->length;
1919                         else if (ret != -EOPNOTSUPP)
1920                                 break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */
1921
1922                         /*
1923                          * Just in case we get back EOPNOTSUPP for some reason,
1924                          * just ignore the return value so we don't screw up
1925                          * people calling discard_extent.
1926                          */
1927                         ret = 0;
1928                 }
1929                 kfree(bbio);
1930         }
1931
1932         if (actual_bytes)
1933                 *actual_bytes = discarded_bytes;
1934
1935
1936         if (ret == -EOPNOTSUPP)
1937                 ret = 0;
1938         return ret;
1939 }
1940
1941 /* Can return -ENOMEM */
1942 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1943                          struct btrfs_root *root,
1944                          u64 bytenr, u64 num_bytes, u64 parent,
1945                          u64 root_objectid, u64 owner, u64 offset, int for_cow)
1946 {
1947         int ret;
1948         struct btrfs_fs_info *fs_info = root->fs_info;
1949
1950         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1951                root_objectid == BTRFS_TREE_LOG_OBJECTID);
1952
1953         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1954                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
1955                                         num_bytes,
1956                                         parent, root_objectid, (int)owner,
1957                                         BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1958         } else {
1959                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
1960                                         num_bytes,
1961                                         parent, root_objectid, owner, offset,
1962                                         BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1963         }
1964         return ret;
1965 }
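
/*
 * Note that btrfs_inc_extent_ref() never touches the extent tree
 * directly: it only queues a delayed ref (a tree ref when owner is a
 * tree level below BTRFS_FIRST_FREE_OBJECTID, a data ref otherwise),
 * and the actual extent tree update happens when btrfs_run_delayed_refs()
 * processes the queue.  A caller adding a plain, non-shared ref to a data
 * extent on behalf of an inode might pass something like (hypothetical
 * values):
 *
 *     btrfs_inc_extent_ref(trans, root, disk_bytenr, disk_num_bytes, 0,
 *                          root->root_key.objectid, btrfs_ino(inode),
 *                          file_offset, 0);
 */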
1966
1967 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1968                                   struct btrfs_root *root,
1969                                   u64 bytenr, u64 num_bytes,
1970                                   u64 parent, u64 root_objectid,
1971                                   u64 owner, u64 offset, int refs_to_add,
1972                                   struct btrfs_delayed_extent_op *extent_op)
1973 {
1974         struct btrfs_path *path;
1975         struct extent_buffer *leaf;
1976         struct btrfs_extent_item *item;
1977         u64 refs;
1978         int ret;
1979         int err = 0;
1980
1981         path = btrfs_alloc_path();
1982         if (!path)
1983                 return -ENOMEM;
1984
1985         path->reada = 1;
1986         path->leave_spinning = 1;
1987         /* this will setup the path even if it fails to insert the back ref */
1988         ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
1989                                            path, bytenr, num_bytes, parent,
1990                                            root_objectid, owner, offset,
1991                                            refs_to_add, extent_op);
1992         if (ret == 0)
1993                 goto out;
1994
1995         if (ret != -EAGAIN) {
1996                 err = ret;
1997                 goto out;
1998         }
1999
2000         leaf = path->nodes[0];
2001         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2002         refs = btrfs_extent_refs(leaf, item);
2003         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
2004         if (extent_op)
2005                 __run_delayed_extent_op(extent_op, leaf, item);
2006
2007         btrfs_mark_buffer_dirty(leaf);
2008         btrfs_release_path(path);
2009
2010         path->reada = 1;
2011         path->leave_spinning = 1;
2012
2013         /* now insert the actual backref */
2014         ret = insert_extent_backref(trans, root->fs_info->extent_root,
2015                                     path, bytenr, parent, root_objectid,
2016                                     owner, offset, refs_to_add);
2017         if (ret)
2018                 btrfs_abort_transaction(trans, root, ret);
2019 out:
2020         btrfs_free_path(path);
2021         return err;
2022 }
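
/*
 * The -EAGAIN case in __btrfs_inc_extent_ref() covers the situation where
 * insert_inline_extent_backref() could not fit another inline ref into
 * the extent item (the item is too large, or another item for this block
 * already exists).  The path is then left pointing at the extent item:
 * its ref count is bumped here and the backref is added as a separate
 * keyed item by insert_extent_backref().
 */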
2023
2024 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
2025                                 struct btrfs_root *root,
2026                                 struct btrfs_delayed_ref_node *node,
2027                                 struct btrfs_delayed_extent_op *extent_op,
2028                                 int insert_reserved)
2029 {
2030         int ret = 0;
2031         struct btrfs_delayed_data_ref *ref;
2032         struct btrfs_key ins;
2033         u64 parent = 0;
2034         u64 ref_root = 0;
2035         u64 flags = 0;
2036
2037         ins.objectid = node->bytenr;
2038         ins.offset = node->num_bytes;
2039         ins.type = BTRFS_EXTENT_ITEM_KEY;
2040
2041         ref = btrfs_delayed_node_to_data_ref(node);
2042         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
2043                 parent = ref->parent;
2044         else
2045                 ref_root = ref->root;
2046
2047         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2048                 if (extent_op)
2049                         flags |= extent_op->flags_to_set;
2050                 ret = alloc_reserved_file_extent(trans, root,
2051                                                  parent, ref_root, flags,
2052                                                  ref->objectid, ref->offset,
2053                                                  &ins, node->ref_mod);
2054         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2055                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2056                                              node->num_bytes, parent,
2057                                              ref_root, ref->objectid,
2058                                              ref->offset, node->ref_mod,
2059                                              extent_op);
2060         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2061                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2062                                           node->num_bytes, parent,
2063                                           ref_root, ref->objectid,
2064                                           ref->offset, node->ref_mod,
2065                                           extent_op);
2066         } else {
2067                 BUG();
2068         }
2069         return ret;
2070 }
2071
2072 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2073                                     struct extent_buffer *leaf,
2074                                     struct btrfs_extent_item *ei)
2075 {
2076         u64 flags = btrfs_extent_flags(leaf, ei);
2077         if (extent_op->update_flags) {
2078                 flags |= extent_op->flags_to_set;
2079                 btrfs_set_extent_flags(leaf, ei, flags);
2080         }
2081
2082         if (extent_op->update_key) {
2083                 struct btrfs_tree_block_info *bi;
2084                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2085                 bi = (struct btrfs_tree_block_info *)(ei + 1);
2086                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2087         }
2088 }
2089
2090 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2091                                  struct btrfs_root *root,
2092                                  struct btrfs_delayed_ref_node *node,
2093                                  struct btrfs_delayed_extent_op *extent_op)
2094 {
2095         struct btrfs_key key;
2096         struct btrfs_path *path;
2097         struct btrfs_extent_item *ei;
2098         struct extent_buffer *leaf;
2099         u32 item_size;
2100         int ret;
2101         int err = 0;
2102         int metadata = (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2103                         node->type == BTRFS_SHARED_BLOCK_REF_KEY);
2104
2105         if (trans->aborted)
2106                 return 0;
2107
2108         if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2109                 metadata = 0;
2110
2111         path = btrfs_alloc_path();
2112         if (!path)
2113                 return -ENOMEM;
2114
2115         key.objectid = node->bytenr;
2116
2117         if (metadata) {
2118                 struct btrfs_delayed_tree_ref *tree_ref;
2119
2120                 tree_ref = btrfs_delayed_node_to_tree_ref(node);
2121                 key.type = BTRFS_METADATA_ITEM_KEY;
2122                 key.offset = tree_ref->level;
2123         } else {
2124                 key.type = BTRFS_EXTENT_ITEM_KEY;
2125                 key.offset = node->num_bytes;
2126         }
2127
2128 again:
2129         path->reada = 1;
2130         path->leave_spinning = 1;
2131         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2132                                 path, 0, 1);
2133         if (ret < 0) {
2134                 err = ret;
2135                 goto out;
2136         }
2137         if (ret > 0) {
2138                 if (metadata) {
2139                         btrfs_release_path(path);
2140                         metadata = 0;
2141
2142                         key.offset = node->num_bytes;
2143                         key.type = BTRFS_EXTENT_ITEM_KEY;
2144                         goto again;
2145                 }
2146                 err = -EIO;
2147                 goto out;
2148         }
2149
2150         leaf = path->nodes[0];
2151         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2152 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2153         if (item_size < sizeof(*ei)) {
2154                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2155                                              path, (u64)-1, 0);
2156                 if (ret < 0) {
2157                         err = ret;
2158                         goto out;
2159                 }
2160                 leaf = path->nodes[0];
2161                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2162         }
2163 #endif
2164         BUG_ON(item_size < sizeof(*ei));
2165         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2166         __run_delayed_extent_op(extent_op, leaf, ei);
2167
2168         btrfs_mark_buffer_dirty(leaf);
2169 out:
2170         btrfs_free_path(path);
2171         return err;
2172 }
2173
2174 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2175                                 struct btrfs_root *root,
2176                                 struct btrfs_delayed_ref_node *node,
2177                                 struct btrfs_delayed_extent_op *extent_op,
2178                                 int insert_reserved)
2179 {
2180         int ret = 0;
2181         struct btrfs_delayed_tree_ref *ref;
2182         struct btrfs_key ins;
2183         u64 parent = 0;
2184         u64 ref_root = 0;
2185         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
2186                                                  SKINNY_METADATA);
2187
2188         ref = btrfs_delayed_node_to_tree_ref(node);
2189         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2190                 parent = ref->parent;
2191         else
2192                 ref_root = ref->root;
2193
2194         ins.objectid = node->bytenr;
2195         if (skinny_metadata) {
2196                 ins.offset = ref->level;
2197                 ins.type = BTRFS_METADATA_ITEM_KEY;
2198         } else {
2199                 ins.offset = node->num_bytes;
2200                 ins.type = BTRFS_EXTENT_ITEM_KEY;
2201         }
2202
2203         BUG_ON(node->ref_mod != 1);
2204         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2205                 BUG_ON(!extent_op || !extent_op->update_flags);
2206                 ret = alloc_reserved_tree_block(trans, root,
2207                                                 parent, ref_root,
2208                                                 extent_op->flags_to_set,
2209                                                 &extent_op->key,
2210                                                 ref->level, &ins);
2211         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2212                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2213                                              node->num_bytes, parent, ref_root,
2214                                              ref->level, 0, 1, extent_op);
2215         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2216                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2217                                           node->num_bytes, parent, ref_root,
2218                                           ref->level, 0, 1, extent_op);
2219         } else {
2220                 BUG();
2221         }
2222         return ret;
2223 }
2224
2225 /* helper function to actually process a single delayed ref entry */
2226 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2227                                struct btrfs_root *root,
2228                                struct btrfs_delayed_ref_node *node,
2229                                struct btrfs_delayed_extent_op *extent_op,
2230                                int insert_reserved)
2231 {
2232         int ret = 0;
2233
2234         if (trans->aborted)
2235                 return 0;
2236
2237         if (btrfs_delayed_ref_is_head(node)) {
2238                 struct btrfs_delayed_ref_head *head;
2239                 /*
2240                  * we've hit the end of the chain and we were supposed
2241                  * to insert this extent into the tree.  But, it got
2242                  * deleted before we ever needed to insert it, so all
2243                  * we have to do is clean up the accounting
2244                  */
2245                 BUG_ON(extent_op);
2246                 head = btrfs_delayed_node_to_head(node);
2247                 if (insert_reserved) {
2248                         btrfs_pin_extent(root, node->bytenr,
2249                                          node->num_bytes, 1);
2250                         if (head->is_data) {
2251                                 ret = btrfs_del_csums(trans, root,
2252                                                       node->bytenr,
2253                                                       node->num_bytes);
2254                         }
2255                 }
2256                 return ret;
2257         }
2258
2259         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2260             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2261                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2262                                            insert_reserved);
2263         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2264                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2265                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2266                                            insert_reserved);
2267         else
2268                 BUG();
2269         return ret;
2270 }
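
/*
 * Dispatch summary for run_one_delayed_ref(): a head node only needs its
 * reserved space settled (pinning the extent and, for data, dropping
 * csums), tree block refs go through run_delayed_tree_ref(), and data
 * refs go through run_delayed_data_ref().
 */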
2271
2272 static noinline struct btrfs_delayed_ref_node *
2273 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2274 {
2275         struct rb_node *node;
2276         struct btrfs_delayed_ref_node *ref;
2277         int action = BTRFS_ADD_DELAYED_REF;
2278 again:
2279         /*
2280          * select delayed ref of type BTRFS_ADD_DELAYED_REF first.
2281          * this prevents ref count from going down to zero when
2282          * this prevents the ref count from going down to zero while
2283          * there are still pending delayed refs.
2284         node = rb_prev(&head->node.rb_node);
2285         while (1) {
2286                 if (!node)
2287                         break;
2288                 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2289                                 rb_node);
2290                 if (ref->bytenr != head->node.bytenr)
2291                         break;
2292                 if (ref->action == action)
2293                         return ref;
2294                 node = rb_prev(node);
2295         }
2296         if (action == BTRFS_ADD_DELAYED_REF) {
2297                 action = BTRFS_DROP_DELAYED_REF;
2298                 goto again;
2299         }
2300         return NULL;
2301 }
2302
2303 /*
2304  * Returns the number of refs processed, or 0 if called with an already
2305  * aborted transaction.  Returns -ENOMEM or -EIO on failure.
2306  */
2307 static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2308                                        struct btrfs_root *root,
2309                                        struct list_head *cluster)
2310 {
2311         struct btrfs_delayed_ref_root *delayed_refs;
2312         struct btrfs_delayed_ref_node *ref;
2313         struct btrfs_delayed_ref_head *locked_ref = NULL;
2314         struct btrfs_delayed_extent_op *extent_op;
2315         struct btrfs_fs_info *fs_info = root->fs_info;
2316         int ret;
2317         int count = 0;
2318         int must_insert_reserved = 0;
2319
2320         delayed_refs = &trans->transaction->delayed_refs;
2321         while (1) {
2322                 if (!locked_ref) {
2323                         /* pick a new head ref from the cluster list */
2324                         if (list_empty(cluster))
2325                                 break;
2326
2327                         locked_ref = list_entry(cluster->next,
2328                                      struct btrfs_delayed_ref_head, cluster);
2329
2330                         /* grab the lock that says we are going to process
2331                          * all the refs for this head */
2332                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2333
2334                         /*
2335                          * we may have dropped the spin lock to get the head
2336                          * mutex lock, and that might have given someone else
2337                          * time to free the head.  If that's true, it has been
2338                          * removed from our list and we can move on.
2339                          */
2340                         if (ret == -EAGAIN) {
2341                                 locked_ref = NULL;
2342                                 count++;
2343                                 continue;
2344                         }
2345                 }
2346
2347                 /*
2348                  * We need to try and merge add/drops of the same ref since we
2349                  * can run into issues with relocate dropping the implicit ref
2350                  * and then it being added back again before the drop can
2351                  * finish.  If we merged anything we need to re-loop so we can
2352                  * get a good ref.
2353                  */
2354                 btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
2355                                          locked_ref);
2356
2357                 /*
2358                  * locked_ref is the head node, so we have to go one
2359                  * node back for any delayed ref updates
2360                  */
2361                 ref = select_delayed_ref(locked_ref);
2362
2363                 if (ref && ref->seq &&
2364                     btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2365                         /*
2366                          * there are still refs with lower seq numbers in the
2367                          * process of being added. Don't run this ref yet.
2368                          */
2369                         list_del_init(&locked_ref->cluster);
2370                         btrfs_delayed_ref_unlock(locked_ref);
2371                         locked_ref = NULL;
2372                         delayed_refs->num_heads_ready++;
2373                         spin_unlock(&delayed_refs->lock);
2374                         cond_resched();
2375                         spin_lock(&delayed_refs->lock);
2376                         continue;
2377                 }
2378
2379                 /*
2380                  * record the must insert reserved flag before we
2381                  * drop the spin lock.
2382                  */
2383                 must_insert_reserved = locked_ref->must_insert_reserved;
2384                 locked_ref->must_insert_reserved = 0;
2385
2386                 extent_op = locked_ref->extent_op;
2387                 locked_ref->extent_op = NULL;
2388
2389                 if (!ref) {
2390                         /* All delayed refs have been processed; go ahead
2391                          * and send the head node to run_one_delayed_ref,
2392                          * so that any accounting fixes can happen
2393                          */
2394                         ref = &locked_ref->node;
2395
2396                         if (extent_op && must_insert_reserved) {
2397                                 btrfs_free_delayed_extent_op(extent_op);
2398                                 extent_op = NULL;
2399                         }
2400
2401                         if (extent_op) {
2402                                 spin_unlock(&delayed_refs->lock);
2403
2404                                 ret = run_delayed_extent_op(trans, root,
2405                                                             ref, extent_op);
2406                                 btrfs_free_delayed_extent_op(extent_op);
2407
2408                                 if (ret) {
2409                                         btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
2410                                         spin_lock(&delayed_refs->lock);
2411                                         btrfs_delayed_ref_unlock(locked_ref);
2412                                         return ret;
2413                                 }
2414
2415                                 goto next;
2416                         }
2417                 }
2418
2419                 ref->in_tree = 0;
2420                 rb_erase(&ref->rb_node, &delayed_refs->root);
2421                 delayed_refs->num_entries--;
2422                 if (!btrfs_delayed_ref_is_head(ref)) {
2423                         /*
2424                          * when we play the delayed ref, also correct the
2425                          * ref_mod on head
2426                          */
2427                         switch (ref->action) {
2428                         case BTRFS_ADD_DELAYED_REF:
2429                         case BTRFS_ADD_DELAYED_EXTENT:
2430                                 locked_ref->node.ref_mod -= ref->ref_mod;
2431                                 break;
2432                         case BTRFS_DROP_DELAYED_REF:
2433                                 locked_ref->node.ref_mod += ref->ref_mod;
2434                                 break;
2435                         default:
2436                                 WARN_ON(1);
2437                         }
2438                 }
2439                 spin_unlock(&delayed_refs->lock);
2440
2441                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2442                                           must_insert_reserved);
2443
2444                 btrfs_free_delayed_extent_op(extent_op);
2445                 if (ret) {
2446                         btrfs_delayed_ref_unlock(locked_ref);
2447                         btrfs_put_delayed_ref(ref);
2448                         btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
2449                         spin_lock(&delayed_refs->lock);
2450                         return ret;
2451                 }
2452
2453                 /*
2454                  * If this node is a head, that means all the refs in this head
2455                  * have been dealt with, and we will pick the next head to deal
2456                  * with, so we must unlock the head and drop it from the cluster
2457                  * list before we release it.
2458                  */
2459                 if (btrfs_delayed_ref_is_head(ref)) {
2460                         list_del_init(&locked_ref->cluster);
2461                         btrfs_delayed_ref_unlock(locked_ref);
2462                         locked_ref = NULL;
2463                 }
2464                 btrfs_put_delayed_ref(ref);
2465                 count++;
2466 next:
2467                 cond_resched();
2468                 spin_lock(&delayed_refs->lock);
2469         }
2470         return count;
2471 }
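
/*
 * Per-iteration shape of run_clustered_refs(): lock a head from the
 * cluster, merge matching add/drop refs, pick one ref with
 * select_delayed_ref() (skipping heads whose refs still have older
 * sequence numbers pending), take must_insert_reserved and the extent_op
 * off the head, drop the spinlock, run the ref (or just the extent_op
 * for a bare head), then re-take the lock.  The return value is the
 * number of refs processed, or a negative error from running a ref.
 */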
2472
2473 #ifdef SCRAMBLE_DELAYED_REFS
2474 /*
2475  * Normally delayed refs get processed in ascending bytenr order. This
2476  * correlates in most cases to the order added. To expose dependencies on this
2477  * order, we start to process the tree in the middle instead of the beginning
2478  */
2479 static u64 find_middle(struct rb_root *root)
2480 {
2481         struct rb_node *n = root->rb_node;
2482         struct btrfs_delayed_ref_node *entry;
2483         int alt = 1;
2484         u64 middle;
2485         u64 first = 0, last = 0;
2486
2487         n = rb_first(root);
2488         if (n) {
2489                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2490                 first = entry->bytenr;
2491         }
2492         n = rb_last(root);
2493         if (n) {
2494                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2495                 last = entry->bytenr;
2496         }
2497         n = root->rb_node;
2498
2499         while (n) {
2500                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2501                 WARN_ON(!entry->in_tree);
2502
2503                 middle = entry->bytenr;
2504
2505                 if (alt)
2506                         n = n->rb_left;
2507                 else
2508                         n = n->rb_right;
2509
2510                 alt = 1 - alt;
2511         }
2512         return middle;
2513 }
2514 #endif
2515
2516 int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
2517                                          struct btrfs_fs_info *fs_info)
2518 {
2519         struct qgroup_update *qgroup_update;
2520         int ret = 0;
2521
2522         if (list_empty(&trans->qgroup_ref_list) !=
2523             !trans->delayed_ref_elem.seq) {
2524                 /* list without seq or seq without list */
2525                 btrfs_err(fs_info,
2526                         "qgroup accounting update error, list is%s empty, seq is %llu",
2527                         list_empty(&trans->qgroup_ref_list) ? "" : " not",
2528                         trans->delayed_ref_elem.seq);
2529                 BUG();
2530         }
2531
2532         if (!trans->delayed_ref_elem.seq)
2533                 return 0;
2534
2535         while (!list_empty(&trans->qgroup_ref_list)) {
2536                 qgroup_update = list_first_entry(&trans->qgroup_ref_list,
2537                                                  struct qgroup_update, list);
2538                 list_del(&qgroup_update->list);
2539                 if (!ret)
2540                         ret = btrfs_qgroup_account_ref(
2541                                         trans, fs_info, qgroup_update->node,
2542                                         qgroup_update->extent_op);
2543                 kfree(qgroup_update);
2544         }
2545
2546         btrfs_put_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
2547
2548         return ret;
2549 }
2550
2551 static int refs_newer(struct btrfs_delayed_ref_root *delayed_refs, int seq,
2552                       int count)
2553 {
2554         int val = atomic_read(&delayed_refs->ref_seq);
2555
2556         if (val < seq || val >= seq + count)
2557                 return 1;
2558         return 0;
2559 }
2560
2561 /*
2562  * this starts processing the delayed reference count updates and
2563  * extent insertions we have queued up so far.  count can be
2564  * 0, which means to process everything in the tree at the start
2565  * of the run (but not newly added entries), or it can be some target
2566  * number you'd like to process.
2567  *
2568  * Returns 0 on success or if called with an aborted transaction
2569  * Returns <0 on error and aborts the transaction
2570  */
2571 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2572                            struct btrfs_root *root, unsigned long count)
2573 {
2574         struct rb_node *node;
2575         struct btrfs_delayed_ref_root *delayed_refs;
2576         struct btrfs_delayed_ref_node *ref;
2577         struct list_head cluster;
2578         int ret;
2579         u64 delayed_start;
2580         int run_all = count == (unsigned long)-1;
2581         int run_most = 0;
2582         int loops;
2583
2584         /* We'll clean this up in btrfs_cleanup_transaction */
2585         if (trans->aborted)
2586                 return 0;
2587
2588         if (root == root->fs_info->extent_root)
2589                 root = root->fs_info->tree_root;
2590
2591         btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
2592
2593         delayed_refs = &trans->transaction->delayed_refs;
2594         INIT_LIST_HEAD(&cluster);
2595         if (count == 0) {
2596                 count = delayed_refs->num_entries * 2;
2597                 run_most = 1;
2598         }
2599
2600         if (!run_all && !run_most) {
2601                 int old;
2602                 int seq = atomic_read(&delayed_refs->ref_seq);
2603
2604 progress:
2605                 old = atomic_cmpxchg(&delayed_refs->procs_running_refs, 0, 1);
2606                 if (old) {
2607                         DEFINE_WAIT(__wait);
2608                         if (delayed_refs->num_entries < 16348)
2609                                 return 0;
2610
2611                         prepare_to_wait(&delayed_refs->wait, &__wait,
2612                                         TASK_UNINTERRUPTIBLE);
2613
2614                         old = atomic_cmpxchg(&delayed_refs->procs_running_refs, 0, 1);
2615                         if (old) {
2616                                 schedule();
2617                                 finish_wait(&delayed_refs->wait, &__wait);
2618
2619                                 if (!refs_newer(delayed_refs, seq, 256))
2620                                         goto progress;
2621                                 else
2622                                         return 0;
2623                         } else {
2624                                 finish_wait(&delayed_refs->wait, &__wait);
2625                                 goto again;
2626                         }
2627                 }
2628
2629         } else {
2630                 atomic_inc(&delayed_refs->procs_running_refs);
2631         }
2632
2633 again:
2634         loops = 0;
2635         spin_lock(&delayed_refs->lock);
2636
2637 #ifdef SCRAMBLE_DELAYED_REFS
2638         delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2639 #endif
2640
2641         while (1) {
2642                 if (!(run_all || run_most) &&
2643                     delayed_refs->num_heads_ready < 64)
2644                         break;
2645
2646                 /*
2647                  * go find something we can process in the rbtree.  We start at
2648                  * the beginning of the tree, and then build a cluster
2649                  * of refs to process starting at the first one we are able to
2650                  * lock
2651                  */
2652                 delayed_start = delayed_refs->run_delayed_start;
2653                 ret = btrfs_find_ref_cluster(trans, &cluster,
2654                                              delayed_refs->run_delayed_start);
2655                 if (ret)
2656                         break;
2657
2658                 ret = run_clustered_refs(trans, root, &cluster);
2659                 if (ret < 0) {
2660                         btrfs_release_ref_cluster(&cluster);
2661                         spin_unlock(&delayed_refs->lock);
2662                         btrfs_abort_transaction(trans, root, ret);
2663                         atomic_dec(&delayed_refs->procs_running_refs);
2664                         return ret;
2665                 }
2666
2667                 atomic_add(ret, &delayed_refs->ref_seq);
2668
2669                 count -= min_t(unsigned long, ret, count);
2670
2671                 if (count == 0)
2672                         break;
2673
2674                 if (delayed_start >= delayed_refs->run_delayed_start) {
2675                         if (loops == 0) {
2676                                 /*
2677                                  * btrfs_find_ref_cluster looped. let's do one
2678                                  * more cycle. if we don't run any delayed ref
2679                                  * during that cycle (because all of them
2680                                  * are blocked), bail out.
2681                                  */
2682                                 loops = 1;
2683                         } else {
2684                                 /*
2685                                  * no runnable refs left, stop trying
2686                                  */
2687                                 BUG_ON(run_all);
2688                                 break;
2689                         }
2690                 }
2691                 if (ret) {
2692                         /* refs were run, let's reset staleness detection */
2693                         loops = 0;
2694                 }
2695         }
2696
2697         if (run_all) {
2698                 if (!list_empty(&trans->new_bgs)) {
2699                         spin_unlock(&delayed_refs->lock);
2700                         btrfs_create_pending_block_groups(trans, root);
2701                         spin_lock(&delayed_refs->lock);
2702                 }
2703
2704                 node = rb_first(&delayed_refs->root);
2705                 if (!node)
2706                         goto out;
2707                 count = (unsigned long)-1;
2708
2709                 while (node) {
2710                         ref = rb_entry(node, struct btrfs_delayed_ref_node,
2711                                        rb_node);
2712                         if (btrfs_delayed_ref_is_head(ref)) {
2713                                 struct btrfs_delayed_ref_head *head;
2714
2715                                 head = btrfs_delayed_node_to_head(ref);
2716                                 atomic_inc(&ref->refs);
2717
2718                                 spin_unlock(&delayed_refs->lock);
2719                                 /*
2720                                  * Mutex was contended, block until it's
2721                                  * released and try again
2722                                  */
2723                                 mutex_lock(&head->mutex);
2724                                 mutex_unlock(&head->mutex);
2725
2726                                 btrfs_put_delayed_ref(ref);
2727                                 cond_resched();
2728                                 goto again;
2729                         }
2730                         node = rb_next(node);
2731                 }
2732                 spin_unlock(&delayed_refs->lock);
2733                 schedule_timeout(1);
2734                 goto again;
2735         }
2736 out:
2737         atomic_dec(&delayed_refs->procs_running_refs);
2738         smp_mb();
2739         if (waitqueue_active(&delayed_refs->wait))
2740                 wake_up(&delayed_refs->wait);
2741
2742         spin_unlock(&delayed_refs->lock);
2743         assert_qgroups_uptodate(trans);
2744         return 0;
2745 }
2746
2747 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2748                                 struct btrfs_root *root,
2749                                 u64 bytenr, u64 num_bytes, u64 flags,
2750                                 int is_data)
2751 {
2752         struct btrfs_delayed_extent_op *extent_op;
2753         int ret;
2754
2755         extent_op = btrfs_alloc_delayed_extent_op();
2756         if (!extent_op)
2757                 return -ENOMEM;
2758
2759         extent_op->flags_to_set = flags;
2760         extent_op->update_flags = 1;
2761         extent_op->update_key = 0;
2762         extent_op->is_data = is_data ? 1 : 0;
2763
2764         ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2765                                           num_bytes, extent_op);
2766         if (ret)
2767                 btrfs_free_delayed_extent_op(extent_op);
2768         return ret;
2769 }
2770
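/*
 * Look through the delayed refs queued for this bytenr to decide whether the
 * extent could be shared.  Returns 0 when the only pending ref is a data ref
 * from exactly this root/objectid/offset, 1 when other (or non-matching) refs
 * are queued, -ENOENT when nothing is pending for this bytenr, and -EAGAIN
 * when the head mutex was contended and the caller should retry.
 */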
2771 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2772                                       struct btrfs_root *root,
2773                                       struct btrfs_path *path,
2774                                       u64 objectid, u64 offset, u64 bytenr)
2775 {
2776         struct btrfs_delayed_ref_head *head;
2777         struct btrfs_delayed_ref_node *ref;
2778         struct btrfs_delayed_data_ref *data_ref;
2779         struct btrfs_delayed_ref_root *delayed_refs;
2780         struct rb_node *node;
2781         int ret = 0;
2782
2783         ret = -ENOENT;
2784         delayed_refs = &trans->transaction->delayed_refs;
2785         spin_lock(&delayed_refs->lock);
2786         head = btrfs_find_delayed_ref_head(trans, bytenr);
2787         if (!head)
2788                 goto out;
2789
2790         if (!mutex_trylock(&head->mutex)) {
2791                 atomic_inc(&head->node.refs);
2792                 spin_unlock(&delayed_refs->lock);
2793
2794                 btrfs_release_path(path);
2795
2796                 /*
2797                  * Mutex was contended, block until it's released and let
2798                  * caller try again
2799                  */
2800                 mutex_lock(&head->mutex);
2801                 mutex_unlock(&head->mutex);
2802                 btrfs_put_delayed_ref(&head->node);
2803                 return -EAGAIN;
2804         }
2805
2806         node = rb_prev(&head->node.rb_node);
2807         if (!node)
2808                 goto out_unlock;
2809
2810         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2811
2812         if (ref->bytenr != bytenr)
2813                 goto out_unlock;
2814
2815         ret = 1;
2816         if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
2817                 goto out_unlock;
2818
2819         data_ref = btrfs_delayed_node_to_data_ref(ref);
2820
2821         node = rb_prev(node);
2822         if (node) {
2823                 int seq = ref->seq;
2824
2825                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2826                 if (ref->bytenr == bytenr && ref->seq == seq)
2827                         goto out_unlock;
2828         }
2829
2830         if (data_ref->root != root->root_key.objectid ||
2831             data_ref->objectid != objectid || data_ref->offset != offset)
2832                 goto out_unlock;
2833
2834         ret = 0;
2835 out_unlock:
2836         mutex_unlock(&head->mutex);
2837 out:
2838         spin_unlock(&delayed_refs->lock);
2839         return ret;
2840 }
2841
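/*
 * Check the committed extent tree for cross references.  Returns 0 only when
 * the extent item for this bytenr carries a single inline data ref matching
 * this root/objectid/offset exactly and its generation is newer than the
 * root's last snapshot, i.e. the extent is provably not shared on disk.
 * Returns 1 when it might be shared and -ENOENT when no extent item exists.
 */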
2842 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2843                                         struct btrfs_root *root,
2844                                         struct btrfs_path *path,
2845                                         u64 objectid, u64 offset, u64 bytenr)
2846 {
2847         struct btrfs_root *extent_root = root->fs_info->extent_root;
2848         struct extent_buffer *leaf;
2849         struct btrfs_extent_data_ref *ref;
2850         struct btrfs_extent_inline_ref *iref;
2851         struct btrfs_extent_item *ei;
2852         struct btrfs_key key;
2853         u32 item_size;
2854         int ret;
2855
2856         key.objectid = bytenr;
2857         key.offset = (u64)-1;
2858         key.type = BTRFS_EXTENT_ITEM_KEY;
2859
2860         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2861         if (ret < 0)
2862                 goto out;
2863         BUG_ON(ret == 0); /* Corruption */
2864
2865         ret = -ENOENT;
2866         if (path->slots[0] == 0)
2867                 goto out;
2868
2869         path->slots[0]--;
2870         leaf = path->nodes[0];
2871         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2872
2873         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2874                 goto out;
2875
2876         ret = 1;
2877         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2878 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2879         if (item_size < sizeof(*ei)) {
2880                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2881                 goto out;
2882         }
2883 #endif
2884         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2885
2886         if (item_size != sizeof(*ei) +
2887             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2888                 goto out;
2889
2890         if (btrfs_extent_generation(leaf, ei) <=
2891             btrfs_root_last_snapshot(&root->root_item))
2892                 goto out;
2893
2894         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2895         if (btrfs_extent_inline_ref_type(leaf, iref) !=
2896             BTRFS_EXTENT_DATA_REF_KEY)
2897                 goto out;
2898
2899         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2900         if (btrfs_extent_refs(leaf, ei) !=
2901             btrfs_extent_data_ref_count(leaf, ref) ||
2902             btrfs_extent_data_ref_root(leaf, ref) !=
2903             root->root_key.objectid ||
2904             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2905             btrfs_extent_data_ref_offset(leaf, ref) != offset)
2906                 goto out;
2907
2908         ret = 0;
2909 out:
2910         return ret;
2911 }
2912
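/*
 * Combine the two checks above: returns 0 when both the committed extent tree
 * and the delayed ref queue show the extent at bytenr referenced only by this
 * root/objectid/offset, and non-zero when another reference may exist or the
 * check could not be completed.  The delayed ref check is retried for as long
 * as it returns -EAGAIN.
 */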
2913 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2914                           struct btrfs_root *root,
2915                           u64 objectid, u64 offset, u64 bytenr)
2916 {
2917         struct btrfs_path *path;
2918         int ret;
2919         int ret2;
2920
2921         path = btrfs_alloc_path();
2922         if (!path)
2923                 return -ENOENT;
2924
2925         do {
2926                 ret = check_committed_ref(trans, root, path, objectid,
2927                                           offset, bytenr);
2928                 if (ret && ret != -ENOENT)
2929                         goto out;
2930
2931                 ret2 = check_delayed_ref(trans, root, path, objectid,
2932                                          offset, bytenr);
2933         } while (ret2 == -EAGAIN);
2934
2935         if (ret2 && ret2 != -ENOENT) {
2936                 ret = ret2;
2937                 goto out;
2938         }
2939
2940         if (ret != -ENOENT || ret2 != -ENOENT)
2941                 ret = 0;
2942 out:
2943         btrfs_free_path(path);
2944         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
2945                 WARN_ON(ret > 0);
2946         return ret;
2947 }
2948
2949 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2950                            struct btrfs_root *root,
2951                            struct extent_buffer *buf,
2952                            int full_backref, int inc, int for_cow)
2953 {
2954         u64 bytenr;
2955         u64 num_bytes;
2956         u64 parent;
2957         u64 ref_root;
2958         u32 nritems;
2959         struct btrfs_key key;
2960         struct btrfs_file_extent_item *fi;
2961         int i;
2962         int level;
2963         int ret = 0;
2964         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
2965                             u64, u64, u64, u64, u64, u64, int);
2966
2967         ref_root = btrfs_header_owner(buf);
2968         nritems = btrfs_header_nritems(buf);
2969         level = btrfs_header_level(buf);
2970
2971         if (!root->ref_cows && level == 0)
2972                 return 0;
2973
2974         if (inc)
2975                 process_func = btrfs_inc_extent_ref;
2976         else
2977                 process_func = btrfs_free_extent;
2978
2979         if (full_backref)
2980                 parent = buf->start;
2981         else
2982                 parent = 0;
2983
2984         for (i = 0; i < nritems; i++) {
2985                 if (level == 0) {
2986                         btrfs_item_key_to_cpu(buf, &key, i);
2987                         if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2988                                 continue;
2989                         fi = btrfs_item_ptr(buf, i,
2990                                             struct btrfs_file_extent_item);
2991                         if (btrfs_file_extent_type(buf, fi) ==
2992                             BTRFS_FILE_EXTENT_INLINE)
2993                                 continue;
2994                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2995                         if (bytenr == 0)
2996                                 continue;
2997
2998                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
2999                         key.offset -= btrfs_file_extent_offset(buf, fi);
3000                         ret = process_func(trans, root, bytenr, num_bytes,
3001                                            parent, ref_root, key.objectid,
3002                                            key.offset, for_cow);
3003                         if (ret)
3004                                 goto fail;
3005                 } else {
3006                         bytenr = btrfs_node_blockptr(buf, i);
3007                         num_bytes = btrfs_level_size(root, level - 1);
3008                         ret = process_func(trans, root, bytenr, num_bytes,
3009                                            parent, ref_root, level - 1, 0,
3010                                            for_cow);
3011                         if (ret)
3012                                 goto fail;
3013                 }
3014         }
3015         return 0;
3016 fail:
3017         return ret;
3018 }
3019
3020 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3021                   struct extent_buffer *buf, int full_backref, int for_cow)
3022 {
3023         return __btrfs_mod_ref(trans, root, buf, full_backref, 1, for_cow);
3024 }
3025
3026 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3027                   struct extent_buffer *buf, int full_backref, int for_cow)
3028 {
3029         return __btrfs_mod_ref(trans, root, buf, full_backref, 0, for_cow);
3030 }
3031
3032 static int write_one_cache_group(struct btrfs_trans_handle *trans,
3033                                  struct btrfs_root *root,
3034                                  struct btrfs_path *path,
3035                                  struct btrfs_block_group_cache *cache)
3036 {
3037         int ret;
3038         struct btrfs_root *extent_root = root->fs_info->extent_root;
3039         unsigned long bi;
3040         struct extent_buffer *leaf;
3041
3042         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3043         if (ret < 0)
3044                 goto fail;
3045         BUG_ON(ret); /* Corruption */
3046
3047         leaf = path->nodes[0];
3048         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3049         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3050         btrfs_mark_buffer_dirty(leaf);
3051         btrfs_release_path(path);
3052 fail:
3053         if (ret) {
3054                 btrfs_abort_transaction(trans, root, ret);
3055                 return ret;
3056         }
3057         return 0;
3058
3059 }
3060
3061 static struct btrfs_block_group_cache *
3062 next_block_group(struct btrfs_root *root,
3063                  struct btrfs_block_group_cache *cache)
3064 {
3065         struct rb_node *node;
3066         spin_lock(&root->fs_info->block_group_cache_lock);
3067         node = rb_next(&cache->cache_node);
3068         btrfs_put_block_group(cache);
3069         if (node) {
3070                 cache = rb_entry(node, struct btrfs_block_group_cache,
3071                                  cache_node);
3072                 btrfs_get_block_group(cache);
3073         } else
3074                 cache = NULL;
3075         spin_unlock(&root->fs_info->block_group_cache_lock);
3076         return cache;
3077 }
3078
3079 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3080                             struct btrfs_trans_handle *trans,
3081                             struct btrfs_path *path)
3082 {
3083         struct btrfs_root *root = block_group->fs_info->tree_root;
3084         struct inode *inode = NULL;
3085         u64 alloc_hint = 0;
3086         int dcs = BTRFS_DC_ERROR;
3087         int num_pages = 0;
3088         int retries = 0;
3089         int ret = 0;
3090
3091         /*
3092          * If this block group is smaller than 100 megs, don't bother
3093          * caching it.
3094          */
3095         if (block_group->key.offset < (100 * 1024 * 1024)) {
3096                 spin_lock(&block_group->lock);
3097                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3098                 spin_unlock(&block_group->lock);
3099                 return 0;
3100         }
3101
3102 again:
3103         inode = lookup_free_space_inode(root, block_group, path);
3104         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3105                 ret = PTR_ERR(inode);
3106                 btrfs_release_path(path);
3107                 goto out;
3108         }
3109
3110         if (IS_ERR(inode)) {
3111                 BUG_ON(retries);
3112                 retries++;
3113
3114                 if (block_group->ro)
3115                         goto out_free;
3116
3117                 ret = create_free_space_inode(root, trans, block_group, path);
3118                 if (ret)
3119                         goto out_free;
3120                 goto again;
3121         }
3122
3123         /* We've already set up this transaction, go ahead and exit */
3124         if (block_group->cache_generation == trans->transid &&
3125             i_size_read(inode)) {
3126                 dcs = BTRFS_DC_SETUP;
3127                 goto out_put;
3128         }
3129
3130         /*
3131          * We want to set the generation to 0, that way if anything goes wrong
3132          * from here on out we know not to trust this cache when we load up next
3133          * time.
3134          */
3135         BTRFS_I(inode)->generation = 0;
3136         ret = btrfs_update_inode(trans, root, inode);
3137         WARN_ON(ret);
3138
3139         if (i_size_read(inode) > 0) {
3140                 ret = btrfs_truncate_free_space_cache(root, trans, path,
3141                                                       inode);
3142                 if (ret)
3143                         goto out_put;
3144         }
3145
3146         spin_lock(&block_group->lock);
3147         if (block_group->cached != BTRFS_CACHE_FINISHED ||
3148             !btrfs_test_opt(root, SPACE_CACHE)) {
3149                 /*
3150                  * don't bother trying to write stuff out _if_
3151                  * a) we're not cached,
3152                  * b) we're mounted with the nospace_cache option.
3153                  */
3154                 dcs = BTRFS_DC_WRITTEN;
3155                 spin_unlock(&block_group->lock);
3156                 goto out_put;
3157         }
3158         spin_unlock(&block_group->lock);
3159
3160         /*
3161          * Try to preallocate enough space based on how big the block group is.
3162          * Keep in mind this has to include any pinned space which could end up
3163          * taking up quite a bit since it's not folded into the other space
3164          * cache.
3165          */
3166         num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024);
3167         if (!num_pages)
3168                 num_pages = 1;
3169
3170         num_pages *= 16;
3171         num_pages *= PAGE_CACHE_SIZE;
3172
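        /*
         * E.g. with 4KiB pages a 1GiB block group works out to 4 * 16 = 64
         * pages (256KiB) of preallocated cache space, and a 100GiB block
         * group to 400 * 16 pages (25MiB).
         */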
3173         ret = btrfs_check_data_free_space(inode, num_pages);
3174         if (ret)
3175                 goto out_put;
3176
3177         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3178                                               num_pages, num_pages,
3179                                               &alloc_hint);
3180         if (!ret)
3181                 dcs = BTRFS_DC_SETUP;
3182         btrfs_free_reserved_data_space(inode, num_pages);
3183
3184 out_put:
3185         iput(inode);
3186 out_free:
3187         btrfs_release_path(path);
3188 out:
3189         spin_lock(&block_group->lock);
3190         if (!ret && dcs == BTRFS_DC_SETUP)
3191                 block_group->cache_generation = trans->transid;
3192         block_group->disk_cache_state = dcs;
3193         spin_unlock(&block_group->lock);
3194
3195         return ret;
3196 }
3197
3198 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3199                                    struct btrfs_root *root)
3200 {
3201         struct btrfs_block_group_cache *cache;
3202         int err = 0;
3203         struct btrfs_path *path;
3204         u64 last = 0;
3205
3206         path = btrfs_alloc_path();
3207         if (!path)
3208                 return -ENOMEM;
3209
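        /*
         * Three passes over the block groups: first set up the free space
         * cache inodes for groups still in BTRFS_DC_CLEAR, then write out the
         * dirty block group items themselves, and finally write the space
         * cache for groups left in BTRFS_DC_NEED_WRITE.
         */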
3210 again:
3211         while (1) {
3212                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3213                 while (cache) {
3214                         if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3215                                 break;
3216                         cache = next_block_group(root, cache);
3217                 }
3218                 if (!cache) {
3219                         if (last == 0)
3220                                 break;
3221                         last = 0;
3222                         continue;
3223                 }
3224                 err = cache_save_setup(cache, trans, path);
3225                 last = cache->key.objectid + cache->key.offset;
3226                 btrfs_put_block_group(cache);
3227         }
3228
3229         while (1) {
3230                 if (last == 0) {
3231                         err = btrfs_run_delayed_refs(trans, root,
3232                                                      (unsigned long)-1);
3233                         if (err) /* File system offline */
3234                                 goto out;
3235                 }
3236
3237                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3238                 while (cache) {
3239                         if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
3240                                 btrfs_put_block_group(cache);
3241                                 goto again;
3242                         }
3243
3244                         if (cache->dirty)
3245                                 break;
3246                         cache = next_block_group(root, cache);
3247                 }
3248                 if (!cache) {
3249                         if (last == 0)
3250                                 break;
3251                         last = 0;
3252                         continue;
3253                 }
3254
3255                 if (cache->disk_cache_state == BTRFS_DC_SETUP)
3256                         cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
3257                 cache->dirty = 0;
3258                 last = cache->key.objectid + cache->key.offset;
3259
3260                 err = write_one_cache_group(trans, root, path, cache);
3261                 if (err) /* File system offline */
3262                         goto out;
3263
3264                 btrfs_put_block_group(cache);
3265         }
3266
3267         while (1) {
3268                 /*
3269                  * I don't think this is needed since we're just marking our
3270                  * preallocated extent as written, but just in case it can't
3271                  * hurt.
3272                  */
3273                 if (last == 0) {
3274                         err = btrfs_run_delayed_refs(trans, root,
3275                                                      (unsigned long)-1);
3276                         if (err) /* File system offline */
3277                                 goto out;
3278                 }
3279
3280                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3281                 while (cache) {
3282                         /*
3283                          * Really this shouldn't happen, but it could if we
3284                          * couldn't write the entire preallocated extent and
3285                          * splitting the extent resulted in a new block.
3286                          */
3287                         if (cache->dirty) {
3288                                 btrfs_put_block_group(cache);
3289                                 goto again;
3290                         }
3291                         if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3292                                 break;
3293                         cache = next_block_group(root, cache);
3294                 }
3295                 if (!cache) {
3296                         if (last == 0)
3297                                 break;
3298                         last = 0;
3299                         continue;
3300                 }
3301
3302                 err = btrfs_write_out_cache(root, trans, cache, path);
3303
3304                 /*
3305                  * If we didn't have an error then the cache state is still
3306                  * NEED_WRITE, so we can set it to WRITTEN.
3307                  */
3308                 if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3309                         cache->disk_cache_state = BTRFS_DC_WRITTEN;
3310                 last = cache->key.objectid + cache->key.offset;
3311                 btrfs_put_block_group(cache);
3312         }
3313 out:
3314
3315         btrfs_free_path(path);
3316         return err;
3317 }
3318
3319 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3320 {
3321         struct btrfs_block_group_cache *block_group;
3322         int readonly = 0;
3323
3324         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3325         if (!block_group || block_group->ro)
3326                 readonly = 1;
3327         if (block_group)
3328                 btrfs_put_block_group(block_group);
3329         return readonly;
3330 }
3331
3332 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3333                              u64 total_bytes, u64 bytes_used,
3334                              struct btrfs_space_info **space_info)
3335 {
3336         struct btrfs_space_info *found;
3337         int i;
3338         int factor;
3339
3340         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3341                      BTRFS_BLOCK_GROUP_RAID10))
3342                 factor = 2;
3343         else
3344                 factor = 1;
3345
3346         found = __find_space_info(info, flags);
3347         if (found) {
3348                 spin_lock(&found->lock);
3349                 found->total_bytes += total_bytes;
3350                 found->disk_total += total_bytes * factor;
3351                 found->bytes_used += bytes_used;
3352                 found->disk_used += bytes_used * factor;
3353                 found->full = 0;
3354                 spin_unlock(&found->lock);
3355                 *space_info = found;
3356                 return 0;
3357         }
3358         found = kzalloc(sizeof(*found), GFP_NOFS);
3359         if (!found)
3360                 return -ENOMEM;
3361
3362         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3363                 INIT_LIST_HEAD(&found->block_groups[i]);
3364         init_rwsem(&found->groups_sem);
3365         spin_lock_init(&found->lock);
3366         found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3367         found->total_bytes = total_bytes;
3368         found->disk_total = total_bytes * factor;
3369         found->bytes_used = bytes_used;
3370         found->disk_used = bytes_used * factor;
3371         found->bytes_pinned = 0;
3372         found->bytes_reserved = 0;
3373         found->bytes_readonly = 0;
3374         found->bytes_may_use = 0;
3375         found->full = 0;
3376         found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3377         found->chunk_alloc = 0;
3378         found->flush = 0;
3379         init_waitqueue_head(&found->wait);
3380         *space_info = found;
3381         list_add_rcu(&found->list, &info->space_info);
3382         if (flags & BTRFS_BLOCK_GROUP_DATA)
3383                 info->data_sinfo = found;
3384         return 0;
3385 }
3386
3387 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3388 {
3389         u64 extra_flags = chunk_to_extended(flags) &
3390                                 BTRFS_EXTENDED_PROFILE_MASK;
3391
3392         write_seqlock(&fs_info->profiles_lock);
3393         if (flags & BTRFS_BLOCK_GROUP_DATA)
3394                 fs_info->avail_data_alloc_bits |= extra_flags;
3395         if (flags & BTRFS_BLOCK_GROUP_METADATA)
3396                 fs_info->avail_metadata_alloc_bits |= extra_flags;
3397         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3398                 fs_info->avail_system_alloc_bits |= extra_flags;
3399         write_sequnlock(&fs_info->profiles_lock);
3400 }
3401
3402 /*
3403  * returns target flags in extended format or 0 if restripe for this
3404  * chunk_type is not in progress
3405  *
3406  * should be called with either volume_mutex or balance_lock held
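 *
 * E.g. while a balance is converting data chunks to RAID1, passing flags with
 * BTRFS_BLOCK_GROUP_DATA set returns
 * BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_RAID1.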
3407  */
3408 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3409 {
3410         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3411         u64 target = 0;
3412
3413         if (!bctl)
3414                 return 0;
3415
3416         if (flags & BTRFS_BLOCK_GROUP_DATA &&
3417             bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3418                 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3419         } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3420                    bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3421                 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3422         } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3423                    bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3424                 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3425         }
3426
3427         return target;
3428 }
3429
3430 /*
3431  * @flags: available profiles in extended format (see ctree.h)
3432  *
3433  * Returns reduced profile in chunk format.  If profile changing is in
3434  * progress (either running or paused) picks the target profile (if it's
3435  * already available), otherwise falls back to plain reducing.
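 *
 * When plain reducing, the preference order is RAID6 > RAID5 > RAID10 >
 * RAID1 > RAID0 among the profiles the device count still allows: e.g. with
 * two rw devices and both RAID0 and RAID1 available, RAID1 is returned.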
3436  */
3437 u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3438 {
3439         /*
3440          * we add in the count of missing devices because we want
3441          * to make sure that any RAID levels on a degraded FS
3442          * continue to be honored.
3443          */
3444         u64 num_devices = root->fs_info->fs_devices->rw_devices +
3445                 root->fs_info->fs_devices->missing_devices;
3446         u64 target;
3447         u64 tmp;
3448
3449         /*
3450          * see if restripe for this chunk_type is in progress, if so
3451          * try to reduce to the target profile
3452          */
3453         spin_lock(&root->fs_info->balance_lock);
3454         target = get_restripe_target(root->fs_info, flags);
3455         if (target) {
3456                 /* pick target profile only if it's already available */
3457                 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3458                         spin_unlock(&root->fs_info->balance_lock);
3459                         return extended_to_chunk(target);
3460                 }
3461         }
3462         spin_unlock(&root->fs_info->balance_lock);
3463
3464         /* First, mask out the RAID levels which aren't possible */
3465         if (num_devices == 1)
3466                 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0 |
3467                            BTRFS_BLOCK_GROUP_RAID5);
3468         if (num_devices < 3)
3469                 flags &= ~BTRFS_BLOCK_GROUP_RAID6;
3470         if (num_devices < 4)
3471                 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3472
3473         tmp = flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3474                        BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID5 |
3475                        BTRFS_BLOCK_GROUP_RAID6 | BTRFS_BLOCK_GROUP_RAID10);
3476         flags &= ~tmp;
3477
3478         if (tmp & BTRFS_BLOCK_GROUP_RAID6)
3479                 tmp = BTRFS_BLOCK_GROUP_RAID6;
3480         else if (tmp & BTRFS_BLOCK_GROUP_RAID5)
3481                 tmp = BTRFS_BLOCK_GROUP_RAID5;
3482         else if (tmp & BTRFS_BLOCK_GROUP_RAID10)
3483                 tmp = BTRFS_BLOCK_GROUP_RAID10;
3484         else if (tmp & BTRFS_BLOCK_GROUP_RAID1)
3485                 tmp = BTRFS_BLOCK_GROUP_RAID1;
3486         else if (tmp & BTRFS_BLOCK_GROUP_RAID0)
3487                 tmp = BTRFS_BLOCK_GROUP_RAID0;
3488
3489         return extended_to_chunk(flags | tmp);
3490 }
3491
3492 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
3493 {
3494         unsigned seq;
3495
3496         do {
3497                 seq = read_seqbegin(&root->fs_info->profiles_lock);
3498
3499                 if (flags & BTRFS_BLOCK_GROUP_DATA)
3500                         flags |= root->fs_info->avail_data_alloc_bits;
3501                 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3502                         flags |= root->fs_info->avail_system_alloc_bits;
3503                 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3504                         flags |= root->fs_info->avail_metadata_alloc_bits;
3505         } while (read_seqretry(&root->fs_info->profiles_lock, seq));
3506
3507         return btrfs_reduce_alloc_profile(root, flags);
3508 }
3509
3510 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3511 {
3512         u64 flags;
3513         u64 ret;
3514
3515         if (data)
3516                 flags = BTRFS_BLOCK_GROUP_DATA;
3517         else if (root == root->fs_info->chunk_root)
3518                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3519         else
3520                 flags = BTRFS_BLOCK_GROUP_METADATA;
3521
3522         ret = get_alloc_profile(root, flags);
3523         return ret;
3524 }
3525
3526 /*
3527  * This will check the space that the inode allocates from to make sure we have
3528  * enough space for bytes.
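 *
 * If the data space_info is short it first tries to allocate a new data
 * chunk and, failing that, commits the transaction so pinned bytes can be
 * reclaimed; only then does it return -ENOSPC.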
3529  */
3530 int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
3531 {
3532         struct btrfs_space_info *data_sinfo;
3533         struct btrfs_root *root = BTRFS_I(inode)->root;
3534         struct btrfs_fs_info *fs_info = root->fs_info;
3535         u64 used;
3536         int ret = 0, committed = 0, alloc_chunk = 1;
3537
3538         /* make sure bytes are sectorsize aligned */
3539         bytes = ALIGN(bytes, root->sectorsize);
3540
3541         if (root == root->fs_info->tree_root ||
3542             BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) {
3543                 alloc_chunk = 0;
3544                 committed = 1;
3545         }
3546
3547         data_sinfo = fs_info->data_sinfo;
3548         if (!data_sinfo)
3549                 goto alloc;
3550
3551 again:
3552         /* make sure we have enough space to handle the data first */
3553         spin_lock(&data_sinfo->lock);
3554         used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3555                 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3556                 data_sinfo->bytes_may_use;
3557
3558         if (used + bytes > data_sinfo->total_bytes) {
3559                 struct btrfs_trans_handle *trans;
3560
3561                 /*
3562                  * if we don't have enough free bytes in this space then we need
3563                  * to alloc a new chunk.
3564                  */
3565                 if (!data_sinfo->full && alloc_chunk) {
3566                         u64 alloc_target;
3567
3568                         data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3569                         spin_unlock(&data_sinfo->lock);
3570 alloc:
3571                         alloc_target = btrfs_get_alloc_profile(root, 1);
3572                         trans = btrfs_join_transaction(root);
3573                         if (IS_ERR(trans))
3574                                 return PTR_ERR(trans);
3575
3576                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3577                                              alloc_target,
3578                                              CHUNK_ALLOC_NO_FORCE);
3579                         btrfs_end_transaction(trans, root);
3580                         if (ret < 0) {
3581                                 if (ret != -ENOSPC)
3582                                         return ret;
3583                                 else
3584                                         goto commit_trans;
3585                         }
3586
3587                         if (!data_sinfo)
3588                                 data_sinfo = fs_info->data_sinfo;
3589
3590                         goto again;
3591                 }
3592
3593                 /*
3594                  * If we have less pinned bytes than we want to allocate then
3595                  * don't bother committing the transaction, it won't help us.
3596                  */
3597                 if (data_sinfo->bytes_pinned < bytes)
3598                         committed = 1;
3599                 spin_unlock(&data_sinfo->lock);
3600
3601                 /* commit the current transaction and try again */
3602 commit_trans:
3603                 if (!committed &&
3604                     !atomic_read(&root->fs_info->open_ioctl_trans)) {
3605                         committed = 1;
3606                         trans = btrfs_join_transaction(root);
3607                         if (IS_ERR(trans))
3608                                 return PTR_ERR(trans);
3609                         ret = btrfs_commit_transaction(trans, root);
3610                         if (ret)
3611                                 return ret;
3612                         goto again;
3613                 }
3614
3615                 return -ENOSPC;
3616         }
3617         data_sinfo->bytes_may_use += bytes;
3618         trace_btrfs_space_reservation(root->fs_info, "space_info",
3619                                       data_sinfo->flags, bytes, 1);
3620         spin_unlock(&data_sinfo->lock);
3621
3622         return 0;
3623 }
3624
3625 /*
3626  * Called if we need to clear a data reservation for this inode.
3627  */
3628 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3629 {
3630         struct btrfs_root *root = BTRFS_I(inode)->root;
3631         struct btrfs_space_info *data_sinfo;
3632
3633         /* make sure bytes are sectorsize aligned */
3634         bytes = ALIGN(bytes, root->sectorsize);
3635
3636         data_sinfo = root->fs_info->data_sinfo;
3637         spin_lock(&data_sinfo->lock);
3638         data_sinfo->bytes_may_use -= bytes;
3639         trace_btrfs_space_reservation(root->fs_info, "space_info",
3640                                       data_sinfo->flags, bytes, 0);
3641         spin_unlock(&data_sinfo->lock);
3642 }
3643
3644 static void force_metadata_allocation(struct btrfs_fs_info *info)
3645 {
3646         struct list_head *head = &info->space_info;
3647         struct btrfs_space_info *found;
3648
3649         rcu_read_lock();
3650         list_for_each_entry_rcu(found, head, list) {
3651                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3652                         found->force_alloc = CHUNK_ALLOC_FORCE;
3653         }
3654         rcu_read_unlock();
3655 }
3656
3657 static int should_alloc_chunk(struct btrfs_root *root,
3658                               struct btrfs_space_info *sinfo, int force)
3659 {
3660         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3661         u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3662         u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
3663         u64 thresh;
3664
3665         if (force == CHUNK_ALLOC_FORCE)
3666                 return 1;
3667
3668         /*
3669          * We need to take into account the global rsv because for all intents
3670          * and purposes it's used space.  Don't worry about locking the
3671          * global_rsv, it doesn't change except when the transaction commits.
3672          */
3673         if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
3674                 num_allocated += global_rsv->size;
3675
3676         /*
3677          * in limited mode, we want to have some free space up to
3678          * about 1% of the FS size.
3679          */
3680         if (force == CHUNK_ALLOC_LIMITED) {
3681                 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
3682                 thresh = max_t(u64, 64 * 1024 * 1024,
3683                                div_factor_fine(thresh, 1));
3684
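                /*
                 * i.e. in limited mode allocate whenever less than roughly 1%
                 * of the filesystem size (at least 64MiB) is left free in
                 * this space_info; about 10GiB of headroom on a 1TiB fs.
                 */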
3685                 if (num_bytes - num_allocated < thresh)
3686                         return 1;
3687         }
3688
3689         if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
3690                 return 0;
3691         return 1;
3692 }
3693
3694 static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
3695 {
3696         u64 num_dev;
3697
3698         if (type & (BTRFS_BLOCK_GROUP_RAID10 |
3699                     BTRFS_BLOCK_GROUP_RAID0 |
3700                     BTRFS_BLOCK_GROUP_RAID5 |
3701                     BTRFS_BLOCK_GROUP_RAID6))
3702                 num_dev = root->fs_info->fs_devices->rw_devices;
3703         else if (type & BTRFS_BLOCK_GROUP_RAID1)
3704                 num_dev = 2;
3705         else
3706                 num_dev = 1;    /* DUP or single */
3707
3708         /* metadata for updating devices and chunk tree */
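        /* e.g. num_dev + 1 = 3 reserved tree operations for RAID1 */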
3709         return btrfs_calc_trans_metadata_size(root, num_dev + 1);
3710 }
3711
3712 static void check_system_chunk(struct btrfs_trans_handle *trans,
3713                                struct btrfs_root *root, u64 type)
3714 {
3715         struct btrfs_space_info *info;
3716         u64 left;
3717         u64 thresh;
3718
3719         info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3720         spin_lock(&info->lock);
3721         left = info->total_bytes - info->bytes_used - info->bytes_pinned -
3722                 info->bytes_reserved - info->bytes_readonly;
3723         spin_unlock(&info->lock);
3724
3725         thresh = get_system_chunk_thresh(root, type);
3726         if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
3727                 btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
3728                         left, thresh, type);
3729                 dump_space_info(info, 0, 0);
3730         }
3731
3732         if (left < thresh) {
3733                 u64 flags;
3734
3735                 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
3736                 btrfs_alloc_chunk(trans, root, flags);
3737         }
3738 }
3739
3740 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3741                           struct btrfs_root *extent_root, u64 flags, int force)
3742 {
3743         struct btrfs_space_info *space_info;
3744         struct btrfs_fs_info *fs_info = extent_root->fs_info;
3745         int wait_for_alloc = 0;
3746         int ret = 0;
3747
3748         /* Don't re-enter if we're already allocating a chunk */
3749         if (trans->allocating_chunk)
3750                 return -ENOSPC;
3751
3752         space_info = __find_space_info(extent_root->fs_info, flags);
3753         if (!space_info) {
3754                 ret = update_space_info(extent_root->fs_info, flags,
3755                                         0, 0, &space_info);
3756                 BUG_ON(ret); /* -ENOMEM */
3757         }
3758         BUG_ON(!space_info); /* Logic error */
3759
3760 again:
3761         spin_lock(&space_info->lock);
3762         if (force < space_info->force_alloc)
3763                 force = space_info->force_alloc;
3764         if (space_info->full) {
3765                 spin_unlock(&space_info->lock);
3766                 return 0;
3767         }
3768
3769         if (!should_alloc_chunk(extent_root, space_info, force)) {
3770                 spin_unlock(&space_info->lock);
3771                 return 0;
3772         } else if (space_info->chunk_alloc) {
3773                 wait_for_alloc = 1;
3774         } else {
3775                 space_info->chunk_alloc = 1;
3776         }
3777
3778         spin_unlock(&space_info->lock);
3779
3780         mutex_lock(&fs_info->chunk_mutex);
3781
3782         /*
3783          * The chunk_mutex is held throughout the entirety of a chunk
3784          * allocation, so once we've acquired the chunk_mutex we know that the
3785          * other guy is done and we need to recheck and see if we should
3786          * allocate.
3787          */
3788         if (wait_for_alloc) {
3789                 mutex_unlock(&fs_info->chunk_mutex);
3790                 wait_for_alloc = 0;
3791                 goto again;
3792         }
3793
3794         trans->allocating_chunk = true;
3795
3796         /*
3797          * If we have mixed data/metadata chunks we want to make sure we keep
3798          * allocating mixed chunks instead of individual chunks.
3799          */
3800         if (btrfs_mixed_space_info(space_info))
3801                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3802
3803         /*
3804          * if we're doing a data chunk, go ahead and make sure that
3805          * we keep a reasonable number of metadata chunks allocated in the
3806          * FS as well.
3807          */
3808         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3809                 fs_info->data_chunk_allocations++;
3810                 if (!(fs_info->data_chunk_allocations %
3811                       fs_info->metadata_ratio))
3812                         force_metadata_allocation(fs_info);
3813         }
3814
3815         /*
3816          * Check if we have enough space in SYSTEM chunk because we may need
3817          * to update devices.
3818          */
3819         check_system_chunk(trans, extent_root, flags);
3820
3821         ret = btrfs_alloc_chunk(trans, extent_root, flags);
3822         trans->allocating_chunk = false;
3823
3824         spin_lock(&space_info->lock);
3825         if (ret < 0 && ret != -ENOSPC)
3826                 goto out;
3827         if (ret)
3828                 space_info->full = 1;
3829         else
3830                 ret = 1;
3831
3832         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
3833 out:
3834         space_info->chunk_alloc = 0;
3835         spin_unlock(&space_info->lock);
3836         mutex_unlock(&fs_info->chunk_mutex);
3837         return ret;
3838 }
3839
3840 static int can_overcommit(struct btrfs_root *root,
3841                           struct btrfs_space_info *space_info, u64 bytes,
3842                           enum btrfs_reserve_flush_enum flush)
3843 {
3844         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3845         u64 profile = btrfs_get_alloc_profile(root, 0);
3846         u64 rsv_size = 0;
3847         u64 avail;
3848         u64 used;
3849         u64 to_add;
3850
3851         used = space_info->bytes_used + space_info->bytes_reserved +
3852                 space_info->bytes_pinned + space_info->bytes_readonly;
3853
3854         spin_lock(&global_rsv->lock);
3855         rsv_size = global_rsv->size;
3856         spin_unlock(&global_rsv->lock);
3857
3858         /*
3859          * We only want to allow over committing if we have lots of actual space
3860          * free, but if we don't have enough space to handle the global reserve
3861          * space then we could end up having a real enospc problem when trying
3862          * to allocate a chunk or some other such important allocation.
3863          */
3864         rsv_size <<= 1;
3865         if (used + rsv_size >= space_info->total_bytes)
3866                 return 0;
3867
3868         used += space_info->bytes_may_use;
3869
3870         spin_lock(&root->fs_info->free_chunk_lock);
3871         avail = root->fs_info->free_chunk_space;
3872         spin_unlock(&root->fs_info->free_chunk_lock);
3873
3874         /*
3875          * If we have dup, raid1 or raid10 then only half of the free
3876          * space is actually usable.  For raid56, the space info used
3877          * doesn't include the parity drive, so we don't have to
3878          * change the math
3879          */
3880         if (profile & (BTRFS_BLOCK_GROUP_DUP |
3881                        BTRFS_BLOCK_GROUP_RAID1 |
3882                        BTRFS_BLOCK_GROUP_RAID10))
3883                 avail >>= 1;
3884
3885         to_add = space_info->total_bytes;
3886
3887         /*
3888          * If we aren't flushing all things, let us overcommit up to
3889          * 1/2 of the space.  If we can flush, don't let us overcommit
3890          * too much, let it overcommit up to 1/8 of the space.
3891          */
3892         if (flush == BTRFS_RESERVE_FLUSH_ALL)
3893                 to_add >>= 3;
3894         else
3895                 to_add >>= 1;
3896
3897         /*
3898          * Limit the overcommit to the amount of free space we could possibly
3899          * allocate for chunks.
3900          */
3901         to_add = min(avail, to_add);
3902
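        /*
         * E.g. for an 8GiB metadata space_info this allows overcommitting by
         * up to 1GiB (total_bytes >> 3) with BTRFS_RESERVE_FLUSH_ALL and up
         * to 4GiB (total_bytes >> 1) otherwise, in both cases capped by the
         * unallocated chunk space still available on the devices.
         */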
3903         if (used + bytes < space_info->total_bytes + to_add)
3904                 return 1;
3905         return 0;
3906 }
3907
3908 void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
3909                                   unsigned long nr_pages)
3910 {
3911         struct super_block *sb = root->fs_info->sb;
3912         int started;
3913
3914         /* If we cannot start writeback, just sync all the delalloc files. */
3915         started = try_to_writeback_inodes_sb_nr(sb, nr_pages,
3916                                                       WB_REASON_FS_FREE_SPACE);
3917         if (!started) {
3918                 /*
3919                  * We needn't worry about the filesystem going from r/w to r/o
3920                  * even though we don't acquire the ->s_umount mutex, because
3921                  * the filesystem should guarantee that the delalloc inode list
3922                  * is empty once the filesystem is read-only (all dirty pages
3923                  * have been written to disk).
3924                  */
3925                 btrfs_start_delalloc_inodes(root, 0);
3926                 if (!current->journal_info)
3927                         btrfs_wait_ordered_extents(root, 0);
3928         }
3929 }
3930
3931 /*
3932  * shrink metadata reservation for delalloc
3933  */
3934 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
3935                             bool wait_ordered)
3936 {
3937         struct btrfs_block_rsv *block_rsv;
3938         struct btrfs_space_info *space_info;
3939         struct btrfs_trans_handle *trans;
3940         u64 delalloc_bytes;
3941         u64 max_reclaim;
3942         long time_left;
3943         unsigned long nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
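        /* default to 2MiB worth of pages, i.e. 512 pages with 4KiB pages */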
3944         int loops = 0;
3945         enum btrfs_reserve_flush_enum flush;
3946
3947         trans = (struct btrfs_trans_handle *)current->journal_info;
3948         block_rsv = &root->fs_info->delalloc_block_rsv;
3949         space_info = block_rsv->space_info;
3950
3951         smp_mb();
3952         delalloc_bytes = percpu_counter_sum_positive(
3953                                                 &root->fs_info->delalloc_bytes);
3954         if (delalloc_bytes == 0) {
3955                 if (trans)
3956                         return;
3957                 btrfs_wait_ordered_extents(root, 0);
3958                 return;
3959         }
3960
3961         while (delalloc_bytes && loops < 3) {
3962                 max_reclaim = min(delalloc_bytes, to_reclaim);
3963                 nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
3964                 btrfs_writeback_inodes_sb_nr(root, nr_pages);
3965                 /*
3966                  * We need to wait for the async pages to actually start before
3967                  * we do anything.
3968                  */
3969                 wait_event(root->fs_info->async_submit_wait,
3970                            !atomic_read(&root->fs_info->async_delalloc_pages));
3971
3972                 if (!trans)
3973                         flush = BTRFS_RESERVE_FLUSH_ALL;
3974                 else
3975                         flush = BTRFS_RESERVE_NO_FLUSH;
3976                 spin_lock(&space_info->lock);
3977                 if (can_overcommit(root, space_info, orig, flush)) {
3978                         spin_unlock(&space_info->lock);
3979                         break;
3980                 }
3981                 spin_unlock(&space_info->lock);
3982
3983                 loops++;
3984                 if (wait_ordered && !trans) {
3985                         btrfs_wait_ordered_extents(root, 0);
3986                 } else {
3987                         time_left = schedule_timeout_killable(1);
3988                         if (time_left)
3989                                 break;
3990                 }
3991                 smp_mb();
3992                 delalloc_bytes = percpu_counter_sum_positive(
3993                                                 &root->fs_info->delalloc_bytes);
3994         }
3995 }
3996
3997 /**
3998  * may_commit_transaction - possibly commit the transaction if it's ok to
3999  * @root - the root we're allocating for
4000  * @bytes - the number of bytes we want to reserve
4001  * @force - force the commit
4002  *
4003  * This will check to make sure that committing the transaction will actually
4004  * get us somewhere and then commit the transaction if it does.  Otherwise it
4005  * will return -ENOSPC.
4006  */
4007 static int may_commit_transaction(struct btrfs_root *root,
4008                                   struct btrfs_space_info *space_info,
4009                                   u64 bytes, int force)
4010 {
4011         struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
4012         struct btrfs_trans_handle *trans;
4013
4014         trans = (struct btrfs_trans_handle *)current->journal_info;
4015         if (trans)
4016                 return -EAGAIN;
4017
4018         if (force)
4019                 goto commit;
4020
4021         /* See if there is enough pinned space to make this reservation */
4022         spin_lock(&space_info->lock);
4023         if (space_info->bytes_pinned >= bytes) {
4024                 spin_unlock(&space_info->lock);
4025                 goto commit;
4026         }
4027         spin_unlock(&space_info->lock);
4028
4029         /*
4030          * See if there is some space in the delayed insertion reservation for
4031          * this reservation.
4032          */
4033         if (space_info != delayed_rsv->space_info)
4034                 return -ENOSPC;
4035
4036         spin_lock(&space_info->lock);
4037         spin_lock(&delayed_rsv->lock);
4038         if (space_info->bytes_pinned + delayed_rsv->size < bytes) {
4039                 spin_unlock(&delayed_rsv->lock);
4040                 spin_unlock(&space_info->lock);
4041                 return -ENOSPC;
4042         }
4043         spin_unlock(&delayed_rsv->lock);
4044         spin_unlock(&space_info->lock);
4045
4046 commit:
4047         trans = btrfs_join_transaction(root);
4048         if (IS_ERR(trans))
4049                 return -ENOSPC;
4050
4051         return btrfs_commit_transaction(trans, root);
4052 }
4053
4054 enum flush_state {
4055         FLUSH_DELAYED_ITEMS_NR  =       1,
4056         FLUSH_DELAYED_ITEMS     =       2,
4057         FLUSH_DELALLOC          =       3,
4058         FLUSH_DELALLOC_WAIT     =       4,
4059         ALLOC_CHUNK             =       5,
4060         COMMIT_TRANS            =       6,
4061 };
4062
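/*
 * flush_space() executes one of the states above; the reservation path starts
 * at FLUSH_DELAYED_ITEMS_NR and walks down the list, escalating from the
 * cheapest way of freeing metadata space to the most expensive (committing
 * the transaction), retrying the reservation after each step.
 */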
4063 static int flush_space(struct btrfs_root *root,
4064                        struct btrfs_space_info *space_info, u64 num_bytes,
4065                        u64 orig_bytes, int state)
4066 {
4067         struct btrfs_trans_handle *trans;
4068         int nr;
4069         int ret = 0;
4070
4071         switch (state) {
4072         case FLUSH_DELAYED_ITEMS_NR:
4073         case FLUSH_DELAYED_ITEMS:
4074                 if (state == FLUSH_DELAYED_ITEMS_NR) {
4075                         u64 bytes = btrfs_calc_trans_metadata_size(root, 1);
4076
4077                         nr = (int)div64_u64(num_bytes, bytes);
4078                         if (!nr)
4079                                 nr = 1;
4080                         nr *= 2;
4081                 } else {
4082                         nr = -1;
4083                 }
4084                 trans = btrfs_join_transaction(root);
4085                 if (IS_ERR(trans)) {
4086                         ret = PTR_ERR(trans);
4087                         break;
4088                 }
4089                 ret = btrfs_run_delayed_items_nr(trans, root, nr);
4090                 btrfs_end_transaction(trans, root);
4091                 break;
4092         case FLUSH_DELALLOC:
4093         case FLUSH_DELALLOC_WAIT:
4094                 shrink_delalloc(root, num_bytes, orig_bytes,
4095                                 state == FLUSH_DELALLOC_WAIT);
4096                 break;
4097         case ALLOC_CHUNK:
4098                 trans = btrfs_join_transaction(root);
4099                 if (IS_ERR(trans)) {
4100                         ret = PTR_ERR(trans);
4101                         break;
4102                 }
4103                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4104                                      btrfs_get_alloc_profile(root, 0),
4105                                      CHUNK_ALLOC_NO_FORCE);
4106                 btrfs_end_transaction(trans, root);
4107                 if (ret == -ENOSPC)
4108                         ret = 0;
4109                 break;
4110         case COMMIT_TRANS:
4111                 ret = may_commit_transaction(root, space_info, orig_bytes, 0);
4112                 break;
4113         default:
4114                 ret = -ENOSPC;
4115                 break;
4116         }
4117
4118         return ret;
4119 }
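
/*
 * reserve_metadata_bytes() below walks the states above in order, retrying
 * the reservation after each step: first a partial and then a full run of
 * the delayed items, then delalloc flushing (async and then waited on), then
 * a chunk allocation and, as a last resort, a transaction commit via
 * may_commit_transaction().
 */
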
4120 /**
4121  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
4122  * @root - the root we're allocating for
4123  * @block_rsv - the block_rsv we're allocating for
4124  * @orig_bytes - the number of bytes we want
4125  * @flush - whether or not we can flush to make our reservation
4126  *
4127  * This will reserve orig_bytes number of bytes from the space info associated
4128  * with the block_rsv.  If there is not enough space it will make an attempt to
4129  * flush out space to make room.  It will do this by flushing delalloc if
4130  * possible or committing the transaction.  If flush is 0 then no attempts to
4131  * regain reservations will be made and this will fail if there is not enough
4132  * space already.
4133  */
4134 static int reserve_metadata_bytes(struct btrfs_root *root,
4135                                   struct btrfs_block_rsv *block_rsv,
4136                                   u64 orig_bytes,
4137                                   enum btrfs_reserve_flush_enum flush)
4138 {
4139         struct btrfs_space_info *space_info = block_rsv->space_info;
4140         u64 used;
4141         u64 num_bytes = orig_bytes;
4142         int flush_state = FLUSH_DELAYED_ITEMS_NR;
4143         int ret = 0;
4144         bool flushing = false;
4145
4146 again:
4147         ret = 0;
4148         spin_lock(&space_info->lock);
4149         /*
4150          * We only want to wait if somebody other than us is flushing and we
4151          * are actually allowed to flush all things.
4152          */
4153         while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
4154                space_info->flush) {
4155                 spin_unlock(&space_info->lock);
4156                 /*
4157                  * If we have a trans handle we can't wait because the flusher
4158                  * may have to commit the transaction, which would mean we would
4159                  * deadlock since we are waiting for the flusher to finish, but
4160                  * hold the current transaction open.
4161                  */
4162                 if (current->journal_info)
4163                         return -EAGAIN;
4164                 ret = wait_event_killable(space_info->wait, !space_info->flush);
4165                 /* Must have been killed, return */
4166                 if (ret)
4167                         return -EINTR;
4168
4169                 spin_lock(&space_info->lock);
4170         }
4171
4172         ret = -ENOSPC;
4173         used = space_info->bytes_used + space_info->bytes_reserved +
4174                 space_info->bytes_pinned + space_info->bytes_readonly +
4175                 space_info->bytes_may_use;
4176
4177         /*
4178          * The idea here is that if we haven't already over-reserved the block
4179          * group, we can go ahead and save our reservation first and then start
4180          * flushing if we need to.  Otherwise, if we've already overcommitted,
4181          * let's start flushing stuff first and then come back and try to make
4182          * our reservation.
4183          */
4184         if (used <= space_info->total_bytes) {
4185                 if (used + orig_bytes <= space_info->total_bytes) {
4186                         space_info->bytes_may_use += orig_bytes;
4187                         trace_btrfs_space_reservation(root->fs_info,
4188                                 "space_info", space_info->flags, orig_bytes, 1);
4189                         ret = 0;
4190                 } else {
4191                         /*
4192                          * OK, set num_bytes to orig_bytes since we aren't
4193                          * overcommitted; this way we only try and reclaim what
4194                          * we need.
4195                          */
4196                         num_bytes = orig_bytes;
4197                 }
4198         } else {
4199                 /*
4200          * OK, we're overcommitted; set num_bytes to the overcommitted
4201                  * amount plus the amount of bytes that we need for this
4202                  * reservation.
4203                  */
4204                 num_bytes = used - space_info->total_bytes +
4205                         (orig_bytes * 2);
4206         }
4207
4208         if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
4209                 space_info->bytes_may_use += orig_bytes;
4210                 trace_btrfs_space_reservation(root->fs_info, "space_info",
4211                                               space_info->flags, orig_bytes,
4212                                               1);
4213                 ret = 0;
4214         }
4215
4216         /*
4217          * Couldn't make our reservation, save our place so while we're trying
4218          * to reclaim space we can actually use it instead of somebody else
4219          * stealing it from us.
4220          *
4221          * We make the other tasks wait for the flush only when we can flush
4222          * all things.
4223          */
4224         if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
4225                 flushing = true;
4226                 space_info->flush = 1;
4227         }
4228
4229         spin_unlock(&space_info->lock);
4230
4231         if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
4232                 goto out;
4233
4234         ret = flush_space(root, space_info, num_bytes, orig_bytes,
4235                           flush_state);
4236         flush_state++;
4237
4238         /*
4239          * If we are FLUSH_LIMIT, we can not flush delalloc, or a deadlock
4240          * would happen. So skip the delalloc flush.
4241          */
4242         if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4243             (flush_state == FLUSH_DELALLOC ||
4244              flush_state == FLUSH_DELALLOC_WAIT))
4245                 flush_state = ALLOC_CHUNK;
4246
4247         if (!ret)
4248                 goto again;
4249         else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4250                  flush_state < COMMIT_TRANS)
4251                 goto again;
4252         else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
4253                  flush_state <= COMMIT_TRANS)
4254                 goto again;
4255
4256 out:
4257         if (ret == -ENOSPC &&
4258             unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
4259                 struct btrfs_block_rsv *global_rsv =
4260                         &root->fs_info->global_block_rsv;
4261
4262                 if (block_rsv != global_rsv &&
4263                     !block_rsv_use_bytes(global_rsv, orig_bytes))
4264                         ret = 0;
4265         }
4266         if (flushing) {
4267                 spin_lock(&space_info->lock);
4268                 space_info->flush = 0;
4269                 wake_up_all(&space_info->wait);
4270                 spin_unlock(&space_info->lock);
4271         }
4272         return ret;
4273 }
4274
4275 static struct btrfs_block_rsv *get_block_rsv(
4276                                         const struct btrfs_trans_handle *trans,
4277                                         const struct btrfs_root *root)
4278 {
4279         struct btrfs_block_rsv *block_rsv = NULL;
4280
4281         if (root->ref_cows)
4282                 block_rsv = trans->block_rsv;
4283
4284         if (root == root->fs_info->csum_root && trans->adding_csums)
4285                 block_rsv = trans->block_rsv;
4286
4287         if (!block_rsv)
4288                 block_rsv = root->block_rsv;
4289
4290         if (!block_rsv)
4291                 block_rsv = &root->fs_info->empty_block_rsv;
4292
4293         return block_rsv;
4294 }
4295
4296 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
4297                                u64 num_bytes)
4298 {
4299         int ret = -ENOSPC;
4300         spin_lock(&block_rsv->lock);
4301         if (block_rsv->reserved >= num_bytes) {
4302                 block_rsv->reserved -= num_bytes;
4303                 if (block_rsv->reserved < block_rsv->size)
4304                         block_rsv->full = 0;
4305                 ret = 0;
4306         }
4307         spin_unlock(&block_rsv->lock);
4308         return ret;
4309 }
4310
4311 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
4312                                 u64 num_bytes, int update_size)
4313 {
4314         spin_lock(&block_rsv->lock);
4315         block_rsv->reserved += num_bytes;
4316         if (update_size)
4317                 block_rsv->size += num_bytes;
4318         else if (block_rsv->reserved >= block_rsv->size)
4319                 block_rsv->full = 1;
4320         spin_unlock(&block_rsv->lock);
4321 }
4322
4323 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
4324                                     struct btrfs_block_rsv *block_rsv,
4325                                     struct btrfs_block_rsv *dest, u64 num_bytes)
4326 {
4327         struct btrfs_space_info *space_info = block_rsv->space_info;
4328
4329         spin_lock(&block_rsv->lock);
4330         if (num_bytes == (u64)-1)
4331                 num_bytes = block_rsv->size;
4332         block_rsv->size -= num_bytes;
4333         if (block_rsv->reserved >= block_rsv->size) {
4334                 num_bytes = block_rsv->reserved - block_rsv->size;
4335                 block_rsv->reserved = block_rsv->size;
4336                 block_rsv->full = 1;
4337         } else {
4338                 num_bytes = 0;
4339         }
4340         spin_unlock(&block_rsv->lock);
4341
4342         if (num_bytes > 0) {
4343                 if (dest) {
4344                         spin_lock(&dest->lock);
4345                         if (!dest->full) {
4346                                 u64 bytes_to_add;
4347
4348                                 bytes_to_add = dest->size - dest->reserved;
4349                                 bytes_to_add = min(num_bytes, bytes_to_add);
4350                                 dest->reserved += bytes_to_add;
4351                                 if (dest->reserved >= dest->size)
4352                                         dest->full = 1;
4353                                 num_bytes -= bytes_to_add;
4354                         }
4355                         spin_unlock(&dest->lock);
4356                 }
4357                 if (num_bytes) {
4358                         spin_lock(&space_info->lock);
4359                         space_info->bytes_may_use -= num_bytes;
4360                         trace_btrfs_space_reservation(fs_info, "space_info",
4361                                         space_info->flags, num_bytes, 0);
4362                         space_info->reservation_progress++;
4363                         spin_unlock(&space_info->lock);
4364                 }
4365         }
4366 }
4367
4368 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
4369                                    struct btrfs_block_rsv *dst, u64 num_bytes)
4370 {
4371         int ret;
4372
4373         ret = block_rsv_use_bytes(src, num_bytes);
4374         if (ret)
4375                 return ret;
4376
4377         block_rsv_add_bytes(dst, num_bytes, 1);
4378         return 0;
4379 }
4380
4381 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
4382 {
4383         memset(rsv, 0, sizeof(*rsv));
4384         spin_lock_init(&rsv->lock);
4385         rsv->type = type;
4386 }
4387
4388 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
4389                                               unsigned short type)
4390 {
4391         struct btrfs_block_rsv *block_rsv;
4392         struct btrfs_fs_info *fs_info = root->fs_info;
4393
4394         block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
4395         if (!block_rsv)
4396                 return NULL;
4397
4398         btrfs_init_block_rsv(block_rsv, type);
4399         block_rsv->space_info = __find_space_info(fs_info,
4400                                                   BTRFS_BLOCK_GROUP_METADATA);
4401         return block_rsv;
4402 }
4403
4404 void btrfs_free_block_rsv(struct btrfs_root *root,
4405                           struct btrfs_block_rsv *rsv)
4406 {
4407         if (!rsv)
4408                 return;
4409         btrfs_block_rsv_release(root, rsv, (u64)-1);
4410         kfree(rsv);
4411 }
4412
4413 int btrfs_block_rsv_add(struct btrfs_root *root,
4414                         struct btrfs_block_rsv *block_rsv, u64 num_bytes,
4415                         enum btrfs_reserve_flush_enum flush)
4416 {
4417         int ret;
4418
4419         if (num_bytes == 0)
4420                 return 0;
4421
4422         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4423         if (!ret) {
4424                 block_rsv_add_bytes(block_rsv, num_bytes, 1);
4425                 return 0;
4426         }
4427
4428         return ret;
4429 }
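
/*
 * For illustration only, a sketch of how a hypothetical out-of-file caller
 * might pair the helpers above (the BTRFS_BLOCK_RSV_TEMP type and the caller
 * itself are assumptions, not code in this file):
 *
 *	rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
 *	if (!rsv)
 *		return -ENOMEM;
 *	ret = btrfs_block_rsv_add(root, rsv, num_bytes,
 *				  BTRFS_RESERVE_FLUSH_ALL);
 *	if (ret) {
 *		btrfs_free_block_rsv(root, rsv);
 *		return ret;
 *	}
 *	... consume the reservation ...
 *	btrfs_free_block_rsv(root, rsv);
 *
 * btrfs_free_block_rsv() gives any unused bytes back via
 * btrfs_block_rsv_release() before freeing the rsv itself.
 */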
4430
4431 int btrfs_block_rsv_check(struct btrfs_root *root,
4432                           struct btrfs_block_rsv *block_rsv, int min_factor)
4433 {
4434         u64 num_bytes = 0;
4435         int ret = -ENOSPC;
4436
4437         if (!block_rsv)
4438                 return 0;
4439
4440         spin_lock(&block_rsv->lock);
4441         num_bytes = div_factor(block_rsv->size, min_factor);
4442         if (block_rsv->reserved >= num_bytes)
4443                 ret = 0;
4444         spin_unlock(&block_rsv->lock);
4445
4446         return ret;
4447 }
4448
4449 int btrfs_block_rsv_refill(struct btrfs_root *root,
4450                            struct btrfs_block_rsv *block_rsv, u64 min_reserved,
4451                            enum btrfs_reserve_flush_enum flush)
4452 {
4453         u64 num_bytes = 0;
4454         int ret = -ENOSPC;
4455
4456         if (!block_rsv)
4457                 return 0;
4458
4459         spin_lock(&block_rsv->lock);
4460         num_bytes = min_reserved;
4461         if (block_rsv->reserved >= num_bytes)
4462                 ret = 0;
4463         else
4464                 num_bytes -= block_rsv->reserved;
4465         spin_unlock(&block_rsv->lock);
4466
4467         if (!ret)
4468                 return 0;
4469
4470         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4471         if (!ret) {
4472                 block_rsv_add_bytes(block_rsv, num_bytes, 0);
4473                 return 0;
4474         }
4475
4476         return ret;
4477 }
4478
4479 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
4480                             struct btrfs_block_rsv *dst_rsv,
4481                             u64 num_bytes)
4482 {
4483         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4484 }
4485
4486 void btrfs_block_rsv_release(struct btrfs_root *root,
4487                              struct btrfs_block_rsv *block_rsv,
4488                              u64 num_bytes)
4489 {
4490         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4491         if (global_rsv->full || global_rsv == block_rsv ||
4492             block_rsv->space_info != global_rsv->space_info)
4493                 global_rsv = NULL;
4494         block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
4495                                 num_bytes);
4496 }
4497
4498 /*
4499  * Helper to calculate the size of the global block reservation.
4500  * The desired value is the sum of space used by the extent tree,
4501  * checksum tree and root tree.
4502  */
4503 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
4504 {
4505         struct btrfs_space_info *sinfo;
4506         u64 num_bytes;
4507         u64 meta_used;
4508         u64 data_used;
4509         int csum_size = btrfs_super_csum_size(fs_info->super_copy);
4510
4511         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
4512         spin_lock(&sinfo->lock);
4513         data_used = sinfo->bytes_used;
4514         spin_unlock(&sinfo->lock);
4515
4516         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4517         spin_lock(&sinfo->lock);
4518         if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
4519                 data_used = 0;
4520         meta_used = sinfo->bytes_used;
4521         spin_unlock(&sinfo->lock);
4522
4523         num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
4524                     csum_size * 2;
4525         num_bytes += div64_u64(data_used + meta_used, 50);
4526
4527         if (num_bytes * 3 > meta_used)
4528                 num_bytes = div64_u64(meta_used, 3);
4529
4530         return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
4531 }
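
/*
 * Rough worked example, assuming a 4K block size and a 4 byte crc32c csum:
 * with 1GiB of data used and 1GiB of metadata used, the csum term is
 * (2^30 >> 12) * 4 * 2 = 2MiB and the 2% term is 2GiB / 50 ~= 41MiB, about
 * 43MiB in total, which stays well under the meta_used / 3 cap before being
 * aligned up to a multiple of leafsize << 10.
 */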
4532
4533 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
4534 {
4535         struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
4536         struct btrfs_space_info *sinfo = block_rsv->space_info;
4537         u64 num_bytes;
4538
4539         num_bytes = calc_global_metadata_size(fs_info);
4540
4541         spin_lock(&sinfo->lock);
4542         spin_lock(&block_rsv->lock);
4543
4544         block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);
4545
4546         num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
4547                     sinfo->bytes_reserved + sinfo->bytes_readonly +
4548                     sinfo->bytes_may_use;
4549
4550         if (sinfo->total_bytes > num_bytes) {
4551                 num_bytes = sinfo->total_bytes - num_bytes;
4552                 block_rsv->reserved += num_bytes;
4553                 sinfo->bytes_may_use += num_bytes;
4554                 trace_btrfs_space_reservation(fs_info, "space_info",
4555                                       sinfo->flags, num_bytes, 1);
4556         }
4557
4558         if (block_rsv->reserved >= block_rsv->size) {
4559                 num_bytes = block_rsv->reserved - block_rsv->size;
4560                 sinfo->bytes_may_use -= num_bytes;
4561                 trace_btrfs_space_reservation(fs_info, "space_info",
4562                                       sinfo->flags, num_bytes, 0);
4563                 sinfo->reservation_progress++;
4564                 block_rsv->reserved = block_rsv->size;
4565                 block_rsv->full = 1;
4566         }
4567
4568         spin_unlock(&block_rsv->lock);
4569         spin_unlock(&sinfo->lock);
4570 }
4571
4572 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
4573 {
4574         struct btrfs_space_info *space_info;
4575
4576         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4577         fs_info->chunk_block_rsv.space_info = space_info;
4578
4579         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4580         fs_info->global_block_rsv.space_info = space_info;
4581         fs_info->delalloc_block_rsv.space_info = space_info;
4582         fs_info->trans_block_rsv.space_info = space_info;
4583         fs_info->empty_block_rsv.space_info = space_info;
4584         fs_info->delayed_block_rsv.space_info = space_info;
4585
4586         fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
4587         fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
4588         fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
4589         fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
4590         fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
4591
4592         update_global_block_rsv(fs_info);
4593 }
4594
4595 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
4596 {
4597         block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
4598                                 (u64)-1);
4599         WARN_ON(fs_info->delalloc_block_rsv.size > 0);
4600         WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
4601         WARN_ON(fs_info->trans_block_rsv.size > 0);
4602         WARN_ON(fs_info->trans_block_rsv.reserved > 0);
4603         WARN_ON(fs_info->chunk_block_rsv.size > 0);
4604         WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
4605         WARN_ON(fs_info->delayed_block_rsv.size > 0);
4606         WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
4607 }
4608
4609 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
4610                                   struct btrfs_root *root)
4611 {
4612         if (!trans->block_rsv)
4613                 return;
4614
4615         if (!trans->bytes_reserved)
4616                 return;
4617
4618         trace_btrfs_space_reservation(root->fs_info, "transaction",
4619                                       trans->transid, trans->bytes_reserved, 0);
4620         btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
4621         trans->bytes_reserved = 0;
4622 }
4623
4624 /* Can only return 0 or -ENOSPC */
4625 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
4626                                   struct inode *inode)
4627 {
4628         struct btrfs_root *root = BTRFS_I(inode)->root;
4629         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4630         struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
4631
4632         /*
4633          * We need to hold space in order to delete our orphan item once we've
4634          * added it, so this takes the reservation so we can release it later
4635          * when we are truly done with the orphan item.
4636          */
4637         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4638         trace_btrfs_space_reservation(root->fs_info, "orphan",
4639                                       btrfs_ino(inode), num_bytes, 1);
4640         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4641 }
4642
4643 void btrfs_orphan_release_metadata(struct inode *inode)
4644 {
4645         struct btrfs_root *root = BTRFS_I(inode)->root;
4646         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4647         trace_btrfs_space_reservation(root->fs_info, "orphan",
4648                                       btrfs_ino(inode), num_bytes, 0);
4649         btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
4650 }
4651
4652 /*
4653  * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
4654  * root: the root of the parent directory
4655  * rsv: block reservation
4656  * items: the number of items that we need to do the reservation for
4657  * qgroup_reserved: used to return the reserved size in qgroup
4658  *
4659  * This function is used to reserve the space for snapshot/subvolume
4660  * creation and deletion. Those operations are different from the
4661  * common file/directory operations: they change two fs/file trees
4662  * and the root tree, and the number of items that the qgroup reserves
4663  * is different from the free space reservation. So we can not use
4664  * the space reservation mechanism in start_transaction().
4665  */
4666 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
4667                                      struct btrfs_block_rsv *rsv,
4668                                      int items,
4669                                      u64 *qgroup_reserved)
4670 {
4671         u64 num_bytes;
4672         int ret;
4673
4674         if (root->fs_info->quota_enabled) {
4675                 /* One for parent inode, two for dir entries */
4676                 num_bytes = 3 * root->leafsize;
4677                 ret = btrfs_qgroup_reserve(root, num_bytes);
4678                 if (ret)
4679                         return ret;
4680         } else {
4681                 num_bytes = 0;
4682         }
4683
4684         *qgroup_reserved = num_bytes;
4685
4686         num_bytes = btrfs_calc_trans_metadata_size(root, items);
4687         rsv->space_info = __find_space_info(root->fs_info,
4688                                             BTRFS_BLOCK_GROUP_METADATA);
4689         ret = btrfs_block_rsv_add(root, rsv, num_bytes,
4690                                   BTRFS_RESERVE_FLUSH_ALL);
4691         if (ret) {
4692                 if (*qgroup_reserved)
4693                         btrfs_qgroup_free(root, *qgroup_reserved);
4694         }
4695
4696         return ret;
4697 }
4698
4699 void btrfs_subvolume_release_metadata(struct btrfs_root *root,
4700                                       struct btrfs_block_rsv *rsv,
4701                                       u64 qgroup_reserved)
4702 {
4703         btrfs_block_rsv_release(root, rsv, (u64)-1);
4704         if (qgroup_reserved)
4705                 btrfs_qgroup_free(root, qgroup_reserved);
4706 }
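
/*
 * For illustration, a hypothetical snapshot-creation path (the real callers
 * live outside this file; rsv and nr_items are assumptions) would pair the
 * two helpers above roughly like:
 *
 *	u64 qgroup_reserved;
 *
 *	ret = btrfs_subvolume_reserve_metadata(root, &rsv, nr_items,
 *					       &qgroup_reserved);
 *	if (ret)
 *		return ret;
 *	... insert the new root, dir entries and inode items ...
 *	btrfs_subvolume_release_metadata(root, &rsv, qgroup_reserved);
 */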
4707
4708 /**
4709  * drop_outstanding_extent - drop an outstanding extent
4710  * @inode: the inode we're dropping the extent for
4711  *
4712  * This is called when we are freeing up an outstanding extent, either
4713  * after an error or after an extent is written.  This will return the number of
4714  * reserved extents that need to be freed.  This must be called with
4715  * BTRFS_I(inode)->lock held.
4716  */
4717 static unsigned drop_outstanding_extent(struct inode *inode)
4718 {
4719         unsigned drop_inode_space = 0;
4720         unsigned dropped_extents = 0;
4721
4722         BUG_ON(!BTRFS_I(inode)->outstanding_extents);
4723         BTRFS_I(inode)->outstanding_extents--;
4724
4725         if (BTRFS_I(inode)->outstanding_extents == 0 &&
4726             test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4727                                &BTRFS_I(inode)->runtime_flags))
4728                 drop_inode_space = 1;
4729
4730         /*
4731          * If we have the same number of outstanding extents as we have reserved,
4732          * or more, then we need to leave the reserved extents count alone.
4733          */
4734         if (BTRFS_I(inode)->outstanding_extents >=
4735             BTRFS_I(inode)->reserved_extents)
4736                 return drop_inode_space;
4737
4738         dropped_extents = BTRFS_I(inode)->reserved_extents -
4739                 BTRFS_I(inode)->outstanding_extents;
4740         BTRFS_I(inode)->reserved_extents -= dropped_extents;
4741         return dropped_extents + drop_inode_space;
4742 }
4743
4744 /**
4745  * calc_csum_metadata_size - return the amount of metadata space that must be
4746  *      reserved/free'd for the given bytes.
4747  * @inode: the inode we're manipulating
4748  * @num_bytes: the number of bytes in question
4749  * @reserve: 1 if we are reserving space, 0 if we are freeing space
4750  *
4751  * This adjusts the number of csum_bytes in the inode and then returns the
4752  * correct amount of metadata that must either be reserved or freed.  We
4753  * calculate how many checksums we can fit into one leaf and then divide the
4754  * number of bytes that will need to be checksummed by this value to figure out
4755  * how many checksums will be required.  If we are adding bytes then the number
4756  * may go up and we will return the number of additional bytes that must be
4757  * reserved.  If it is going down we will return the number of bytes that must
4758  * be freed.
4759  *
4760  * This must be called with BTRFS_I(inode)->lock held.
4761  */
4762 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
4763                                    int reserve)
4764 {
4765         struct btrfs_root *root = BTRFS_I(inode)->root;
4766         u64 csum_size;
4767         int num_csums_per_leaf;
4768         int num_csums;
4769         int old_csums;
4770
4771         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
4772             BTRFS_I(inode)->csum_bytes == 0)
4773                 return 0;
4774
4775         old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4776         if (reserve)
4777                 BTRFS_I(inode)->csum_bytes += num_bytes;
4778         else
4779                 BTRFS_I(inode)->csum_bytes -= num_bytes;
4780         csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
4781         num_csums_per_leaf = (int)div64_u64(csum_size,
4782                                             sizeof(struct btrfs_csum_item) +
4783                                             sizeof(struct btrfs_disk_key));
4784         num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4785         num_csums = num_csums + num_csums_per_leaf - 1;
4786         num_csums = num_csums / num_csums_per_leaf;
4787
4788         old_csums = old_csums + num_csums_per_leaf - 1;
4789         old_csums = old_csums / num_csums_per_leaf;
4790
4791         /* No change, no need to reserve more */
4792         if (old_csums == num_csums)
4793                 return 0;
4794
4795         if (reserve)
4796                 return btrfs_calc_trans_metadata_size(root,
4797                                                       num_csums - old_csums);
4798
4799         return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
4800 }
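
/*
 * Small worked example, assuming a 4K sectorsize and a leaf that holds at
 * least 256 csum items: growing csum_bytes from 0 to 1MiB takes num_csums
 * from 0 to 256, so old_csums rounds up to 0 leaves and num_csums to 1 leaf,
 * and with reserve set we return btrfs_calc_trans_metadata_size(root, 1).
 */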
4801
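/*
 * Reserve metadata space for delalloc against the delalloc block rsv.  This
 * bumps the inode's outstanding extent count, reserves room for the new
 * extent items, the inode update and the csums that num_bytes of data will
 * need, and also charges the qgroup when quotas are enabled.
 */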
4802 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
4803 {
4804         struct btrfs_root *root = BTRFS_I(inode)->root;
4805         struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
4806         u64 to_reserve = 0;
4807         u64 csum_bytes;
4808         unsigned nr_extents = 0;
4809         int extra_reserve = 0;
4810         enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
4811         int ret = 0;
4812         bool delalloc_lock = true;
4813         u64 to_free = 0;
4814         unsigned dropped;
4815
4816         /* If we are a free space inode we need to not flush since we will be in
4817          * the middle of a transaction commit.  We also don't need the delalloc
4818          * mutex since we won't race with anybody.  We need this mostly to make
4819          * lockdep shut its filthy mouth.
4820          */
4821         if (btrfs_is_free_space_inode(inode)) {
4822                 flush = BTRFS_RESERVE_NO_FLUSH;
4823                 delalloc_lock = false;
4824         }
4825
4826         if (flush != BTRFS_RESERVE_NO_FLUSH &&
4827             btrfs_transaction_in_commit(root->fs_info))
4828                 schedule_timeout(1);
4829
4830         if (delalloc_lock)
4831                 mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
4832
4833         num_bytes = ALIGN(num_bytes, root->sectorsize);
4834
4835         spin_lock(&BTRFS_I(inode)->lock);
4836         BTRFS_I(inode)->outstanding_extents++;
4837
4838         if (BTRFS_I(inode)->outstanding_extents >
4839             BTRFS_I(inode)->reserved_extents)
4840                 nr_extents = BTRFS_I(inode)->outstanding_extents -
4841                         BTRFS_I(inode)->reserved_extents;
4842
4843         /*
4844          * Add an item to reserve for updating the inode when we complete the
4845          * delalloc io.
4846          */
4847         if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4848                       &BTRFS_I(inode)->runtime_flags)) {
4849                 nr_extents++;
4850                 extra_reserve = 1;
4851         }
4852
4853         to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
4854         to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
4855         csum_bytes = BTRFS_I(inode)->csum_bytes;
4856         spin_unlock(&BTRFS_I(inode)->lock);
4857
4858         if (root->fs_info->quota_enabled) {
4859                 ret = btrfs_qgroup_reserve(root, num_bytes +
4860                                            nr_extents * root->leafsize);
4861                 if (ret)
4862                         goto out_fail;
4863         }
4864
4865         ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
4866         if (unlikely(ret)) {
4867                 if (root->fs_info->quota_enabled)
4868                         btrfs_qgroup_free(root, num_bytes +
4869                                                 nr_extents * root->leafsize);
4870                 goto out_fail;
4871         }
4872
4873         spin_lock(&BTRFS_I(inode)->lock);
4874         if (extra_reserve) {
4875                 set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4876                         &BTRFS_I(inode)->runtime_flags);
4877                 nr_extents--;
4878         }
4879         BTRFS_I(inode)->reserved_extents += nr_extents;
4880         spin_unlock(&BTRFS_I(inode)->lock);
4881
4882         if (delalloc_lock)
4883                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
4884
4885         if (to_reserve)
4886                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
4887                                               btrfs_ino(inode), to_reserve, 1);
4888         block_rsv_add_bytes(block_rsv, to_reserve, 1);
4889
4890         return 0;
4891
4892 out_fail:
4893         spin_lock(&BTRFS_I(inode)->lock);
4894         dropped = drop_outstanding_extent(inode);
4895         /*
4896          * If the inode's csum_bytes is the same as the original
4897          * csum_bytes then we know we haven't raced with any free()ers
4898          * so we can just reduce our inode's csum bytes and carry on.
4899          */
4900         if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
4901                 calc_csum_metadata_size(inode, num_bytes, 0);
4902         } else {
4903                 u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
4904                 u64 bytes;
4905
4906                 /*
4907                  * This is tricky, but first we need to figure out how much we
4908                  * free'd from any free-ers that occurred during this
4909                  * reservation, so we reset ->csum_bytes to the csum_bytes
4910                  * before we dropped our lock, and then call the free for the
4911                  * number of bytes that were freed while we were trying our
4912                  * reservation.
4913                  */
4914                 bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
4915                 BTRFS_I(inode)->csum_bytes = csum_bytes;
4916                 to_free = calc_csum_metadata_size(inode, bytes, 0);
4917
4918
4919
4921                  * been making this reservation and our ->csum_bytes were not
4922                  * artificially inflated.
4923                  */
4924                 BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
4925                 bytes = csum_bytes - orig_csum_bytes;
4926                 bytes = calc_csum_metadata_size(inode, bytes, 0);
4927
4928                 /*
4929                  * Now reset ->csum_bytes to what it should be.  If bytes is
4930                  * more than to_free then we would have free'd more space had we
4931                  * not had an artificially high ->csum_bytes, so we need to free
4932                  * the remainder.  If bytes is the same or less then we don't
4933                  * need to do anything, the other free-ers did the correct
4934                  * thing.
4935                  */
4936                 BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
4937                 if (bytes > to_free)
4938                         to_free = bytes - to_free;
4939                 else
4940                         to_free = 0;
4941         }
4942         spin_unlock(&BTRFS_I(inode)->lock);
4943         if (dropped)
4944                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
4945
4946         if (to_free) {
4947                 btrfs_block_rsv_release(root, block_rsv, to_free);
4948                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
4949                                               btrfs_ino(inode), to_free, 0);
4950         }
4951         if (delalloc_lock)
4952                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
4953         return ret;
4954 }
4955
4956 /**
4957  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
4958  * @inode: the inode to release the reservation for
4959  * @num_bytes: the number of bytes we're releasing
4960  *
4961  * This will release the metadata reservation for an inode.  This can be called
4962  * once we complete IO for a given set of bytes to release their metadata
4963  * reservations.
4964  */
4965 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
4966 {
4967         struct btrfs_root *root = BTRFS_I(inode)->root;
4968         u64 to_free = 0;
4969         unsigned dropped;
4970
4971         num_bytes = ALIGN(num_bytes, root->sectorsize);
4972         spin_lock(&BTRFS_I(inode)->lock);
4973         dropped = drop_outstanding_extent(inode);
4974
4975         if (num_bytes)
4976                 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
4977         spin_unlock(&BTRFS_I(inode)->lock);
4978         if (dropped > 0)
4979                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
4980
4981         trace_btrfs_space_reservation(root->fs_info, "delalloc",
4982                                       btrfs_ino(inode), to_free, 0);
4983         if (root->fs_info->quota_enabled) {
4984                 btrfs_qgroup_free(root, num_bytes +
4985                                         dropped * root->leafsize);
4986         }
4987
4988         btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
4989                                 to_free);
4990 }
4991
4992 /**
4993  * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
4994  * @inode: inode we're writing to
4995  * @num_bytes: the number of bytes we want to allocate
4996  *
4997  * This will do the following things
4998  *
4999  * o reserve space in the data space info for num_bytes
5000  * o reserve space in the metadata space info based on the number of
5001  *   outstanding extents and how much csum space will be needed
5002  * o add to the inode's ->delalloc_bytes
5003  * o add it to the fs_info's delalloc inodes list.
5004  *
5005  * This will return 0 for success and -ENOSPC if there is no space left.
5006  */
5007 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
5008 {
5009         int ret;
5010
5011         ret = btrfs_check_data_free_space(inode, num_bytes);
5012         if (ret)
5013                 return ret;
5014
5015         ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
5016         if (ret) {
5017                 btrfs_free_reserved_data_space(inode, num_bytes);
5018                 return ret;
5019         }
5020
5021         return 0;
5022 }
5023
5024 /**
5025  * btrfs_delalloc_release_space - release data and metadata space for delalloc
5026  * @inode: inode we're releasing space for
5027  * @num_bytes: the number of bytes we want to free up
5028  *
5029  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
5030  * called in the case that we don't need the metadata AND data reservations
5031  * anymore, for example if there is an error or we insert an inline extent.
5032  *
5033  * This function will release the metadata space that was not used and will
5034  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
5035  * list if there are no delalloc bytes left.
5036  */
5037 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
5038 {
5039         btrfs_delalloc_release_metadata(inode, num_bytes);
5040         btrfs_free_reserved_data_space(inode, num_bytes);
5041 }
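
/*
 * For illustration, a hypothetical buffered-write path (the caller and the
 * failure condition are assumptions, not code in this file) pairs the two
 * helpers above as:
 *
 *	ret = btrfs_delalloc_reserve_space(inode, num_bytes);
 *	if (ret)
 *		return ret;
 *	... dirty the pages ...
 *	if (the copy failed or an inline extent was written)
 *		btrfs_delalloc_release_space(inode, num_bytes);
 *
 * otherwise only the metadata half is released later, via
 * btrfs_delalloc_release_metadata(), once the IO completes.
 */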
5042
5043 static int update_block_group(struct btrfs_root *root,
5044                               u64 bytenr, u64 num_bytes, int alloc)
5045 {
5046         struct btrfs_block_group_cache *cache = NULL;
5047         struct btrfs_fs_info *info = root->fs_info;
5048         u64 total = num_bytes;
5049         u64 old_val;
5050         u64 byte_in_group;
5051         int factor;
5052
5053         /* block accounting for super block */
5054         spin_lock(&info->delalloc_lock);
5055         old_val = btrfs_super_bytes_used(info->super_copy);
5056         if (alloc)
5057                 old_val += num_bytes;
5058         else
5059                 old_val -= num_bytes;
5060         btrfs_set_super_bytes_used(info->super_copy, old_val);
5061         spin_unlock(&info->delalloc_lock);
5062
5063         while (total) {
5064                 cache = btrfs_lookup_block_group(info, bytenr);
5065                 if (!cache)
5066                         return -ENOENT;
5067                 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
5068                                     BTRFS_BLOCK_GROUP_RAID1 |
5069                                     BTRFS_BLOCK_GROUP_RAID10))
5070                         factor = 2;
5071                 else
5072                         factor = 1;
5073                 /*
5074                  * If this block group has free space cache written out, we
5075                  * need to make sure to load it if we are removing space.  This
5076                  * is because we need the unpinning stage to actually add the
5077                  * space back to the block group, otherwise we will leak space.
5078                  */
5079                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
5080                         cache_block_group(cache, 1);
5081
5082                 byte_in_group = bytenr - cache->key.objectid;
5083                 WARN_ON(byte_in_group > cache->key.offset);
5084
5085                 spin_lock(&cache->space_info->lock);
5086                 spin_lock(&cache->lock);
5087
5088                 if (btrfs_test_opt(root, SPACE_CACHE) &&
5089                     cache->disk_cache_state < BTRFS_DC_CLEAR)
5090                         cache->disk_cache_state = BTRFS_DC_CLEAR;
5091
5092                 cache->dirty = 1;
5093                 old_val = btrfs_block_group_used(&cache->item);
5094                 num_bytes = min(total, cache->key.offset - byte_in_group);
5095                 if (alloc) {
5096                         old_val += num_bytes;
5097                         btrfs_set_block_group_used(&cache->item, old_val);
5098                         cache->reserved -= num_bytes;
5099                         cache->space_info->bytes_reserved -= num_bytes;
5100                         cache->space_info->bytes_used += num_bytes;
5101                         cache->space_info->disk_used += num_bytes * factor;
5102                         spin_unlock(&cache->lock);
5103                         spin_unlock(&cache->space_info->lock);
5104                 } else {
5105                         old_val -= num_bytes;
5106                         btrfs_set_block_group_used(&cache->item, old_val);
5107                         cache->pinned += num_bytes;
5108                         cache->space_info->bytes_pinned += num_bytes;
5109                         cache->space_info->bytes_used -= num_bytes;
5110                         cache->space_info->disk_used -= num_bytes * factor;
5111                         spin_unlock(&cache->lock);
5112                         spin_unlock(&cache->space_info->lock);
5113
5114                         set_extent_dirty(info->pinned_extents,
5115                                          bytenr, bytenr + num_bytes - 1,
5116                                          GFP_NOFS | __GFP_NOFAIL);
5117                 }
5118                 btrfs_put_block_group(cache);
5119                 total -= num_bytes;
5120                 bytenr += num_bytes;
5121         }
5122         return 0;
5123 }
5124
5125 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
5126 {
5127         struct btrfs_block_group_cache *cache;
5128         u64 bytenr;
5129
5130         spin_lock(&root->fs_info->block_group_cache_lock);
5131         bytenr = root->fs_info->first_logical_byte;
5132         spin_unlock(&root->fs_info->block_group_cache_lock);
5133
5134         if (bytenr < (u64)-1)
5135                 return bytenr;
5136
5137         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
5138         if (!cache)
5139                 return 0;
5140
5141         bytenr = cache->key.objectid;
5142         btrfs_put_block_group(cache);
5143
5144         return bytenr;
5145 }
5146
5147 static int pin_down_extent(struct btrfs_root *root,
5148                            struct btrfs_block_group_cache *cache,
5149                            u64 bytenr, u64 num_bytes, int reserved)
5150 {
5151         spin_lock(&cache->space_info->lock);
5152         spin_lock(&cache->lock);
5153         cache->pinned += num_bytes;
5154         cache->space_info->bytes_pinned += num_bytes;
5155         if (reserved) {
5156                 cache->reserved -= num_bytes;
5157                 cache->space_info->bytes_reserved -= num_bytes;
5158         }
5159         spin_unlock(&cache->lock);
5160         spin_unlock(&cache->space_info->lock);
5161
5162         set_extent_dirty(root->fs_info->pinned_extents, bytenr,
5163                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
5164         return 0;
5165 }
5166
5167 /*
5168  * this function must be called within a transaction
5169  */
5170 int btrfs_pin_extent(struct btrfs_root *root,
5171                      u64 bytenr, u64 num_bytes, int reserved)
5172 {
5173         struct btrfs_block_group_cache *cache;
5174
5175         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5176         BUG_ON(!cache); /* Logic error */
5177
5178         pin_down_extent(root, cache, bytenr, num_bytes, reserved);
5179
5180         btrfs_put_block_group(cache);
5181         return 0;
5182 }
5183
5184 /*
5185  * this function must be called within transaction
5186  */
5187 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
5188                                     u64 bytenr, u64 num_bytes)
5189 {
5190         struct btrfs_block_group_cache *cache;
5191
5192         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5193         BUG_ON(!cache); /* Logic error */
5194
5195         /*
5196          * pull in the free space cache (if any) so that our pin
5197          * removes the free space from the cache.  We have load_only set
5198          * to one because the slow code to read in the free extents does check
5199          * the pinned extents.
5200          */
5201         cache_block_group(cache, 1);
5202
5203         pin_down_extent(root, cache, bytenr, num_bytes, 0);
5204
5205         /* remove us from the free space cache (if we're there at all) */
5206         btrfs_remove_free_space(cache, bytenr, num_bytes);
5207         btrfs_put_block_group(cache);
5208         return 0;
5209 }
5210
5211 /**
5212  * btrfs_update_reserved_bytes - update the block_group and space info counters
5213  * @cache:      The cache we are manipulating
5214  * @num_bytes:  The number of bytes in question
5215  * @reserve:    One of the reservation enums
5216  *
5217  * This is called by the allocator when it reserves space, or by somebody who is
5218  * freeing space that was never actually used on disk.  For example if you
5219  * reserve some space for a new leaf in transaction A and before transaction A
5220  * commits you free that leaf, you call this with reserve set to 0 in order to
5221  * clear the reservation.
5222  *
5223  * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
5224  * ENOSPC accounting.  For data we handle the reservation through clearing the
5225  * delalloc bits in the io_tree.  We have to do this since we could end up
5226  * allocating less disk space for the amount of data we have reserved in the
5227  * case of compression.
5228  *
5229  * If this is a reservation and the block group has become read only we cannot
5230  * make the reservation and return -EAGAIN, otherwise this function always
5231  * succeeds.
5232  */
5233 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
5234                                        u64 num_bytes, int reserve)
5235 {
5236         struct btrfs_space_info *space_info = cache->space_info;
5237         int ret = 0;
5238
5239         spin_lock(&space_info->lock);
5240         spin_lock(&cache->lock);
5241         if (reserve != RESERVE_FREE) {
5242                 if (cache->ro) {
5243                         ret = -EAGAIN;
5244                 } else {
5245                         cache->reserved += num_bytes;
5246                         space_info->bytes_reserved += num_bytes;
5247                         if (reserve == RESERVE_ALLOC) {
5248                                 trace_btrfs_space_reservation(cache->fs_info,
5249                                                 "space_info", space_info->flags,
5250                                                 num_bytes, 0);
5251                                 space_info->bytes_may_use -= num_bytes;
5252                         }
5253                 }
5254         } else {
5255                 if (cache->ro)
5256                         space_info->bytes_readonly += num_bytes;
5257                 cache->reserved -= num_bytes;
5258                 space_info->bytes_reserved -= num_bytes;
5259                 space_info->reservation_progress++;
5260         }
5261         spin_unlock(&cache->lock);
5262         spin_unlock(&space_info->lock);
5263         return ret;
5264 }
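
/*
 * For illustration: the allocator charges space with RESERVE_ALLOC when it
 * hands it out, and the same space is given back with RESERVE_FREE if it
 * ends up never being written, e.g.
 *
 *	btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_ALLOC);
 *	...
 *	btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_FREE);
 */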
5265
5266 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
5267                                 struct btrfs_root *root)
5268 {
5269         struct btrfs_fs_info *fs_info = root->fs_info;
5270         struct btrfs_caching_control *next;
5271         struct btrfs_caching_control *caching_ctl;
5272         struct btrfs_block_group_cache *cache;
5273
5274         down_write(&fs_info->extent_commit_sem);
5275
5276         list_for_each_entry_safe(caching_ctl, next,
5277                                  &fs_info->caching_block_groups, list) {
5278                 cache = caching_ctl->block_group;
5279                 if (block_group_cache_done(cache)) {
5280                         cache->last_byte_to_unpin = (u64)-1;
5281                         list_del_init(&caching_ctl->list);
5282                         put_caching_control(caching_ctl);
5283                 } else {
5284                         cache->last_byte_to_unpin = caching_ctl->progress;
5285                 }
5286         }
5287
5288         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5289                 fs_info->pinned_extents = &fs_info->freed_extents[1];
5290         else
5291                 fs_info->pinned_extents = &fs_info->freed_extents[0];
5292
5293         up_write(&fs_info->extent_commit_sem);
5294
5295         update_global_block_rsv(fs_info);
5296 }
5297
5298 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
5299 {
5300         struct btrfs_fs_info *fs_info = root->fs_info;
5301         struct btrfs_block_group_cache *cache = NULL;
5302         struct btrfs_space_info *space_info;
5303         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5304         u64 len;
5305         bool readonly;
5306
5307         while (start <= end) {
5308                 readonly = false;
5309                 if (!cache ||
5310                     start >= cache->key.objectid + cache->key.offset) {
5311                         if (cache)
5312                                 btrfs_put_block_group(cache);
5313                         cache = btrfs_lookup_block_group(fs_info, start);
5314                         BUG_ON(!cache); /* Logic error */
5315                 }
5316
5317                 len = cache->key.objectid + cache->key.offset - start;
5318                 len = min(len, end + 1 - start);
5319
5320                 if (start < cache->last_byte_to_unpin) {
5321                         len = min(len, cache->last_byte_to_unpin - start);
5322                         btrfs_add_free_space(cache, start, len);
5323                 }
5324
5325                 start += len;
5326                 space_info = cache->space_info;
5327
5328                 spin_lock(&space_info->lock);
5329                 spin_lock(&cache->lock);
5330                 cache->pinned -= len;
5331                 space_info->bytes_pinned -= len;
5332                 if (cache->ro) {
5333                         space_info->bytes_readonly += len;
5334                         readonly = true;
5335                 }
5336                 spin_unlock(&cache->lock);
5337                 if (!readonly && global_rsv->space_info == space_info) {
5338                         spin_lock(&global_rsv->lock);
5339                         if (!global_rsv->full) {
5340                                 len = min(len, global_rsv->size -
5341                                           global_rsv->reserved);
5342                                 global_rsv->reserved += len;
5343                                 space_info->bytes_may_use += len;
5344                                 if (global_rsv->reserved >= global_rsv->size)
5345                                         global_rsv->full = 1;
5346                         }
5347                         spin_unlock(&global_rsv->lock);
5348                 }
5349                 spin_unlock(&space_info->lock);
5350         }
5351
5352         if (cache)
5353                 btrfs_put_block_group(cache);
5354         return 0;
5355 }
5356
5357 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
5358                                struct btrfs_root *root)
5359 {
5360         struct btrfs_fs_info *fs_info = root->fs_info;
5361         struct extent_io_tree *unpin;
5362         u64 start;
5363         u64 end;
5364         int ret;
5365
5366         if (trans->aborted)
5367                 return 0;
5368
5369         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5370                 unpin = &fs_info->freed_extents[1];
5371         else
5372                 unpin = &fs_info->freed_extents[0];
5373
5374         while (1) {
5375                 ret = find_first_extent_bit(unpin, 0, &start, &end,
5376                                             EXTENT_DIRTY, NULL);
5377                 if (ret)
5378                         break;
5379
5380                 if (btrfs_test_opt(root, DISCARD))
5381                         ret = btrfs_discard_extent(root, start,
5382                                                    end + 1 - start, NULL);
5383
5384                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
5385                 unpin_extent_range(root, start, end);
5386                 cond_resched();
5387         }
5388
5389         return 0;
5390 }
5391
5392 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
5393                                 struct btrfs_root *root,
5394                                 u64 bytenr, u64 num_bytes, u64 parent,
5395                                 u64 root_objectid, u64 owner_objectid,
5396                                 u64 owner_offset, int refs_to_drop,
5397                                 struct btrfs_delayed_extent_op *extent_op)
5398 {
5399         struct btrfs_key key;
5400         struct btrfs_path *path;
5401         struct btrfs_fs_info *info = root->fs_info;
5402         struct btrfs_root *extent_root = info->extent_root;
5403         struct extent_buffer *leaf;
5404         struct btrfs_extent_item *ei;
5405         struct btrfs_extent_inline_ref *iref;
5406         int ret;
5407         int is_data;
5408         int extent_slot = 0;
5409         int found_extent = 0;
5410         int num_to_del = 1;
5411         u32 item_size;
5412         u64 refs;
5413         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
5414                                                  SKINNY_METADATA);
5415
5416         path = btrfs_alloc_path();
5417         if (!path)
5418                 return -ENOMEM;
5419
5420         path->reada = 1;
5421         path->leave_spinning = 1;
5422
5423         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
5424         BUG_ON(!is_data && refs_to_drop != 1);
5425
5426         if (is_data)
5427                 skinny_metadata = 0;
5428
5429         ret = lookup_extent_backref(trans, extent_root, path, &iref,
5430                                     bytenr, num_bytes, parent,
5431                                     root_objectid, owner_objectid,
5432                                     owner_offset);
5433         if (ret == 0) {
5434                 extent_slot = path->slots[0];
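                /*
                 * lookup_extent_backref() left path->slots[0] at the backref
                 * item; if the extent item lives in the same leaf it sits in
                 * an earlier slot, so scan backwards a few slots to find it.
                 */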
5435                 while (extent_slot >= 0) {
5436                         btrfs_item_key_to_cpu(path->nodes[0], &key,
5437                                               extent_slot);
5438                         if (key.objectid != bytenr)
5439                                 break;
5440                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
5441                             key.offset == num_bytes) {
5442                                 found_extent = 1;
5443                                 break;
5444                         }
5445                         if (key.type == BTRFS_METADATA_ITEM_KEY &&
5446                             key.offset == owner_objectid) {
5447                                 found_extent = 1;
5448                                 break;
5449                         }
5450                         if (path->slots[0] - extent_slot > 5)
5451                                 break;
5452                         extent_slot--;
5453                 }
5454 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5455                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
5456                 if (found_extent && item_size < sizeof(*ei))
5457                         found_extent = 0;
5458 #endif
5459                 if (!found_extent) {
5460                         BUG_ON(iref);
5461                         ret = remove_extent_backref(trans, extent_root, path,
5462                                                     NULL, refs_to_drop,
5463                                                     is_data);
5464                         if (ret) {
5465                                 btrfs_abort_transaction(trans, extent_root, ret);
5466                                 goto out;
5467                         }
5468                         btrfs_release_path(path);
5469                         path->leave_spinning = 1;
5470
5471                         key.objectid = bytenr;
5472                         key.type = BTRFS_EXTENT_ITEM_KEY;
5473                         key.offset = num_bytes;
5474
5475                         if (!is_data && skinny_metadata) {
5476                                 key.type = BTRFS_METADATA_ITEM_KEY;
5477                                 key.offset = owner_objectid;
5478                         }
5479
5480                         ret = btrfs_search_slot(trans, extent_root,
5481                                                 &key, path, -1, 1);
5482                         if (ret > 0 && skinny_metadata && path->slots[0]) {
5483                                 /*
5484                                  * Couldn't find our skinny metadata item,
5485                                  * see if we have ye olde extent item.
5486                                  */
5487                                 path->slots[0]--;
5488                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
5489                                                       path->slots[0]);
5490                                 if (key.objectid == bytenr &&
5491                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
5492                                     key.offset == num_bytes)
5493                                         ret = 0;
5494                         }
5495
5496                         if (ret > 0 && skinny_metadata) {
5497                                 skinny_metadata = false;
5498                                 key.type = BTRFS_EXTENT_ITEM_KEY;
5499                                 key.offset = num_bytes;
5500                                 btrfs_release_path(path);
5501                                 ret = btrfs_search_slot(trans, extent_root,
5502                                                         &key, path, -1, 1);
5503                         }
5504
5505                         if (ret) {
5506                                 btrfs_err(info, "umm, got %d back from search, was looking for %llu",
5507                                         ret, (unsigned long long)bytenr);
5508                                 if (ret > 0)
5509                                         btrfs_print_leaf(extent_root,
5510                                                          path->nodes[0]);
5511                         }
5512                         if (ret < 0) {
5513                                 btrfs_abort_transaction(trans, extent_root, ret);
5514                                 goto out;
5515                         }
5516                         extent_slot = path->slots[0];
5517                 }
5518         } else if (ret == -ENOENT) {
5519                 btrfs_print_leaf(extent_root, path->nodes[0]);
5520                 WARN_ON(1);
5521                 btrfs_err(info,
5522                         "unable to find ref byte nr %llu parent %llu root %llu  owner %llu offset %llu",
5523                         (unsigned long long)bytenr,
5524                         (unsigned long long)parent,
5525                         (unsigned long long)root_objectid,
5526                         (unsigned long long)owner_objectid,
5527                         (unsigned long long)owner_offset);
5528         } else {
5529                 btrfs_abort_transaction(trans, extent_root, ret);
5530                 goto out;
5531         }
5532
5533         leaf = path->nodes[0];
5534         item_size = btrfs_item_size_nr(leaf, extent_slot);
5535 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5536         if (item_size < sizeof(*ei)) {
5537                 BUG_ON(found_extent || extent_slot != path->slots[0]);
5538                 ret = convert_extent_item_v0(trans, extent_root, path,
5539                                              owner_objectid, 0);
5540                 if (ret < 0) {
5541                         btrfs_abort_transaction(trans, extent_root, ret);
5542                         goto out;
5543                 }
5544
5545                 btrfs_release_path(path);
5546                 path->leave_spinning = 1;
5547
5548                 key.objectid = bytenr;
5549                 key.type = BTRFS_EXTENT_ITEM_KEY;
5550                 key.offset = num_bytes;
5551
5552                 ret = btrfs_search_slot(trans, extent_root, &key, path,
5553                                         -1, 1);
5554                 if (ret) {
5555                         btrfs_err(info, "umm, got %d back from search, was looking for %llu",
5556                                 ret, (unsigned long long)bytenr);
5557                         btrfs_print_leaf(extent_root, path->nodes[0]);
5558                 }
5559                 if (ret < 0) {
5560                         btrfs_abort_transaction(trans, extent_root, ret);
5561                         goto out;
5562                 }
5563
5564                 extent_slot = path->slots[0];
5565                 leaf = path->nodes[0];
5566                 item_size = btrfs_item_size_nr(leaf, extent_slot);
5567         }
5568 #endif
5569         BUG_ON(item_size < sizeof(*ei));
5570         ei = btrfs_item_ptr(leaf, extent_slot,
5571                             struct btrfs_extent_item);
5572         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
5573             key.type == BTRFS_EXTENT_ITEM_KEY) {
5574                 struct btrfs_tree_block_info *bi;
5575                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
5576                 bi = (struct btrfs_tree_block_info *)(ei + 1);
5577                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
5578         }
5579
5580         refs = btrfs_extent_refs(leaf, ei);
5581         BUG_ON(refs < refs_to_drop);
5582         refs -= refs_to_drop;
5583
5584         if (refs > 0) {
5585                 if (extent_op)
5586                         __run_delayed_extent_op(extent_op, leaf, ei);
5587                 /*
5588                  * In the case of an inline back ref, the reference count
5589                  * will be updated by remove_extent_backref
5590                  */
5591                 if (iref) {
5592                         BUG_ON(!found_extent);
5593                 } else {
5594                         btrfs_set_extent_refs(leaf, ei, refs);
5595                         btrfs_mark_buffer_dirty(leaf);
5596                 }
5597                 if (found_extent) {
5598                         ret = remove_extent_backref(trans, extent_root, path,
5599                                                     iref, refs_to_drop,
5600                                                     is_data);
5601                         if (ret) {
5602                                 btrfs_abort_transaction(trans, extent_root, ret);
5603                                 goto out;
5604                         }
5605                 }
5606         } else {
5607                 if (found_extent) {
5608                         BUG_ON(is_data && refs_to_drop !=
5609                                extent_data_ref_count(root, path, iref));
5610                         if (iref) {
5611                                 BUG_ON(path->slots[0] != extent_slot);
5612                         } else {
5613                                 BUG_ON(path->slots[0] != extent_slot + 1);
5614                                 path->slots[0] = extent_slot;
5615                                 num_to_del = 2;
5616                         }
5617                 }
5618
5619                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
5620                                       num_to_del);
5621                 if (ret) {
5622                         btrfs_abort_transaction(trans, extent_root, ret);
5623                         goto out;
5624                 }
5625                 btrfs_release_path(path);
5626
5627                 if (is_data) {
5628                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
5629                         if (ret) {
5630                                 btrfs_abort_transaction(trans, extent_root, ret);
5631                                 goto out;
5632                         }
5633                 }
5634
5635                 ret = update_block_group(root, bytenr, num_bytes, 0);
5636                 if (ret) {
5637                         btrfs_abort_transaction(trans, extent_root, ret);
5638                         goto out;
5639                 }
5640         }
5641 out:
5642         btrfs_free_path(path);
5643         return ret;
5644 }
5645
5646 /*
5647  * when we free a block, it is possible (and likely) that we free the last
5648  * delayed ref for that extent as well.  This searches the delayed ref tree for
5649  * a given extent, and if there are no other delayed refs to be processed, it
5650  * removes it from the tree.
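 *
 * Returns 1 when the head was removed and still had must_insert_reserved
 * set (the extent never made it into the extent tree, so the caller can
 * free the space itself), 0 otherwise.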
5651  */
5652 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
5653                                       struct btrfs_root *root, u64 bytenr)
5654 {
5655         struct btrfs_delayed_ref_head *head;
5656         struct btrfs_delayed_ref_root *delayed_refs;
5657         struct btrfs_delayed_ref_node *ref;
5658         struct rb_node *node;
5659         int ret = 0;
5660
5661         delayed_refs = &trans->transaction->delayed_refs;
5662         spin_lock(&delayed_refs->lock);
5663         head = btrfs_find_delayed_ref_head(trans, bytenr);
5664         if (!head)
5665                 goto out;
5666
5667         node = rb_prev(&head->node.rb_node);
5668         if (!node)
5669                 goto out;
5670
5671         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
5672
5673         /* there are still entries for this ref, we can't drop it */
5674         if (ref->bytenr == bytenr)
5675                 goto out;
5676
5677         if (head->extent_op) {
5678                 if (!head->must_insert_reserved)
5679                         goto out;
5680                 btrfs_free_delayed_extent_op(head->extent_op);
5681                 head->extent_op = NULL;
5682         }
5683
5684         /*
5685          * waiting for the lock here would deadlock.  If someone else has it
5686          * locked, they are already in the process of dropping it anyway
5687          */
5688         if (!mutex_trylock(&head->mutex))
5689                 goto out;
5690
5691         /*
5692          * at this point we have a head with no other entries.  Go
5693          * ahead and process it.
5694          */
5695         head->node.in_tree = 0;
5696         rb_erase(&head->node.rb_node, &delayed_refs->root);
5697
5698         delayed_refs->num_entries--;
5699
5700         /*
5701          * we don't take a ref on the node because we're removing it from the
5702          * tree, so we just steal the ref the tree was holding.
5703          */
5704         delayed_refs->num_heads--;
5705         if (list_empty(&head->cluster))
5706                 delayed_refs->num_heads_ready--;
5707
5708         list_del_init(&head->cluster);
5709         spin_unlock(&delayed_refs->lock);
5710
5711         BUG_ON(head->extent_op);
5712         if (head->must_insert_reserved)
5713                 ret = 1;
5714
5715         mutex_unlock(&head->mutex);
5716         btrfs_put_delayed_ref(&head->node);
5717         return ret;
5718 out:
5719         spin_unlock(&delayed_refs->lock);
5720         return 0;
5721 }
5722
5723 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
5724                            struct btrfs_root *root,
5725                            struct extent_buffer *buf,
5726                            u64 parent, int last_ref)
5727 {
5728         struct btrfs_block_group_cache *cache = NULL;
5729         int ret;
5730
5731         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5732                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
5733                                         buf->start, buf->len,
5734                                         parent, root->root_key.objectid,
5735                                         btrfs_header_level(buf),
5736                                         BTRFS_DROP_DELAYED_REF, NULL, 0);
5737                 BUG_ON(ret); /* -ENOMEM */
5738         }
5739
5740         if (!last_ref)
5741                 return;
5742
5743         cache = btrfs_lookup_block_group(root->fs_info, buf->start);
5744
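        /*
         * A block allocated in this very transaction that was never written
         * out can have its space reused immediately; anything else must stay
         * pinned until the transaction commits.
         */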
5745         if (btrfs_header_generation(buf) == trans->transid) {
5746                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5747                         ret = check_ref_cleanup(trans, root, buf->start);
5748                         if (!ret)
5749                                 goto out;
5750                 }
5751
5752                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
5753                         pin_down_extent(root, cache, buf->start, buf->len, 1);
5754                         goto out;
5755                 }
5756
5757                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
5758
5759                 btrfs_add_free_space(cache, buf->start, buf->len);
5760                 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE);
5761         }
5762 out:
5763         /*
5764          * We are deleting the buffer, so clear the corrupt flag since it
5765          * no longer matters.
5766          */
5767         clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
5768         btrfs_put_block_group(cache);
5769 }
5770
5771 /* Can return -ENOMEM */
5772 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
5773                       u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
5774                       u64 owner, u64 offset, int for_cow)
5775 {
5776         int ret;
5777         struct btrfs_fs_info *fs_info = root->fs_info;
5778
5779         /*
5780          * tree log blocks never actually go into the extent allocation
5781          * tree, just update pinning info and exit early.
5782          */
5783         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
5784                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
5785                 /* unlocks the pinned mutex */
5786                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
5787                 ret = 0;
5788         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
5789                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
5790                                         num_bytes,
5791                                         parent, root_objectid, (int)owner,
5792                                         BTRFS_DROP_DELAYED_REF, NULL, for_cow);
5793         } else {
5794                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
5795                                                 num_bytes,
5796                                                 parent, root_objectid, owner,
5797                                                 offset, BTRFS_DROP_DELAYED_REF,
5798                                                 NULL, for_cow);
5799         }
5800         return ret;
5801 }
5802
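/*
 * Align an allocation start up to the filesystem stripe size.  Only @val is
 * used here; @cache and @num_bytes are currently ignored.
 */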
5803 static u64 stripe_align(struct btrfs_root *root,
5804                         struct btrfs_block_group_cache *cache,
5805                         u64 val, u64 num_bytes)
5806 {
5807         u64 ret = ALIGN(val, root->stripesize);
5808         return ret;
5809 }
5810
5811 /*
5812  * when we wait for progress in the block group caching, it's because
5813  * our allocation attempt failed at least once.  So, we must sleep
5814  * and let some progress happen before we try again.
5815  *
5816  * This function will sleep at least once waiting for new free space to
5817  * show up, and then it will check the block group free space numbers
5818  * for our min num_bytes.  Another option is to have it go ahead
5819  * and look in the rbtree for a free extent of a given size, but this
5820  * is a good start.
5821  */
5822 static noinline int
5823 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
5824                                 u64 num_bytes)
5825 {
5826         struct btrfs_caching_control *caching_ctl;
5827
5828         caching_ctl = get_caching_control(cache);
5829         if (!caching_ctl)
5830                 return 0;
5831
5832         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
5833                    (cache->free_space_ctl->free_space >= num_bytes));
5834
5835         put_caching_control(caching_ctl);
5836         return 0;
5837 }
5838
5839 static noinline int
5840 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
5841 {
5842         struct btrfs_caching_control *caching_ctl;
5843
5844         caching_ctl = get_caching_control(cache);
5845         if (!caching_ctl)
5846                 return 0;
5847
5848         wait_event(caching_ctl->wait, block_group_cache_done(cache));
5849
5850         put_caching_control(caching_ctl);
5851         return 0;
5852 }
5853
5854 int __get_raid_index(u64 flags)
5855 {
5856         if (flags & BTRFS_BLOCK_GROUP_RAID10)
5857                 return BTRFS_RAID_RAID10;
5858         else if (flags & BTRFS_BLOCK_GROUP_RAID1)
5859                 return BTRFS_RAID_RAID1;
5860         else if (flags & BTRFS_BLOCK_GROUP_DUP)
5861                 return BTRFS_RAID_DUP;
5862         else if (flags & BTRFS_BLOCK_GROUP_RAID0)
5863                 return BTRFS_RAID_RAID0;
5864         else if (flags & BTRFS_BLOCK_GROUP_RAID5)
5865                 return BTRFS_RAID_RAID5;
5866         else if (flags & BTRFS_BLOCK_GROUP_RAID6)
5867                 return BTRFS_RAID_RAID6;
5868
5869         return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
5870 }
5871
5872 static int get_block_group_index(struct btrfs_block_group_cache *cache)
5873 {
5874         return __get_raid_index(cache->flags);
5875 }
5876
5877 enum btrfs_loop_type {
5878         LOOP_CACHING_NOWAIT = 0,
5879         LOOP_CACHING_WAIT = 1,
5880         LOOP_ALLOC_CHUNK = 2,
5881         LOOP_NO_EMPTY_SIZE = 3,
5882 };
5883
5884 /*
5885  * walks the btree of allocated extents and finds a hole of a given size.
5886  * The key ins is changed to record the hole:
5887  * ins->objectid == block start
5888  * ins->flags = BTRFS_EXTENT_ITEM_KEY
5889  * ins->offset == number of blocks
5890  * Any available blocks before search_start are skipped.
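 *
 * The search makes increasingly expensive passes over the candidate block
 * groups, escalating through the btrfs_loop_type stages above (from no-wait
 * caching all the way to forcing a chunk allocation) until an extent is
 * found or we run out of options.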
5891  */
5892 static noinline int find_free_extent(struct btrfs_trans_handle *trans,
5893                                      struct btrfs_root *orig_root,
5894                                      u64 num_bytes, u64 empty_size,
5895                                      u64 hint_byte, struct btrfs_key *ins,
5896                                      u64 data)
5897 {
5898         int ret = 0;
5899         struct btrfs_root *root = orig_root->fs_info->extent_root;
5900         struct btrfs_free_cluster *last_ptr = NULL;
5901         struct btrfs_block_group_cache *block_group = NULL;
5902         struct btrfs_block_group_cache *used_block_group;
5903         u64 search_start = 0;
5904         int empty_cluster = 2 * 1024 * 1024;
5905         struct btrfs_space_info *space_info;
5906         int loop = 0;
5907         int index = __get_raid_index(data);
5908         int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?
5909                 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
5910         bool found_uncached_bg = false;
5911         bool failed_cluster_refill = false;
5912         bool failed_alloc = false;
5913         bool use_cluster = true;
5914         bool have_caching_bg = false;
5915
5916         WARN_ON(num_bytes < root->sectorsize);
5917         btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
5918         ins->objectid = 0;
5919         ins->offset = 0;
5920
5921         trace_find_free_extent(orig_root, num_bytes, empty_size, data);
5922
5923         space_info = __find_space_info(root->fs_info, data);
5924         if (!space_info) {
5925                 btrfs_err(root->fs_info, "No space info for %llu", data);
5926                 return -ENOSPC;
5927         }
5928
5929         /*
5930          * If the space info is for both data and metadata it means we have a
5931          * small filesystem and we can't use the clustering stuff.
5932          */
5933         if (btrfs_mixed_space_info(space_info))
5934                 use_cluster = false;
5935
5936         if (data & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
5937                 last_ptr = &root->fs_info->meta_alloc_cluster;
5938                 if (!btrfs_test_opt(root, SSD))
5939                         empty_cluster = 64 * 1024;
5940         }
5941
5942         if ((data & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
5943             btrfs_test_opt(root, SSD)) {
5944                 last_ptr = &root->fs_info->data_alloc_cluster;
5945         }
5946
5947         if (last_ptr) {
5948                 spin_lock(&last_ptr->lock);
5949                 if (last_ptr->block_group)
5950                         hint_byte = last_ptr->window_start;
5951                 spin_unlock(&last_ptr->lock);
5952         }
5953
5954         search_start = max(search_start, first_logical_byte(root, 0));
5955         search_start = max(search_start, hint_byte);
5956
5957         if (!last_ptr)
5958                 empty_cluster = 0;
5959
5960         if (search_start == hint_byte) {
5961                 block_group = btrfs_lookup_block_group(root->fs_info,
5962                                                        search_start);
5963                 used_block_group = block_group;
5964                 /*
5965                  * we don't want to use the block group if it doesn't match our
5966                  * allocation bits, or if it's not cached.
5967                  *
5968                  * However if we are re-searching with an ideal block group
5969                  * picked out then we don't care that the block group is cached.
5970                  */
5971                 if (block_group && block_group_bits(block_group, data) &&
5972                     block_group->cached != BTRFS_CACHE_NO) {
5973                         down_read(&space_info->groups_sem);
5974                         if (list_empty(&block_group->list) ||
5975                             block_group->ro) {
5976                                 /*
5977                                  * someone is removing this block group,
5978                                  * we can't jump into the have_block_group
5979                                  * target because our list pointers are not
5980                                  * valid
5981                                  */
5982                                 btrfs_put_block_group(block_group);
5983                                 up_read(&space_info->groups_sem);
5984                         } else {
5985                                 index = get_block_group_index(block_group);
5986                                 goto have_block_group;
5987                         }
5988                 } else if (block_group) {
5989                         btrfs_put_block_group(block_group);
5990                 }
5991         }
5992 search:
5993         have_caching_bg = false;
5994         down_read(&space_info->groups_sem);
5995         list_for_each_entry(block_group, &space_info->block_groups[index],
5996                             list) {
5997                 u64 offset;
5998                 int cached;
5999
6000                 used_block_group = block_group;
6001                 btrfs_get_block_group(block_group);
6002                 search_start = block_group->key.objectid;
6003
6004                 /*
6005                  * this can happen if we end up cycling through all the
6006                  * raid types, but we want to make sure we only allocate
6007                  * for the proper type.
6008                  */
6009                 if (!block_group_bits(block_group, data)) {
6010                     u64 extra = BTRFS_BLOCK_GROUP_DUP |
6011                                 BTRFS_BLOCK_GROUP_RAID1 |
6012                                 BTRFS_BLOCK_GROUP_RAID5 |
6013                                 BTRFS_BLOCK_GROUP_RAID6 |
6014                                 BTRFS_BLOCK_GROUP_RAID10;
6015
6016                         /*
6017                          * if they asked for extra copies and this block group
6018                          * doesn't provide them, bail.  This does allow us to
6019                          * fill raid0 from raid1.
6020                          */
6021                         if ((data & extra) && !(block_group->flags & extra))
6022                                 goto loop;
6023                 }
6024
6025 have_block_group:
6026                 cached = block_group_cache_done(block_group);
6027                 if (unlikely(!cached)) {
6028                         found_uncached_bg = true;
6029                         ret = cache_block_group(block_group, 0);
6030                         BUG_ON(ret < 0);
6031                         ret = 0;
6032                 }
6033
6034                 if (unlikely(block_group->ro))
6035                         goto loop;
6036
6037                 /*
6038                  * Ok, we want to try to use the cluster allocator, so
6039                  * let's look there
6040                  */
6041                 if (last_ptr) {
6042                         unsigned long aligned_cluster;
6043                         /*
6044                          * the refill lock keeps out other
6045                          * people trying to start a new cluster
6046                          */
6047                         spin_lock(&last_ptr->refill_lock);
6048                         used_block_group = last_ptr->block_group;
6049                         if (used_block_group != block_group &&
6050                             (!used_block_group ||
6051                              used_block_group->ro ||
6052                              !block_group_bits(used_block_group, data))) {
6053                                 used_block_group = block_group;
6054                                 goto refill_cluster;
6055                         }
6056
6057                         if (used_block_group != block_group)
6058                                 btrfs_get_block_group(used_block_group);
6059
6060                         offset = btrfs_alloc_from_cluster(used_block_group,
6061                           last_ptr, num_bytes, used_block_group->key.objectid);
6062                         if (offset) {
6063                                 /* we have a block, we're done */
6064                                 spin_unlock(&last_ptr->refill_lock);
6065                                 trace_btrfs_reserve_extent_cluster(root,
6066                                         block_group, search_start, num_bytes);
6067                                 goto checks;
6068                         }
6069
6070                         WARN_ON(last_ptr->block_group != used_block_group);
6071                         if (used_block_group != block_group) {
6072                                 btrfs_put_block_group(used_block_group);
6073                                 used_block_group = block_group;
6074                         }
6075 refill_cluster:
6076                         BUG_ON(used_block_group != block_group);
6077                         /* If we are on LOOP_NO_EMPTY_SIZE, we can't
6078                          * set up a new cluster, so let's just skip it
6079                          * and let the allocator find whatever block
6080                          * it can find.  If we reach this point, we
6081                          * will have tried the cluster allocator
6082                          * plenty of times and not have found
6083                          * anything, so we are likely way too
6084                          * fragmented for the clustering stuff to find
6085                          * anything.
6086                          *
6087                          * However, if the cluster is taken from the
6088                          * current block group, release the cluster
6089                          * first, so that we stand a better chance of
6090                          * succeeding in the unclustered
6091                          * allocation.  */
6092                         if (loop >= LOOP_NO_EMPTY_SIZE &&
6093                             last_ptr->block_group != block_group) {
6094                                 spin_unlock(&last_ptr->refill_lock);
6095                                 goto unclustered_alloc;
6096                         }
6097
6098                         /*
6099                          * this cluster didn't work out, free it and
6100                          * start over
6101                          */
6102                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
6103
6104                         if (loop >= LOOP_NO_EMPTY_SIZE) {
6105                                 spin_unlock(&last_ptr->refill_lock);
6106                                 goto unclustered_alloc;
6107                         }
6108
6109                         aligned_cluster = max_t(unsigned long,
6110                                                 empty_cluster + empty_size,
6111                                               block_group->full_stripe_len);
6112
6113                         /* allocate a cluster in this block group */
6114                         ret = btrfs_find_space_cluster(trans, root,
6115                                                block_group, last_ptr,
6116                                                search_start, num_bytes,
6117                                                aligned_cluster);
6118                         if (ret == 0) {
6119                                 /*
6120                                  * now pull our allocation out of this
6121                                  * cluster
6122                                  */
6123                                 offset = btrfs_alloc_from_cluster(block_group,
6124                                                   last_ptr, num_bytes,
6125                                                   search_start);
6126                                 if (offset) {
6127                                         /* we found one, proceed */
6128                                         spin_unlock(&last_ptr->refill_lock);
6129                                         trace_btrfs_reserve_extent_cluster(root,
6130                                                 block_group, search_start,
6131                                                 num_bytes);
6132                                         goto checks;
6133                                 }
6134                         } else if (!cached && loop > LOOP_CACHING_NOWAIT
6135                                    && !failed_cluster_refill) {
6136                                 spin_unlock(&last_ptr->refill_lock);
6137
6138                                 failed_cluster_refill = true;
6139                                 wait_block_group_cache_progress(block_group,
6140                                        num_bytes + empty_cluster + empty_size);
6141                                 goto have_block_group;
6142                         }
6143
6144                         /*
6145                          * at this point we either didn't find a cluster
6146                          * or we weren't able to allocate a block from our
6147                          * cluster.  Free the cluster we've been trying
6148                          * to use, and go to the next block group
6149                          */
6150                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
6151                         spin_unlock(&last_ptr->refill_lock);
6152                         goto loop;
6153                 }
6154
6155 unclustered_alloc:
6156                 spin_lock(&block_group->free_space_ctl->tree_lock);
6157                 if (cached &&
6158                     block_group->free_space_ctl->free_space <
6159                     num_bytes + empty_cluster + empty_size) {
6160                         spin_unlock(&block_group->free_space_ctl->tree_lock);
6161                         goto loop;
6162                 }
6163                 spin_unlock(&block_group->free_space_ctl->tree_lock);
6164
6165                 offset = btrfs_find_space_for_alloc(block_group, search_start,
6166                                                     num_bytes, empty_size);
6167                 /*
6168                  * If we didn't find a chunk, and we haven't failed on this
6169                  * block group before, and this block group is in the middle of
6170                  * caching and we are ok with waiting, then go ahead and wait
6171                  * for progress to be made, and set failed_alloc to true.
6172                  *
6173                  * If failed_alloc is true then we've already waited on this
6174                  * block group once and should move on to the next block group.
6175                  */
6176                 if (!offset && !failed_alloc && !cached &&
6177                     loop > LOOP_CACHING_NOWAIT) {
6178                         wait_block_group_cache_progress(block_group,
6179                                                 num_bytes + empty_size);
6180                         failed_alloc = true;
6181                         goto have_block_group;
6182                 } else if (!offset) {
6183                         if (!cached)
6184                                 have_caching_bg = true;
6185                         goto loop;
6186                 }
6187 checks:
6188                 search_start = stripe_align(root, used_block_group,
6189                                             offset, num_bytes);
6190
6191                 /* move on to the next group */
6192                 if (search_start + num_bytes >
6193                     used_block_group->key.objectid + used_block_group->key.offset) {
6194                         btrfs_add_free_space(used_block_group, offset, num_bytes);
6195                         goto loop;
6196                 }
6197
6198                 if (offset < search_start)
6199                         btrfs_add_free_space(used_block_group, offset,
6200                                              search_start - offset);
6201                 BUG_ON(offset > search_start);
6202
6203                 ret = btrfs_update_reserved_bytes(used_block_group, num_bytes,
6204                                                   alloc_type);
6205                 if (ret == -EAGAIN) {
6206                         btrfs_add_free_space(used_block_group, offset, num_bytes);
6207                         goto loop;
6208                 }
6209
6210                 /* we are all good, let's return */
6211                 ins->objectid = search_start;
6212                 ins->offset = num_bytes;
6213
6214                 trace_btrfs_reserve_extent(orig_root, block_group,
6215                                            search_start, num_bytes);
6216                 if (used_block_group != block_group)
6217                         btrfs_put_block_group(used_block_group);
6218                 btrfs_put_block_group(block_group);
6219                 break;
6220 loop:
6221                 failed_cluster_refill = false;
6222                 failed_alloc = false;
6223                 BUG_ON(index != get_block_group_index(block_group));
6224                 if (used_block_group != block_group)
6225                         btrfs_put_block_group(used_block_group);
6226                 btrfs_put_block_group(block_group);
6227         }
6228         up_read(&space_info->groups_sem);
6229
6230         if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
6231                 goto search;
6232
6233         if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
6234                 goto search;
6235
6236         /*
6237          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
6238          *                      caching kthreads as we move along
6239          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
6240          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
6241          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
6242          *                      again
6243          */
6244         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
6245                 index = 0;
6246                 loop++;
6247                 if (loop == LOOP_ALLOC_CHUNK) {
6248                         ret = do_chunk_alloc(trans, root, data,
6249                                              CHUNK_ALLOC_FORCE);
6250                         /*
6251                          * Do not bail out on ENOSPC since we
6252                          * may still be able to make progress.
6253                          */
6254                         if (ret < 0 && ret != -ENOSPC) {
6255                                 btrfs_abort_transaction(trans,
6256                                                         root, ret);
6257                                 goto out;
6258                         }
6259                 }
6260
6261                 if (loop == LOOP_NO_EMPTY_SIZE) {
6262                         empty_size = 0;
6263                         empty_cluster = 0;
6264                 }
6265
6266                 goto search;
6267         } else if (!ins->objectid) {
6268                 ret = -ENOSPC;
6269         } else if (ins->objectid) {
6270                 ret = 0;
6271         }
6272 out:
6273
6274         return ret;
6275 }
6276
6277 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
6278                             int dump_block_groups)
6279 {
6280         struct btrfs_block_group_cache *cache;
6281         int index = 0;
6282
6283         spin_lock(&info->lock);
6284         printk(KERN_INFO "space_info %llu has %llu free, is %sfull\n",
6285                (unsigned long long)info->flags,
6286                (unsigned long long)(info->total_bytes - info->bytes_used -
6287                                     info->bytes_pinned - info->bytes_reserved -
6288                                     info->bytes_readonly),
6289                (info->full) ? "" : "not ");
6290         printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
6291                "reserved=%llu, may_use=%llu, readonly=%llu\n",
6292                (unsigned long long)info->total_bytes,
6293                (unsigned long long)info->bytes_used,
6294                (unsigned long long)info->bytes_pinned,
6295                (unsigned long long)info->bytes_reserved,
6296                (unsigned long long)info->bytes_may_use,
6297                (unsigned long long)info->bytes_readonly);
6298         spin_unlock(&info->lock);
6299
6300         if (!dump_block_groups)
6301                 return;
6302
6303         down_read(&info->groups_sem);
6304 again:
6305         list_for_each_entry(cache, &info->block_groups[index], list) {
6306                 spin_lock(&cache->lock);
6307                 printk(KERN_INFO "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s\n",
6308                        (unsigned long long)cache->key.objectid,
6309                        (unsigned long long)cache->key.offset,
6310                        (unsigned long long)btrfs_block_group_used(&cache->item),
6311                        (unsigned long long)cache->pinned,
6312                        (unsigned long long)cache->reserved,
6313                        cache->ro ? "[readonly]" : "");
6314                 btrfs_dump_free_space(cache, bytes);
6315                 spin_unlock(&cache->lock);
6316         }
6317         if (++index < BTRFS_NR_RAID_TYPES)
6318                 goto again;
6319         up_read(&info->groups_sem);
6320 }
6321
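/*
 * Try to reserve num_bytes, falling back to as little as min_alloc_size on
 * ENOSPC: the request is halved (rounded down to the sector size and clamped
 * to min_alloc_size) and retried, with the final attempt made at exactly
 * min_alloc_size.  For example, a 1 MiB request with a 256 KiB minimum is
 * retried at 512 KiB and then 256 KiB before we give up.
 */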
6322 int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
6323                          struct btrfs_root *root,
6324                          u64 num_bytes, u64 min_alloc_size,
6325                          u64 empty_size, u64 hint_byte,
6326                          struct btrfs_key *ins, u64 data)
6327 {
6328         bool final_tried = false;
6329         int ret;
6330
6331         data = btrfs_get_alloc_profile(root, data);
6332 again:
6333         WARN_ON(num_bytes < root->sectorsize);
6334         ret = find_free_extent(trans, root, num_bytes, empty_size,
6335                                hint_byte, ins, data);
6336
6337         if (ret == -ENOSPC) {
6338                 if (!final_tried) {
6339                         num_bytes = num_bytes >> 1;
6340                         num_bytes = round_down(num_bytes, root->sectorsize);
6341                         num_bytes = max(num_bytes, min_alloc_size);
6342                         if (num_bytes == min_alloc_size)
6343                                 final_tried = true;
6344                         goto again;
6345                 } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
6346                         struct btrfs_space_info *sinfo;
6347
6348                         sinfo = __find_space_info(root->fs_info, data);
6349                         btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
6350                                 (unsigned long long)data,
6351                                 (unsigned long long)num_bytes);
6352                         if (sinfo)
6353                                 dump_space_info(sinfo, num_bytes, 1);
6354                 }
6355         }
6356
6357         trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
6358
6359         return ret;
6360 }
6361
6362 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
6363                                         u64 start, u64 len, int pin)
6364 {
6365         struct btrfs_block_group_cache *cache;
6366         int ret = 0;
6367
6368         cache = btrfs_lookup_block_group(root->fs_info, start);
6369         if (!cache) {
6370                 btrfs_err(root->fs_info, "Unable to find block group for %llu",
6371                         (unsigned long long)start);
6372                 return -ENOSPC;
6373         }
6374
6375         if (btrfs_test_opt(root, DISCARD))
6376                 ret = btrfs_discard_extent(root, start, len, NULL);
6377
6378         if (pin)
6379                 pin_down_extent(root, cache, start, len, 1);
6380         else {
6381                 btrfs_add_free_space(cache, start, len);
6382                 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
6383         }
6384         btrfs_put_block_group(cache);
6385
6386         trace_btrfs_reserved_extent_free(root, start, len);
6387
6388         return ret;
6389 }
6390
6391 int btrfs_free_reserved_extent(struct btrfs_root *root,
6392                                         u64 start, u64 len)
6393 {
6394         return __btrfs_free_reserved_extent(root, start, len, 0);
6395 }
6396
6397 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
6398                                        u64 start, u64 len)
6399 {
6400         return __btrfs_free_reserved_extent(root, start, len, 1);
6401 }
6402
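/*
 * Insert the extent item for a newly allocated data extent together with a
 * single inline backref: a shared data ref keyed by the parent tree block
 * when @parent is set, otherwise a keyed extent data ref (root, owner,
 * offset).
 */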
6403 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6404                                       struct btrfs_root *root,
6405                                       u64 parent, u64 root_objectid,
6406                                       u64 flags, u64 owner, u64 offset,
6407                                       struct btrfs_key *ins, int ref_mod)
6408 {
6409         int ret;
6410         struct btrfs_fs_info *fs_info = root->fs_info;
6411         struct btrfs_extent_item *extent_item;
6412         struct btrfs_extent_inline_ref *iref;
6413         struct btrfs_path *path;
6414         struct extent_buffer *leaf;
6415         int type;
6416         u32 size;
6417
6418         if (parent > 0)
6419                 type = BTRFS_SHARED_DATA_REF_KEY;
6420         else
6421                 type = BTRFS_EXTENT_DATA_REF_KEY;
6422
6423         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
6424
6425         path = btrfs_alloc_path();
6426         if (!path)
6427                 return -ENOMEM;
6428
6429         path->leave_spinning = 1;
6430         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6431                                       ins, size);
6432         if (ret) {
6433                 btrfs_free_path(path);
6434                 return ret;
6435         }
6436
6437         leaf = path->nodes[0];
6438         extent_item = btrfs_item_ptr(leaf, path->slots[0],
6439                                      struct btrfs_extent_item);
6440         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
6441         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6442         btrfs_set_extent_flags(leaf, extent_item,
6443                                flags | BTRFS_EXTENT_FLAG_DATA);
6444
6445         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
6446         btrfs_set_extent_inline_ref_type(leaf, iref, type);
6447         if (parent > 0) {
6448                 struct btrfs_shared_data_ref *ref;
6449                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
6450                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6451                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
6452         } else {
6453                 struct btrfs_extent_data_ref *ref;
6454                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
6455                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
6456                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
6457                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
6458                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
6459         }
6460
6461         btrfs_mark_buffer_dirty(path->nodes[0]);
6462         btrfs_free_path(path);
6463
6464         ret = update_block_group(root, ins->objectid, ins->offset, 1);
6465         if (ret) { /* -ENOENT, logic error */
6466                 btrfs_err(fs_info, "update block group failed for %llu %llu",
6467                         (unsigned long long)ins->objectid,
6468                         (unsigned long long)ins->offset);
6469                 BUG();
6470         }
6471         return ret;
6472 }
6473
6474 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
6475                                      struct btrfs_root *root,
6476                                      u64 parent, u64 root_objectid,
6477                                      u64 flags, struct btrfs_disk_key *key,
6478                                      int level, struct btrfs_key *ins)
6479 {
6480         int ret;
6481         struct btrfs_fs_info *fs_info = root->fs_info;
6482         struct btrfs_extent_item *extent_item;
6483         struct btrfs_tree_block_info *block_info;
6484         struct btrfs_extent_inline_ref *iref;
6485         struct btrfs_path *path;
6486         struct extent_buffer *leaf;
6487         u32 size = sizeof(*extent_item) + sizeof(*iref);
6488         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
6489                                                  SKINNY_METADATA);
6490
6491         if (!skinny_metadata)
6492                 size += sizeof(*block_info);
6493
6494         path = btrfs_alloc_path();
6495         if (!path)
6496                 return -ENOMEM;
6497
6498         path->leave_spinning = 1;
6499         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6500                                       ins, size);
6501         if (ret) {
6502                 btrfs_free_path(path);
6503                 return ret;
6504         }
6505
6506         leaf = path->nodes[0];
6507         extent_item = btrfs_item_ptr(leaf, path->slots[0],
6508                                      struct btrfs_extent_item);
6509         btrfs_set_extent_refs(leaf, extent_item, 1);
6510         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6511         btrfs_set_extent_flags(leaf, extent_item,
6512                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
6513
6514         if (skinny_metadata) {
6515                 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
6516         } else {
6517                 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
6518                 btrfs_set_tree_block_key(leaf, block_info, key);
6519                 btrfs_set_tree_block_level(leaf, block_info, level);
6520                 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
6521         }
6522
6523         if (parent > 0) {
6524                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
6525                 btrfs_set_extent_inline_ref_type(leaf, iref,
6526                                                  BTRFS_SHARED_BLOCK_REF_KEY);
6527                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6528         } else {
6529                 btrfs_set_extent_inline_ref_type(leaf, iref,
6530                                                  BTRFS_TREE_BLOCK_REF_KEY);
6531                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
6532         }
6533
6534         btrfs_mark_buffer_dirty(leaf);
6535         btrfs_free_path(path);
6536
6537         ret = update_block_group(root, ins->objectid, root->leafsize, 1);
6538         if (ret) { /* -ENOENT, logic error */
6539                 btrfs_err(fs_info, "update block group failed for %llu %llu",
6540                         (unsigned long long)ins->objectid,
6541                         (unsigned long long)ins->offset);
6542                 BUG();
6543         }
6544         return ret;
6545 }
6546
6547 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6548                                      struct btrfs_root *root,
6549                                      u64 root_objectid, u64 owner,
6550                                      u64 offset, struct btrfs_key *ins)
6551 {
6552         int ret;
6553
6554         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
6555
6556         ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
6557                                          ins->offset, 0,
6558                                          root_objectid, owner, offset,
6559                                          BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
6560         return ret;
6561 }
6562
6563 /*
6564  * this is used by the tree logging recovery code.  It records that
6565  * an extent has been allocated and makes sure to clear the free
6566  * space cache bits as well
6567  */
6568 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
6569                                    struct btrfs_root *root,
6570                                    u64 root_objectid, u64 owner, u64 offset,
6571                                    struct btrfs_key *ins)
6572 {
6573         int ret;
6574         struct btrfs_block_group_cache *block_group;
6575         struct btrfs_caching_control *caching_ctl;
6576         u64 start = ins->objectid;
6577         u64 num_bytes = ins->offset;
6578
6579         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
6580         cache_block_group(block_group, 0);
6581         caching_ctl = get_caching_control(block_group);
6582
6583         if (!caching_ctl) {
6584                 BUG_ON(!block_group_cache_done(block_group));
6585                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
6586                 BUG_ON(ret); /* -ENOMEM */
6587         } else {
6588                 mutex_lock(&caching_ctl->mutex);
6589
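                /*
                 * Depending on how far the caching thread has progressed,
                 * the extent is either entirely ahead of the progress pointer
                 * (exclude it so the caching thread skips it), entirely
                 * behind it (already in the free space cache, so remove it),
                 * or split across it (handle both halves).
                 */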
6590                 if (start >= caching_ctl->progress) {
6591                         ret = add_excluded_extent(root, start, num_bytes);
6592                         BUG_ON(ret); /* -ENOMEM */
6593                 } else if (start + num_bytes <= caching_ctl->progress) {
6594                         ret = btrfs_remove_free_space(block_group,
6595                                                       start, num_bytes);
6596                         BUG_ON(ret); /* -ENOMEM */
6597                 } else {
6598                         num_bytes = caching_ctl->progress - start;
6599                         ret = btrfs_remove_free_space(block_group,
6600                                                       start, num_bytes);
6601                         BUG_ON(ret); /* -ENOMEM */
6602
6603                         start = caching_ctl->progress;
6604                         num_bytes = ins->objectid + ins->offset -
6605                                     caching_ctl->progress;
6606                         ret = add_excluded_extent(root, start, num_bytes);
6607                         BUG_ON(ret); /* -ENOMEM */
6608                 }
6609
6610                 mutex_unlock(&caching_ctl->mutex);
6611                 put_caching_control(caching_ctl);
6612         }
6613
6614         ret = btrfs_update_reserved_bytes(block_group, ins->offset,
6615                                           RESERVE_ALLOC_NO_ACCOUNT);
6616         BUG_ON(ret); /* logic error */
6617         btrfs_put_block_group(block_group);
6618         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
6619                                          0, owner, offset, ins, 1);
6620         return ret;
6621 }
6622
6623 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
6624                                             struct btrfs_root *root,
6625                                             u64 bytenr, u32 blocksize,
6626                                             int level)
6627 {
6628         struct extent_buffer *buf;
6629
6630         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
6631         if (!buf)
6632                 return ERR_PTR(-ENOMEM);
6633         btrfs_set_header_generation(buf, trans->transid);
6634         btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
6635         btrfs_tree_lock(buf);
6636         clean_tree_block(trans, root, buf);
6637         clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
6638
6639         btrfs_set_lock_blocking(buf);
6640         btrfs_set_buffer_uptodate(buf);
6641
6642         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
6643                 /*
6644                  * we allow two log transactions at a time, use different
6645                  * EXTENT bit to differentiate dirty pages.
6646                  */
6647                 if (root->log_transid % 2 == 0)
6648                         set_extent_dirty(&root->dirty_log_pages, buf->start,
6649                                         buf->start + buf->len - 1, GFP_NOFS);
6650                 else
6651                         set_extent_new(&root->dirty_log_pages, buf->start,
6652                                         buf->start + buf->len - 1, GFP_NOFS);
6653         } else {
6654                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
6655                          buf->start + buf->len - 1, GFP_NOFS);
6656         }
6657         trans->blocks_used++;
6658         /* this returns a buffer locked for blocking */
6659         return buf;
6660 }
6661
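/*
 * pick the reservation this allocation is charged to.  we first try the
 * rsv returned by get_block_rsv() (normally the transaction's or the
 * root's); if it is empty or exhausted we try a fresh reservation and,
 * as a last resort, steal from the global reserve.  returns the rsv that
 * was charged or an ERR_PTR on failure.
 */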
6662 static struct btrfs_block_rsv *
6663 use_block_rsv(struct btrfs_trans_handle *trans,
6664               struct btrfs_root *root, u32 blocksize)
6665 {
6666         struct btrfs_block_rsv *block_rsv;
6667         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
6668         int ret;
6669
6670         block_rsv = get_block_rsv(trans, root);
6671
6672         if (block_rsv->size == 0) {
6673                 ret = reserve_metadata_bytes(root, block_rsv, blocksize,
6674                                              BTRFS_RESERVE_NO_FLUSH);
6675                 /*
6676                  * If we couldn't reserve metadata bytes try and use some from
6677                  * the global reserve.
6678                  */
6679                 if (ret && block_rsv != global_rsv) {
6680                         ret = block_rsv_use_bytes(global_rsv, blocksize);
6681                         if (!ret)
6682                                 return global_rsv;
6683                         return ERR_PTR(ret);
6684                 } else if (ret) {
6685                         return ERR_PTR(ret);
6686                 }
6687                 return block_rsv;
6688         }
6689
6690         ret = block_rsv_use_bytes(block_rsv, blocksize);
6691         if (!ret)
6692                 return block_rsv;
6693         if (ret && !block_rsv->failfast) {
6694                 if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
6695                         static DEFINE_RATELIMIT_STATE(_rs,
6696                                         DEFAULT_RATELIMIT_INTERVAL * 10,
6697                                         /*DEFAULT_RATELIMIT_BURST*/ 1);
6698                         if (__ratelimit(&_rs))
6699                                 WARN(1, KERN_DEBUG
6700                                         "btrfs: block rsv returned %d\n", ret);
6701                 }
6702                 ret = reserve_metadata_bytes(root, block_rsv, blocksize,
6703                                              BTRFS_RESERVE_NO_FLUSH);
6704                 if (!ret) {
6705                         return block_rsv;
6706                 } else if (ret && block_rsv != global_rsv) {
6707                         ret = block_rsv_use_bytes(global_rsv, blocksize);
6708                         if (!ret)
6709                                 return global_rsv;
6710                 }
6711         }
6712
6713         return ERR_PTR(-ENOSPC);
6714 }
6715
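/*
 * return a reservation taken by use_block_rsv(): the bytes go back into
 * the rsv and anything above its target size is released to the
 * space_info right away.
 */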
6716 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
6717                             struct btrfs_block_rsv *block_rsv, u32 blocksize)
6718 {
6719         block_rsv_add_bytes(block_rsv, blocksize, 0);
6720         block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
6721 }
6722
6723 /*
6724  * finds a free extent and does all the dirty work required for allocation.
6725  * the first block of the new extent is returned as a tree buffer, locked
6726  * and ready for the caller to fill in.
6727  *
6728  * returns the tree buffer or an ERR_PTR on failure.
6729  */
6730 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
6731                                         struct btrfs_root *root, u32 blocksize,
6732                                         u64 parent, u64 root_objectid,
6733                                         struct btrfs_disk_key *key, int level,
6734                                         u64 hint, u64 empty_size)
6735 {
6736         struct btrfs_key ins;
6737         struct btrfs_block_rsv *block_rsv;
6738         struct extent_buffer *buf;
6739         u64 flags = 0;
6740         int ret;
6741         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
6742                                                  SKINNY_METADATA);
6743
6744         block_rsv = use_block_rsv(trans, root, blocksize);
6745         if (IS_ERR(block_rsv))
6746                 return ERR_CAST(block_rsv);
6747
6748         ret = btrfs_reserve_extent(trans, root, blocksize, blocksize,
6749                                    empty_size, hint, &ins, 0);
6750         if (ret) {
6751                 unuse_block_rsv(root->fs_info, block_rsv, blocksize);
6752                 return ERR_PTR(ret);
6753         }
6754
6755         buf = btrfs_init_new_buffer(trans, root, ins.objectid,
6756                                     blocksize, level);
6757         BUG_ON(IS_ERR(buf)); /* -ENOMEM */
6758
6759         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
6760                 if (parent == 0)
6761                         parent = ins.objectid;
6762                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
6763         } else
6764                 BUG_ON(parent > 0);
6765
6766         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
6767                 struct btrfs_delayed_extent_op *extent_op;
6768                 extent_op = btrfs_alloc_delayed_extent_op();
6769                 BUG_ON(!extent_op); /* -ENOMEM */
6770                 if (key)
6771                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
6772                 else
6773                         memset(&extent_op->key, 0, sizeof(extent_op->key));
6774                 extent_op->flags_to_set = flags;
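                /*
                 * skinny metadata extent items carry no btrfs_tree_block_info,
                 * so there is no first key to record for them
                 */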
6775                 if (skinny_metadata)
6776                         extent_op->update_key = 0;
6777                 else
6778                         extent_op->update_key = 1;
6779                 extent_op->update_flags = 1;
6780                 extent_op->is_data = 0;
6781
6782                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6783                                         ins.objectid,
6784                                         ins.offset, parent, root_objectid,
6785                                         level, BTRFS_ADD_DELAYED_EXTENT,
6786                                         extent_op, 0);
6787                 BUG_ON(ret); /* -ENOMEM */
6788         }
6789         return buf;
6790 }
6791
6792 struct walk_control {
6793         u64 refs[BTRFS_MAX_LEVEL];
6794         u64 flags[BTRFS_MAX_LEVEL];
6795         struct btrfs_key update_progress;
6796         int stage;
6797         int level;
6798         int shared_level;
6799         int update_ref;
6800         int keep_locks;
6801         int reada_slot;
6802         int reada_count;
6803         int for_reloc;
6804 };
6805
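/*
 * stages of the tree walk used when dropping a snapshot: DROP_REFERENCE
 * frees blocks that are only referenced by the tree being dropped, while
 * UPDATE_BACKREF is entered temporarily for a shared subtree whose back
 * references must be updated before the drop can continue.
 */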
6806 #define DROP_REFERENCE  1
6807 #define UPDATE_BACKREF  2
6808
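/*
 * read ahead tree blocks one level below the node we are walking.  the
 * readahead window (wc->reada_count) shrinks while we are still inside the
 * previously read range and grows once we move past it, capped at one
 * node's worth of pointers.
 */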
6809 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
6810                                      struct btrfs_root *root,
6811                                      struct walk_control *wc,
6812                                      struct btrfs_path *path)
6813 {
6814         u64 bytenr;
6815         u64 generation;
6816         u64 refs;
6817         u64 flags;
6818         u32 nritems;
6819         u32 blocksize;
6820         struct btrfs_key key;
6821         struct extent_buffer *eb;
6822         int ret;
6823         int slot;
6824         int nread = 0;
6825
6826         if (path->slots[wc->level] < wc->reada_slot) {
6827                 wc->reada_count = wc->reada_count * 2 / 3;
6828                 wc->reada_count = max(wc->reada_count, 2);
6829         } else {
6830                 wc->reada_count = wc->reada_count * 3 / 2;
6831                 wc->reada_count = min_t(int, wc->reada_count,
6832                                         BTRFS_NODEPTRS_PER_BLOCK(root));
6833         }
6834
6835         eb = path->nodes[wc->level];
6836         nritems = btrfs_header_nritems(eb);
6837         blocksize = btrfs_level_size(root, wc->level - 1);
6838
6839         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
6840                 if (nread >= wc->reada_count)
6841                         break;
6842
6843                 cond_resched();
6844                 bytenr = btrfs_node_blockptr(eb, slot);
6845                 generation = btrfs_node_ptr_generation(eb, slot);
6846
6847                 if (slot == path->slots[wc->level])
6848                         goto reada;
6849
6850                 if (wc->stage == UPDATE_BACKREF &&
6851                     generation <= root->root_key.offset)
6852                         continue;
6853
6854                 /* We don't lock the tree block, it's OK to be racy here */
6855                 ret = btrfs_lookup_extent_info(trans, root, bytenr,
6856                                                wc->level - 1, 1, &refs,
6857                                                &flags);
6858                 /* We don't care about errors in readahead. */
6859                 if (ret < 0)
6860                         continue;
6861                 BUG_ON(refs == 0);
6862
6863                 if (wc->stage == DROP_REFERENCE) {
6864                         if (refs == 1)
6865                                 goto reada;
6866
6867                         if (wc->level == 1 &&
6868                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6869                                 continue;
6870                         if (!wc->update_ref ||
6871                             generation <= root->root_key.offset)
6872                                 continue;
6873                         btrfs_node_key_to_cpu(eb, &key, slot);
6874                         ret = btrfs_comp_cpu_keys(&key,
6875                                                   &wc->update_progress);
6876                         if (ret < 0)
6877                                 continue;
6878                 } else {
6879                         if (wc->level == 1 &&
6880                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6881                                 continue;
6882                 }
6883 reada:
6884                 ret = readahead_tree_block(root, bytenr, blocksize,
6885                                            generation);
6886                 if (ret)
6887                         break;
6888                 nread++;
6889         }
6890         wc->reada_slot = slot;
6891 }
6892
6893 /*
6894  * helper to process tree block while walking down the tree.
6895  *
6896  * when wc->stage == UPDATE_BACKREF, this function updates
6897  * back refs for pointers in the block.
6898  *
6899  * NOTE: return value 1 means we should stop walking down.
6900  */
6901 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
6902                                    struct btrfs_root *root,
6903                                    struct btrfs_path *path,
6904                                    struct walk_control *wc, int lookup_info)
6905 {
6906         int level = wc->level;
6907         struct extent_buffer *eb = path->nodes[level];
6908         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
6909         int ret;
6910
6911         if (wc->stage == UPDATE_BACKREF &&
6912             btrfs_header_owner(eb) != root->root_key.objectid)
6913                 return 1;
6914
6915         /*
6916          * when the reference count of a tree block is 1, it won't increase
6917          * again. once the full backref flag is set, we never clear it.
6918          */
6919         if (lookup_info &&
6920             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
6921              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
6922                 BUG_ON(!path->locks[level]);
6923                 ret = btrfs_lookup_extent_info(trans, root,
6924                                                eb->start, level, 1,
6925                                                &wc->refs[level],
6926                                                &wc->flags[level]);
6927                 BUG_ON(ret == -ENOMEM);
6928                 if (ret)
6929                         return ret;
6930                 BUG_ON(wc->refs[level] == 0);
6931         }
6932
6933         if (wc->stage == DROP_REFERENCE) {
6934                 if (wc->refs[level] > 1)
6935                         return 1;
6936
6937                 if (path->locks[level] && !wc->keep_locks) {
6938                         btrfs_tree_unlock_rw(eb, path->locks[level]);
6939                         path->locks[level] = 0;
6940                 }
6941                 return 0;
6942         }
6943
6944         /* wc->stage == UPDATE_BACKREF */
6945         if (!(wc->flags[level] & flag)) {
6946                 BUG_ON(!path->locks[level]);
6947                 ret = btrfs_inc_ref(trans, root, eb, 1, wc->for_reloc);
6948                 BUG_ON(ret); /* -ENOMEM */
6949                 ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc);
6950                 BUG_ON(ret); /* -ENOMEM */
6951                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
6952                                                   eb->len, flag, 0);
6953                 BUG_ON(ret); /* -ENOMEM */
6954                 wc->flags[level] |= flag;
6955         }
6956
6957         /*
6958          * the block is shared by multiple trees, so it's not good to
6959          * keep the tree lock
6960          */
6961         if (path->locks[level] && level > 0) {
6962                 btrfs_tree_unlock_rw(eb, path->locks[level]);
6963                 path->locks[level] = 0;
6964         }
6965         return 0;
6966 }
6967
6968 /*
6969  * helper to process tree block pointer.
6970  *
6971  * when wc->stage == DROP_REFERENCE, this function checks
6972  * reference count of the block pointed to. if the block
6973  * is shared and we need update back refs for the subtree
6974  * rooted at the block, this function changes wc->stage to
6975  * UPDATE_BACKREF. if the block is shared and there is no
6976  * need to update back, this function drops the reference
6977  * to the block.
6978  *
6979  * NOTE: return value 1 means we should stop walking down.
6980  */
6981 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
6982                                  struct btrfs_root *root,
6983                                  struct btrfs_path *path,
6984                                  struct walk_control *wc, int *lookup_info)
6985 {
6986         u64 bytenr;
6987         u64 generation;
6988         u64 parent;
6989         u32 blocksize;
6990         struct btrfs_key key;
6991         struct extent_buffer *next;
6992         int level = wc->level;
6993         int reada = 0;
6994         int ret = 0;
6995
6996         generation = btrfs_node_ptr_generation(path->nodes[level],
6997                                                path->slots[level]);
6998         /*
6999          * if the lower level block was created before the snapshot
7000          * was created, we know there is no need to update back refs
7001          * for the subtree
7002          */
7003         if (wc->stage == UPDATE_BACKREF &&
7004             generation <= root->root_key.offset) {
7005                 *lookup_info = 1;
7006                 return 1;
7007         }
7008
7009         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
7010         blocksize = btrfs_level_size(root, level - 1);
7011
7012         next = btrfs_find_tree_block(root, bytenr, blocksize);
7013         if (!next) {
7014                 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
7015                 if (!next)
7016                         return -ENOMEM;
7017                 reada = 1;
7018         }
7019         btrfs_tree_lock(next);
7020         btrfs_set_lock_blocking(next);
7021
7022         ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
7023                                        &wc->refs[level - 1],
7024                                        &wc->flags[level - 1]);
7025         if (ret < 0) {
7026                 btrfs_tree_unlock(next);
7027                 return ret;
7028         }
7029
7030         if (unlikely(wc->refs[level - 1] == 0)) {
7031                 btrfs_err(root->fs_info, "Missing references.");
7032                 BUG();
7033         }
7034         *lookup_info = 0;
7035
7036         if (wc->stage == DROP_REFERENCE) {
7037                 if (wc->refs[level - 1] > 1) {
7038                         if (level == 1 &&
7039                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7040                                 goto skip;
7041
7042                         if (!wc->update_ref ||
7043                             generation <= root->root_key.offset)
7044                                 goto skip;
7045
7046                         btrfs_node_key_to_cpu(path->nodes[level], &key,
7047                                               path->slots[level]);
7048                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
7049                         if (ret < 0)
7050                                 goto skip;
7051
7052                         wc->stage = UPDATE_BACKREF;
7053                         wc->shared_level = level - 1;
7054                 }
7055         } else {
7056                 if (level == 1 &&
7057                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7058                         goto skip;
7059         }
7060
7061         if (!btrfs_buffer_uptodate(next, generation, 0)) {
7062                 btrfs_tree_unlock(next);
7063                 free_extent_buffer(next);
7064                 next = NULL;
7065                 *lookup_info = 1;
7066         }
7067
7068         if (!next) {
7069                 if (reada && level == 1)
7070                         reada_walk_down(trans, root, wc, path);
7071                 next = read_tree_block(root, bytenr, blocksize, generation);
7072                 if (!next)
7073                         return -EIO;
7074                 btrfs_tree_lock(next);
7075                 btrfs_set_lock_blocking(next);
7076         }
7077
7078         level--;
7079         BUG_ON(level != btrfs_header_level(next));
7080         path->nodes[level] = next;
7081         path->slots[level] = 0;
7082         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7083         wc->level = level;
7084         if (wc->level == 1)
7085                 wc->reada_slot = 0;
7086         return 0;
7087 skip:
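        /*
         * we are not going to descend into this block.  in the
         * DROP_REFERENCE stage that means dropping this tree's reference
         * on it before moving on.
         */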
7088         wc->refs[level - 1] = 0;
7089         wc->flags[level - 1] = 0;
7090         if (wc->stage == DROP_REFERENCE) {
7091                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
7092                         parent = path->nodes[level]->start;
7093                 } else {
7094                         BUG_ON(root->root_key.objectid !=
7095                                btrfs_header_owner(path->nodes[level]));
7096                         parent = 0;
7097                 }
7098
7099                 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
7100                                 root->root_key.objectid, level - 1, 0, 0);
7101                 BUG_ON(ret); /* -ENOMEM */
7102         }
7103         btrfs_tree_unlock(next);
7104         free_extent_buffer(next);
7105         *lookup_info = 1;
7106         return 1;
7107 }
7108
7109 /*
7110  * helper to process tree block while walking up the tree.
7111  *
7112  * when wc->stage == DROP_REFERENCE, this function drops
7113  * reference count on the block.
7114  *
7115  * when wc->stage == UPDATE_BACKREF, this function changes
7116  * wc->stage back to DROP_REFERENCE if we changed wc->stage
7117  * to UPDATE_BACKREF previously while processing the block.
7118  *
7119  * NOTE: return value 1 means we should stop walking up.
7120  */
7121 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
7122                                  struct btrfs_root *root,
7123                                  struct btrfs_path *path,
7124                                  struct walk_control *wc)
7125 {
7126         int ret;
7127         int level = wc->level;
7128         struct extent_buffer *eb = path->nodes[level];
7129         u64 parent = 0;
7130
7131         if (wc->stage == UPDATE_BACKREF) {
7132                 BUG_ON(wc->shared_level < level);
7133                 if (level < wc->shared_level)
7134                         goto out;
7135
7136                 ret = find_next_key(path, level + 1, &wc->update_progress);
7137                 if (ret > 0)
7138                         wc->update_ref = 0;
7139
7140                 wc->stage = DROP_REFERENCE;
7141                 wc->shared_level = -1;
7142                 path->slots[level] = 0;
7143
7144                 /*
7145                  * check reference count again if the block isn't locked.
7146                  * we should start walking down the tree again if reference
7147                  * count is one.
7148                  */
7149                 if (!path->locks[level]) {
7150                         BUG_ON(level == 0);
7151                         btrfs_tree_lock(eb);
7152                         btrfs_set_lock_blocking(eb);
7153                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7154
7155                         ret = btrfs_lookup_extent_info(trans, root,
7156                                                        eb->start, level, 1,
7157                                                        &wc->refs[level],
7158                                                        &wc->flags[level]);
7159                         if (ret < 0) {
7160                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7161                                 path->locks[level] = 0;
7162                                 return ret;
7163                         }
7164                         BUG_ON(wc->refs[level] == 0);
7165                         if (wc->refs[level] == 1) {
7166                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7167                                 path->locks[level] = 0;
7168                                 return 1;
7169                         }
7170                 }
7171         }
7172
7173         /* wc->stage == DROP_REFERENCE */
7174         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
7175
7176         if (wc->refs[level] == 1) {
7177                 if (level == 0) {
7178                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7179                                 ret = btrfs_dec_ref(trans, root, eb, 1,
7180                                                     wc->for_reloc);
7181                         else
7182                                 ret = btrfs_dec_ref(trans, root, eb, 0,
7183                                                     wc->for_reloc);
7184                         BUG_ON(ret); /* -ENOMEM */
7185                 }
7186                 /* make block locked assertion in clean_tree_block happy */
7187                 if (!path->locks[level] &&
7188                     btrfs_header_generation(eb) == trans->transid) {
7189                         btrfs_tree_lock(eb);
7190                         btrfs_set_lock_blocking(eb);
7191                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7192                 }
7193                 clean_tree_block(trans, root, eb);
7194         }
7195
7196         if (eb == root->node) {
7197                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7198                         parent = eb->start;
7199                 else
7200                         BUG_ON(root->root_key.objectid !=
7201                                btrfs_header_owner(eb));
7202         } else {
7203                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7204                         parent = path->nodes[level + 1]->start;
7205                 else
7206                         BUG_ON(root->root_key.objectid !=
7207                                btrfs_header_owner(path->nodes[level + 1]));
7208         }
7209
7210         btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
7211 out:
7212         wc->refs[level] = 0;
7213         wc->flags[level] = 0;
7214         return 0;
7215 }
7216
7217 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
7218                                    struct btrfs_root *root,
7219                                    struct btrfs_path *path,
7220                                    struct walk_control *wc)
7221 {
7222         int level = wc->level;
7223         int lookup_info = 1;
7224         int ret;
7225
7226         while (level >= 0) {
7227                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
7228                 if (ret > 0)
7229                         break;
7230
7231                 if (level == 0)
7232                         break;
7233
7234                 if (path->slots[level] >=
7235                     btrfs_header_nritems(path->nodes[level]))
7236                         break;
7237
7238                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
7239                 if (ret > 0) {
7240                         path->slots[level]++;
7241                         continue;
7242                 } else if (ret < 0)
7243                         return ret;
7244                 level = wc->level;
7245         }
7246         return 0;
7247 }
7248
7249 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
7250                                  struct btrfs_root *root,
7251                                  struct btrfs_path *path,
7252                                  struct walk_control *wc, int max_level)
7253 {
7254         int level = wc->level;
7255         int ret;
7256
7257         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
7258         while (level < max_level && path->nodes[level]) {
7259                 wc->level = level;
7260                 if (path->slots[level] + 1 <
7261                     btrfs_header_nritems(path->nodes[level])) {
7262                         path->slots[level]++;
7263                         return 0;
7264                 } else {
7265                         ret = walk_up_proc(trans, root, path, wc);
7266                         if (ret > 0)
7267                                 return 0;
7268
7269                         if (path->locks[level]) {
7270                                 btrfs_tree_unlock_rw(path->nodes[level],
7271                                                      path->locks[level]);
7272                                 path->locks[level] = 0;
7273                         }
7274                         free_extent_buffer(path->nodes[level]);
7275                         path->nodes[level] = NULL;
7276                         level++;
7277                 }
7278         }
7279         return 1;
7280 }
7281
7282 /*
7283  * drop a subvolume tree.
7284  *
7285  * this function traverses the tree freeing any blocks that are only
7286  * referenced by the tree.
7287  *
7288  * when a shared tree block is found, this function decreases its
7289  * reference count by one. if update_ref is true, this function
7290  * also makes sure backrefs for the shared block and all lower level
7291  * blocks are properly updated.
7292  *
7293  * If called with for_reloc == 0, may exit early with -EAGAIN
7294  */
7295 int btrfs_drop_snapshot(struct btrfs_root *root,
7296                          struct btrfs_block_rsv *block_rsv, int update_ref,
7297                          int for_reloc)
7298 {
7299         struct btrfs_path *path;
7300         struct btrfs_trans_handle *trans;
7301         struct btrfs_root *tree_root = root->fs_info->tree_root;
7302         struct btrfs_root_item *root_item = &root->root_item;
7303         struct walk_control *wc;
7304         struct btrfs_key key;
7305         int err = 0;
7306         int ret;
7307         int level;
7308
7309         path = btrfs_alloc_path();
7310         if (!path) {
7311                 err = -ENOMEM;
7312                 goto out;
7313         }
7314
7315         wc = kzalloc(sizeof(*wc), GFP_NOFS);
7316         if (!wc) {
7317                 btrfs_free_path(path);
7318                 err = -ENOMEM;
7319                 goto out;
7320         }
7321
7322         trans = btrfs_start_transaction(tree_root, 0);
7323         if (IS_ERR(trans)) {
7324                 err = PTR_ERR(trans);
7325                 goto out_free;
7326         }
7327
7328         if (block_rsv)
7329                 trans->block_rsv = block_rsv;
7330
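        /*
         * a zero drop_progress key means this is the first pass over the
         * snapshot; otherwise resume the walk from the key recorded by a
         * previous, interrupted drop.
         */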
7331         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
7332                 level = btrfs_header_level(root->node);
7333                 path->nodes[level] = btrfs_lock_root_node(root);
7334                 btrfs_set_lock_blocking(path->nodes[level]);
7335                 path->slots[level] = 0;
7336                 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7337                 memset(&wc->update_progress, 0,
7338                        sizeof(wc->update_progress));
7339         } else {
7340                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
7341                 memcpy(&wc->update_progress, &key,
7342                        sizeof(wc->update_progress));
7343
7344                 level = root_item->drop_level;
7345                 BUG_ON(level == 0);
7346                 path->lowest_level = level;
7347                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
7348                 path->lowest_level = 0;
7349                 if (ret < 0) {
7350                         err = ret;
7351                         goto out_end_trans;
7352                 }
7353                 WARN_ON(ret > 0);
7354
7355                 /*
7356                  * unlock our path, this is safe because only this
7357                  * function is allowed to delete this snapshot
7358                  */
7359                 btrfs_unlock_up_safe(path, 0);
7360
7361                 level = btrfs_header_level(root->node);
7362                 while (1) {
7363                         btrfs_tree_lock(path->nodes[level]);
7364                         btrfs_set_lock_blocking(path->nodes[level]);
7365
7366                         ret = btrfs_lookup_extent_info(trans, root,
7367                                                 path->nodes[level]->start,
7368                                                 level, 1, &wc->refs[level],
7369                                                 &wc->flags[level]);
7370                         if (ret < 0) {
7371                                 err = ret;
7372                                 goto out_end_trans;
7373                         }
7374                         BUG_ON(wc->refs[level] == 0);
7375
7376                         if (level == root_item->drop_level)
7377                                 break;
7378
7379                         btrfs_tree_unlock(path->nodes[level]);
7380                         WARN_ON(wc->refs[level] != 1);
7381                         level--;
7382                 }
7383         }
7384
7385         wc->level = level;
7386         wc->shared_level = -1;
7387         wc->stage = DROP_REFERENCE;
7388         wc->update_ref = update_ref;
7389         wc->keep_locks = 0;
7390         wc->for_reloc = for_reloc;
7391         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
7392
7393         while (1) {
7394                 if (!for_reloc && btrfs_fs_closing(root->fs_info)) {
7395                         pr_debug("btrfs: drop snapshot early exit\n");
7396                         err = -EAGAIN;
7397                         goto out_end_trans;
7398                 }
7399
7400                 ret = walk_down_tree(trans, root, path, wc);
7401                 if (ret < 0) {
7402                         err = ret;
7403                         break;
7404                 }
7405
7406                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
7407                 if (ret < 0) {
7408                         err = ret;
7409                         break;
7410                 }
7411
7412                 if (ret > 0) {
7413                         BUG_ON(wc->stage != DROP_REFERENCE);
7414                         break;
7415                 }
7416
7417                 if (wc->stage == DROP_REFERENCE) {
7418                         level = wc->level;
7419                         btrfs_node_key(path->nodes[level],
7420                                        &root_item->drop_progress,
7421                                        path->slots[level]);
7422                         root_item->drop_level = level;
7423                 }
7424
7425                 BUG_ON(wc->level == 0);
7426                 if (btrfs_should_end_transaction(trans, tree_root)) {
7427                         ret = btrfs_update_root(trans, tree_root,
7428                                                 &root->root_key,
7429                                                 root_item);
7430                         if (ret) {
7431                                 btrfs_abort_transaction(trans, tree_root, ret);
7432                                 err = ret;
7433                                 goto out_end_trans;
7434                         }
7435
7436                         btrfs_end_transaction_throttle(trans, tree_root);
7437                         trans = btrfs_start_transaction(tree_root, 0);
7438                         if (IS_ERR(trans)) {
7439                                 err = PTR_ERR(trans);
7440                                 goto out_free;
7441                         }
7442                         if (block_rsv)
7443                                 trans->block_rsv = block_rsv;
7444                 }
7445         }
7446         btrfs_release_path(path);
7447         if (err)
7448                 goto out_end_trans;
7449
7450         ret = btrfs_del_root(trans, tree_root, &root->root_key);
7451         if (ret) {
7452                 btrfs_abort_transaction(trans, tree_root, ret);
7453                 goto out_end_trans;
7454         }
7455
7456         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
7457                 ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
7458                                            NULL, NULL);
7459                 if (ret < 0) {
7460                         btrfs_abort_transaction(trans, tree_root, ret);
7461                         err = ret;
7462                         goto out_end_trans;
7463                 } else if (ret > 0) {
7464                         /* if we fail to delete the orphan item this time
7465                          * around, it'll get picked up the next time.
7466                          *
7467                          * The most common failure here is just -ENOENT.
7468                          */
7469                         btrfs_del_orphan_item(trans, tree_root,
7470                                               root->root_key.objectid);
7471                 }
7472         }
7473
7474         if (root->in_radix) {
7475                 btrfs_free_fs_root(tree_root->fs_info, root);
7476         } else {
7477                 free_extent_buffer(root->node);
7478                 free_extent_buffer(root->commit_root);
7479                 kfree(root);
7480         }
7481 out_end_trans:
7482         btrfs_end_transaction_throttle(trans, tree_root);
7483 out_free:
7484         kfree(wc);
7485         btrfs_free_path(path);
7486 out:
7487         if (err)
7488                 btrfs_std_error(root->fs_info, err);
7489         return err;
7490 }
7491
7492 /*
7493  * drop subtree rooted at tree block 'node'.
7494  *
7495  * NOTE: this function will unlock and release tree block 'node'.
7496  * it is only used by the relocation code.
7497  */
7498 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
7499                         struct btrfs_root *root,
7500                         struct extent_buffer *node,
7501                         struct extent_buffer *parent)
7502 {
7503         struct btrfs_path *path;
7504         struct walk_control *wc;
7505         int level;
7506         int parent_level;
7507         int ret = 0;
7508         int wret;
7509
7510         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
7511
7512         path = btrfs_alloc_path();
7513         if (!path)
7514                 return -ENOMEM;
7515
7516         wc = kzalloc(sizeof(*wc), GFP_NOFS);
7517         if (!wc) {
7518                 btrfs_free_path(path);
7519                 return -ENOMEM;
7520         }
7521
7522         btrfs_assert_tree_locked(parent);
7523         parent_level = btrfs_header_level(parent);
7524         extent_buffer_get(parent);
7525         path->nodes[parent_level] = parent;
7526         path->slots[parent_level] = btrfs_header_nritems(parent);
7527
7528         btrfs_assert_tree_locked(node);
7529         level = btrfs_header_level(node);
7530         path->nodes[level] = node;
7531         path->slots[level] = 0;
7532         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7533
7534         wc->refs[parent_level] = 1;
7535         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
7536         wc->level = level;
7537         wc->shared_level = -1;
7538         wc->stage = DROP_REFERENCE;
7539         wc->update_ref = 0;
7540         wc->keep_locks = 1;
7541         wc->for_reloc = 1;
7542         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
7543
7544         while (1) {
7545                 wret = walk_down_tree(trans, root, path, wc);
7546                 if (wret < 0) {
7547                         ret = wret;
7548                         break;
7549                 }
7550
7551                 wret = walk_up_tree(trans, root, path, wc, parent_level);
7552                 if (wret < 0)
7553                         ret = wret;
7554                 if (wret != 0)
7555                         break;
7556         }
7557
7558         kfree(wc);
7559         btrfs_free_path(path);
7560         return ret;
7561 }
7562
7563 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
7564 {
7565         u64 num_devices;
7566         u64 stripped;
7567
7568         /*
7569          * if restripe for this chunk_type is on pick target profile and
7570          * return, otherwise do the usual balance
7571          */
7572         stripped = get_restripe_target(root->fs_info, flags);
7573         if (stripped)
7574                 return extended_to_chunk(stripped);
7575
7576         /*
7577          * we add in the count of missing devices because we want
7578          * to make sure that any RAID levels on a degraded FS
7579          * continue to be honored.
7580          */
7581         num_devices = root->fs_info->fs_devices->rw_devices +
7582                 root->fs_info->fs_devices->missing_devices;
7583
7584         stripped = BTRFS_BLOCK_GROUP_RAID0 |
7585                 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
7586                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
7587
7588         if (num_devices == 1) {
7589                 stripped |= BTRFS_BLOCK_GROUP_DUP;
7590                 stripped = flags & ~stripped;
7591
7592                 /* turn raid0 into single device chunks */
7593                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
7594                         return stripped;
7595
7596                 /* turn mirroring into duplication */
7597                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
7598                              BTRFS_BLOCK_GROUP_RAID10))
7599                         return stripped | BTRFS_BLOCK_GROUP_DUP;
7600         } else {
7601                 /* they already had raid on here, just return */
7602                 if (flags & stripped)
7603                         return flags;
7604
7605                 stripped |= BTRFS_BLOCK_GROUP_DUP;
7606                 stripped = flags & ~stripped;
7607
7608                 /* switch duplicated blocks with raid1 */
7609                 if (flags & BTRFS_BLOCK_GROUP_DUP)
7610                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
7611
7612                 /* this is drive concat, leave it alone */
7613         }
7614
7615         return flags;
7616 }
7617
7618 static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
7619 {
7620         struct btrfs_space_info *sinfo = cache->space_info;
7621         u64 num_bytes;
7622         u64 min_allocable_bytes;
7623         int ret = -ENOSPC;
7624
7625
7626         /*
7627          * We need to leave some metadata and system metadata space
7628          * available for allocating chunks in corner cases, so unless we
7629          * are forced, require a little headroom before going read-only.
7630          */
7631         if ((sinfo->flags &
7632              (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
7633             !force)
7634                 min_allocable_bytes = 1 * 1024 * 1024;
7635         else
7636                 min_allocable_bytes = 0;
7637
7638         spin_lock(&sinfo->lock);
7639         spin_lock(&cache->lock);
7640
7641         if (cache->ro) {
7642                 ret = 0;
7643                 goto out;
7644         }
7645
7646         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7647                     cache->bytes_super - btrfs_block_group_used(&cache->item);
7648
7649         if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
7650             sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
7651             min_allocable_bytes <= sinfo->total_bytes) {
7652                 sinfo->bytes_readonly += num_bytes;
7653                 cache->ro = 1;
7654                 ret = 0;
7655         }
7656 out:
7657         spin_unlock(&cache->lock);
7658         spin_unlock(&sinfo->lock);
7659         return ret;
7660 }
7661
7662 int btrfs_set_block_group_ro(struct btrfs_root *root,
7663                              struct btrfs_block_group_cache *cache)
7664
7665 {
7666         struct btrfs_trans_handle *trans;
7667         u64 alloc_flags;
7668         int ret;
7669
7670         BUG_ON(cache->ro);
7671
7672         trans = btrfs_join_transaction(root);
7673         if (IS_ERR(trans))
7674                 return PTR_ERR(trans);
7675
7676         alloc_flags = update_block_group_flags(root, cache->flags);
7677         if (alloc_flags != cache->flags) {
7678                 ret = do_chunk_alloc(trans, root, alloc_flags,
7679                                      CHUNK_ALLOC_FORCE);
7680                 if (ret < 0)
7681                         goto out;
7682         }
7683
7684         ret = set_block_group_ro(cache, 0);
7685         if (!ret)
7686                 goto out;
7687         alloc_flags = get_alloc_profile(root, cache->space_info->flags);
7688         ret = do_chunk_alloc(trans, root, alloc_flags,
7689                              CHUNK_ALLOC_FORCE);
7690         if (ret < 0)
7691                 goto out;
7692         ret = set_block_group_ro(cache, 0);
7693 out:
7694         btrfs_end_transaction(trans, root);
7695         return ret;
7696 }
7697
7698 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
7699                             struct btrfs_root *root, u64 type)
7700 {
7701         u64 alloc_flags = get_alloc_profile(root, type);
7702         return do_chunk_alloc(trans, root, alloc_flags,
7703                               CHUNK_ALLOC_FORCE);
7704 }
7705
7706 /*
7707  * helper to account the unused space of all the readonly block groups in
7708  * the given list. takes mirrors into account.
7709  */
7710 static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
7711 {
7712         struct btrfs_block_group_cache *block_group;
7713         u64 free_bytes = 0;
7714         int factor;
7715
7716         list_for_each_entry(block_group, groups_list, list) {
7717                 spin_lock(&block_group->lock);
7718
7719                 if (!block_group->ro) {
7720                         spin_unlock(&block_group->lock);
7721                         continue;
7722                 }
7723
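                /*
                 * mirrored profiles keep two copies of every byte, so the
                 * unused space counts double on disk
                 */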
7724                 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
7725                                           BTRFS_BLOCK_GROUP_RAID10 |
7726                                           BTRFS_BLOCK_GROUP_DUP))
7727                         factor = 2;
7728                 else
7729                         factor = 1;
7730
7731                 free_bytes += (block_group->key.offset -
7732                                btrfs_block_group_used(&block_group->item)) *
7733                                factor;
7734
7735                 spin_unlock(&block_group->lock);
7736         }
7737
7738         return free_bytes;
7739 }
7740
7741 /*
7742  * helper to account the unused space of all the readonly block groups in
7743  * the space_info. takes mirrors into account.
7744  */
7745 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
7746 {
7747         int i;
7748         u64 free_bytes = 0;
7749
7750         spin_lock(&sinfo->lock);
7751
7752         for(i = 0; i < BTRFS_NR_RAID_TYPES; i++)
7753                 if (!list_empty(&sinfo->block_groups[i]))
7754                         free_bytes += __btrfs_get_ro_block_group_free_space(
7755                                                 &sinfo->block_groups[i]);
7756
7757         spin_unlock(&sinfo->lock);
7758
7759         return free_bytes;
7760 }
7761
7762 void btrfs_set_block_group_rw(struct btrfs_root *root,
7763                               struct btrfs_block_group_cache *cache)
7764 {
7765         struct btrfs_space_info *sinfo = cache->space_info;
7766         u64 num_bytes;
7767
7768         BUG_ON(!cache->ro);
7769
7770         spin_lock(&sinfo->lock);
7771         spin_lock(&cache->lock);
7772         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7773                     cache->bytes_super - btrfs_block_group_used(&cache->item);
7774         sinfo->bytes_readonly -= num_bytes;
7775         cache->ro = 0;
7776         spin_unlock(&cache->lock);
7777         spin_unlock(&sinfo->lock);
7778 }
7779
7780 /*
7781  * checks to see if it's even possible to relocate this block group.
7782  *
7783  * @return - -1 if it's not a good idea to relocate this block group, 0 if it's
7784  * ok to go ahead and try.
7785  */
7786 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
7787 {
7788         struct btrfs_block_group_cache *block_group;
7789         struct btrfs_space_info *space_info;
7790         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
7791         struct btrfs_device *device;
7792         u64 min_free;
7793         u64 dev_min = 1;
7794         u64 dev_nr = 0;
7795         u64 target;
7796         int index;
7797         int full = 0;
7798         int ret = 0;
7799
7800         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
7801
7802         /* odd, couldn't find the block group, leave it alone */
7803         if (!block_group)
7804                 return -1;
7805
7806         min_free = btrfs_block_group_used(&block_group->item);
7807
7808         /* no bytes used, we're good */
7809         if (!min_free)
7810                 goto out;
7811
7812         space_info = block_group->space_info;
7813         spin_lock(&space_info->lock);
7814
7815         full = space_info->full;
7816
7817         /*
7818          * if this is the last block group we have in this space, we can't
7819          * relocate it unless we're able to allocate a new chunk below.
7820          *
7821          * Otherwise, we need to make sure we have room in the space to handle
7822          * all of the extents from this block group.  If we can, we're good
7823          */
7824         if ((space_info->total_bytes != block_group->key.offset) &&
7825             (space_info->bytes_used + space_info->bytes_reserved +
7826              space_info->bytes_pinned + space_info->bytes_readonly +
7827              min_free < space_info->total_bytes)) {
7828                 spin_unlock(&space_info->lock);
7829                 goto out;
7830         }
7831         spin_unlock(&space_info->lock);
7832
7833         /*
7834          * ok we don't have enough space, but maybe we have free space on our
7835          * devices to allocate new chunks for relocation, so loop through our
7836          * alloc devices and guess if we have enough space.  if this block
7837          * group is going to be restriped, run checks against the target
7838          * profile instead of the current one.
7839          */
7840         ret = -1;
7841
7842         /*
7843          * index:
7844          *      0: raid10
7845          *      1: raid1
7846          *      2: dup
7847          *      3: raid0
7848          *      4: single
7849          */
7850         target = get_restripe_target(root->fs_info, block_group->flags);
7851         if (target) {
7852                 index = __get_raid_index(extended_to_chunk(target));
7853         } else {
7854                 /*
7855                  * this is just a balance, so if we were marked as full
7856                  * we know there is no space for a new chunk
7857                  */
7858                 if (full)
7859                         goto out;
7860
7861                 index = get_block_group_index(block_group);
7862         }
7863
7864         if (index == BTRFS_RAID_RAID10) {
7865                 dev_min = 4;
7866                 /* Divide by 2 */
7867                 min_free >>= 1;
7868         } else if (index == BTRFS_RAID_RAID1) {
7869                 dev_min = 2;
7870         } else if (index == BTRFS_RAID_DUP) {
7871                 /* Multiply by 2 */
7872                 min_free <<= 1;
7873         } else if (index == BTRFS_RAID_RAID0) {
7874                 dev_min = fs_devices->rw_devices;
7875                 do_div(min_free, dev_min);
7876         }
7877
7878         mutex_lock(&root->fs_info->chunk_mutex);
7879         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
7880                 u64 dev_offset;
7881
7882                 /*
7883                  * check to make sure we can actually find a chunk with enough
7884                  * space to fit our block group in.
7885                  */
7886                 if (device->total_bytes > device->bytes_used + min_free &&
7887                     !device->is_tgtdev_for_dev_replace) {
7888                         ret = find_free_dev_extent(device, min_free,
7889                                                    &dev_offset, NULL);
7890                         if (!ret)
7891                                 dev_nr++;
7892
7893                         if (dev_nr >= dev_min)
7894                                 break;
7895
7896                         ret = -1;
7897                 }
7898         }
7899         mutex_unlock(&root->fs_info->chunk_mutex);
7900 out:
7901         btrfs_put_block_group(block_group);
7902         return ret;
7903 }
7904
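/*
 * position the path at the first block group item whose objectid is
 * greater than or equal to key->objectid.  returns 0 if one was found,
 * a positive value if there are no more block group items, or a
 * negative error.
 */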
7905 static int find_first_block_group(struct btrfs_root *root,
7906                 struct btrfs_path *path, struct btrfs_key *key)
7907 {
7908         int ret = 0;
7909         struct btrfs_key found_key;
7910         struct extent_buffer *leaf;
7911         int slot;
7912
7913         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
7914         if (ret < 0)
7915                 goto out;
7916
7917         while (1) {
7918                 slot = path->slots[0];
7919                 leaf = path->nodes[0];
7920                 if (slot >= btrfs_header_nritems(leaf)) {
7921                         ret = btrfs_next_leaf(root, path);
7922                         if (ret == 0)
7923                                 continue;
7924                         if (ret < 0)
7925                                 goto out;
7926                         break;
7927                 }
7928                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
7929
7930                 if (found_key.objectid >= key->objectid &&
7931                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
7932                         ret = 0;
7933                         goto out;
7934                 }
7935                 path->slots[0]++;
7936         }
7937 out:
7938         return ret;
7939 }
7940
7941 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
7942 {
7943         struct btrfs_block_group_cache *block_group;
7944         u64 last = 0;
7945
7946         while (1) {
7947                 struct inode *inode;
7948
7949                 block_group = btrfs_lookup_first_block_group(info, last);
7950                 while (block_group) {
7951                         spin_lock(&block_group->lock);
7952                         if (block_group->iref)
7953                                 break;
7954                         spin_unlock(&block_group->lock);
7955                         block_group = next_block_group(info->tree_root,
7956                                                        block_group);
7957                 }
7958                 if (!block_group) {
7959                         if (last == 0)
7960                                 break;
7961                         last = 0;
7962                         continue;
7963                 }
7964
7965                 inode = block_group->inode;
7966                 block_group->iref = 0;
7967                 block_group->inode = NULL;
7968                 spin_unlock(&block_group->lock);
7969                 iput(inode);
7970                 last = block_group->key.objectid + block_group->key.offset;
7971                 btrfs_put_block_group(block_group);
7972         }
7973 }
7974
7975 int btrfs_free_block_groups(struct btrfs_fs_info *info)
7976 {
7977         struct btrfs_block_group_cache *block_group;
7978         struct btrfs_space_info *space_info;
7979         struct btrfs_caching_control *caching_ctl;
7980         struct rb_node *n;
7981
7982         down_write(&info->extent_commit_sem);
7983         while (!list_empty(&info->caching_block_groups)) {
7984                 caching_ctl = list_entry(info->caching_block_groups.next,
7985                                          struct btrfs_caching_control, list);
7986                 list_del(&caching_ctl->list);
7987                 put_caching_control(caching_ctl);
7988         }
7989         up_write(&info->extent_commit_sem);
7990
7991         spin_lock(&info->block_group_cache_lock);
7992         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
7993                 block_group = rb_entry(n, struct btrfs_block_group_cache,
7994                                        cache_node);
7995                 rb_erase(&block_group->cache_node,
7996                          &info->block_group_cache_tree);
7997                 spin_unlock(&info->block_group_cache_lock);
7998
7999                 down_write(&block_group->space_info->groups_sem);
8000                 list_del(&block_group->list);
8001                 up_write(&block_group->space_info->groups_sem);
8002
8003                 if (block_group->cached == BTRFS_CACHE_STARTED)
8004                         wait_block_group_cache_done(block_group);
8005
8006                 /*
8007                  * We haven't cached this block group, which means we could
8008                  * possibly have excluded extents on this block group.
8009                  */
8010                 if (block_group->cached == BTRFS_CACHE_NO)
8011                         free_excluded_extents(info->extent_root, block_group);
8012
8013                 btrfs_remove_free_space_cache(block_group);
8014                 btrfs_put_block_group(block_group);
8015
8016                 spin_lock(&info->block_group_cache_lock);
8017         }
8018         spin_unlock(&info->block_group_cache_lock);
8019
8020         /* now that all the block groups are freed, go through and
8021          * free all the space_info structs.  This is only called during
8022          * the final stages of unmount, and so we know nobody is
8023          * using them.  We call synchronize_rcu() once before we start,
8024          * just to be on the safe side.
8025          */
8026         synchronize_rcu();
8027
8028         release_global_block_rsv(info);
8029
8030         while(!list_empty(&info->space_info)) {
8031                 space_info = list_entry(info->space_info.next,
8032                                         struct btrfs_space_info,
8033                                         list);
8034                 if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
8035                         if (space_info->bytes_pinned > 0 ||
8036                             space_info->bytes_reserved > 0 ||
8037                             space_info->bytes_may_use > 0) {
8038                                 WARN_ON(1);
8039                                 dump_space_info(space_info, 0, 0);
8040                         }
8041                 }
8042                 list_del(&space_info->list);
8043                 kfree(space_info);
8044         }
8045         return 0;
8046 }
8047
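/* Hook a block group into the per-profile list of its space_info. */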
8048 static void __link_block_group(struct btrfs_space_info *space_info,
8049                                struct btrfs_block_group_cache *cache)
8050 {
8051         int index = get_block_group_index(cache);
8052
8053         down_write(&space_info->groups_sem);
8054         list_add_tail(&cache->list, &space_info->block_groups[index]);
8055         up_write(&space_info->groups_sem);
8056 }
8057
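/*
 * Scan the extent tree for block group items at mount time and build the
 * in-memory block group caches, linking each one into its space_info and
 * marking read-only chunks accordingly.
 */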
8058 int btrfs_read_block_groups(struct btrfs_root *root)
8059 {
8060         struct btrfs_path *path;
8061         int ret;
8062         struct btrfs_block_group_cache *cache;
8063         struct btrfs_fs_info *info = root->fs_info;
8064         struct btrfs_space_info *space_info;
8065         struct btrfs_key key;
8066         struct btrfs_key found_key;
8067         struct extent_buffer *leaf;
8068         int need_clear = 0;
8069         u64 cache_gen;
8070
8071         root = info->extent_root;
8072         key.objectid = 0;
8073         key.offset = 0;
8074         btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
8075         path = btrfs_alloc_path();
8076         if (!path)
8077                 return -ENOMEM;
8078         path->reada = 1;
8079
8080         cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
8081         if (btrfs_test_opt(root, SPACE_CACHE) &&
8082             btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
8083                 need_clear = 1;
8084         if (btrfs_test_opt(root, CLEAR_CACHE))
8085                 need_clear = 1;
8086
8087         while (1) {
8088                 ret = find_first_block_group(root, path, &key);
8089                 if (ret > 0)
8090                         break;
8091                 if (ret != 0)
8092                         goto error;
8093                 leaf = path->nodes[0];
8094                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
8095                 cache = kzalloc(sizeof(*cache), GFP_NOFS);
8096                 if (!cache) {
8097                         ret = -ENOMEM;
8098                         goto error;
8099                 }
8100                 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
8101                                                 GFP_NOFS);
8102                 if (!cache->free_space_ctl) {
8103                         kfree(cache);
8104                         ret = -ENOMEM;
8105                         goto error;
8106                 }
8107
8108                 atomic_set(&cache->count, 1);
8109                 spin_lock_init(&cache->lock);
8110                 cache->fs_info = info;
8111                 INIT_LIST_HEAD(&cache->list);
8112                 INIT_LIST_HEAD(&cache->cluster_list);
8113
8114                 if (need_clear) {
8115                         /*
8116                          * When we mount with an old space cache, we need to
8117                          * set BTRFS_DC_CLEAR and set the dirty flag.
8118                          *
8119                          * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
8120                          *    truncate the old free space cache inode and
8121                          *    set up a new one.
8122                          * b) Setting the 'dirty' flag makes sure that we flush
8123                          *    the new space cache info onto disk.
8124                          */
8125                         cache->disk_cache_state = BTRFS_DC_CLEAR;
8126                         if (btrfs_test_opt(root, SPACE_CACHE))
8127                                 cache->dirty = 1;
8128                 }
8129
8130                 read_extent_buffer(leaf, &cache->item,
8131                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
8132                                    sizeof(cache->item));
8133                 memcpy(&cache->key, &found_key, sizeof(found_key));
8134
8135                 key.objectid = found_key.objectid + found_key.offset;
8136                 btrfs_release_path(path);
8137                 cache->flags = btrfs_block_group_flags(&cache->item);
8138                 cache->sectorsize = root->sectorsize;
8139                 cache->full_stripe_len = btrfs_full_stripe_len(root,
8140                                                &root->fs_info->mapping_tree,
8141                                                found_key.objectid);
8142                 btrfs_init_free_space_ctl(cache);
8143
8144                 /*
8145                  * We need to exclude the super stripes now so that the space
8146                  * info has super bytes accounted for; otherwise we'll think
8147                  * we have more space than we actually do.
8148                  */
8149                 ret = exclude_super_stripes(root, cache);
8150                 if (ret) {
8151                         /*
8152                          * We may have excluded something, so call this just in
8153                          * case.
8154                          */
8155                         free_excluded_extents(root, cache);
8156                         kfree(cache->free_space_ctl);
8157                         kfree(cache);
8158                         goto error;
8159                 }
8160
8161                 /*
8162                  * Check for two cases: either we are full, and therefore
8163                  * don't need to bother with the caching work since we won't
8164                  * find any space, or we are empty, and we can just add all
8165                  * the space in and be done with it.  This saves us a lot of
8166                  * time, particularly in the full case.
8167                  */
8168                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
8169                         cache->last_byte_to_unpin = (u64)-1;
8170                         cache->cached = BTRFS_CACHE_FINISHED;
8171                         free_excluded_extents(root, cache);
8172                 } else if (btrfs_block_group_used(&cache->item) == 0) {
8173                         cache->last_byte_to_unpin = (u64)-1;
8174                         cache->cached = BTRFS_CACHE_FINISHED;
8175                         add_new_free_space(cache, root->fs_info,
8176                                            found_key.objectid,
8177                                            found_key.objectid +
8178                                            found_key.offset);
8179                         free_excluded_extents(root, cache);
8180                 }
8181
8182                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
8183                 if (ret) {
8184                         btrfs_remove_free_space_cache(cache);
8185                         btrfs_put_block_group(cache);
8186                         goto error;
8187                 }
8188
8189                 ret = update_space_info(info, cache->flags, found_key.offset,
8190                                         btrfs_block_group_used(&cache->item),
8191                                         &space_info);
8192                 if (ret) {
8193                         btrfs_remove_free_space_cache(cache);
8194                         spin_lock(&info->block_group_cache_lock);
8195                         rb_erase(&cache->cache_node,
8196                                  &info->block_group_cache_tree);
8197                         spin_unlock(&info->block_group_cache_lock);
8198                         btrfs_put_block_group(cache);
8199                         goto error;
8200                 }
8201
8202                 cache->space_info = space_info;
8203                 spin_lock(&cache->space_info->lock);
8204                 cache->space_info->bytes_readonly += cache->bytes_super;
8205                 spin_unlock(&cache->space_info->lock);
8206
8207                 __link_block_group(space_info, cache);
8208
8209                 set_avail_alloc_bits(root->fs_info, cache->flags);
8210                 if (btrfs_chunk_readonly(root, cache->key.objectid))
8211                         set_block_group_ro(cache, 1);
8212         }
8213
8214         list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
8215                 if (!(get_alloc_profile(root, space_info->flags) &
8216                       (BTRFS_BLOCK_GROUP_RAID10 |
8217                        BTRFS_BLOCK_GROUP_RAID1 |
8218                        BTRFS_BLOCK_GROUP_RAID5 |
8219                        BTRFS_BLOCK_GROUP_RAID6 |
8220                        BTRFS_BLOCK_GROUP_DUP)))
8221                         continue;
8222                 /*
8223                  * Avoid allocating from un-mirrored block groups if there are
8224                  * mirrored block groups.
8225                  */
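                /* lists 3 and 4 are the RAID0 and SINGLE (un-mirrored) groups */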
8226                 list_for_each_entry(cache, &space_info->block_groups[3], list)
8227                         set_block_group_ro(cache, 1);
8228                 list_for_each_entry(cache, &space_info->block_groups[4], list)
8229                         set_block_group_ro(cache, 1);
8230         }
8231
8232         init_global_block_rsv(info);
8233         ret = 0;
8234 error:
8235         btrfs_free_path(path);
8236         return ret;
8237 }
8238
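/*
 * Insert a block group item into the extent tree for every block group
 * queued on trans->new_bgs by btrfs_make_block_group().
 */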
8239 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
8240                                        struct btrfs_root *root)
8241 {
8242         struct btrfs_block_group_cache *block_group, *tmp;
8243         struct btrfs_root *extent_root = root->fs_info->extent_root;
8244         struct btrfs_block_group_item item;
8245         struct btrfs_key key;
8246         int ret = 0;
8247
8248         list_for_each_entry_safe(block_group, tmp, &trans->new_bgs,
8249                                  new_bg_list) {
8250                 list_del_init(&block_group->new_bg_list);
8251
8252                 if (ret)
8253                         continue;
8254
8255                 spin_lock(&block_group->lock);
8256                 memcpy(&item, &block_group->item, sizeof(item));
8257                 memcpy(&key, &block_group->key, sizeof(key));
8258                 spin_unlock(&block_group->lock);
8259
8260                 ret = btrfs_insert_item(trans, extent_root, &key, &item,
8261                                         sizeof(item));
8262                 if (ret)
8263                         btrfs_abort_transaction(trans, extent_root, ret);
8264         }
8265 }
8266
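/*
 * Set up the in-memory cache for a newly allocated chunk, add its range
 * to the free space cache and queue it on trans->new_bgs so that
 * btrfs_create_pending_block_groups() inserts its block group item.
 */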
8267 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
8268                            struct btrfs_root *root, u64 bytes_used,
8269                            u64 type, u64 chunk_objectid, u64 chunk_offset,
8270                            u64 size)
8271 {
8272         int ret;
8273         struct btrfs_root *extent_root;
8274         struct btrfs_block_group_cache *cache;
8275
8276         extent_root = root->fs_info->extent_root;
8277
8278         root->fs_info->last_trans_log_full_commit = trans->transid;
8279
8280         cache = kzalloc(sizeof(*cache), GFP_NOFS);
8281         if (!cache)
8282                 return -ENOMEM;
8283         cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
8284                                         GFP_NOFS);
8285         if (!cache->free_space_ctl) {
8286                 kfree(cache);
8287                 return -ENOMEM;
8288         }
8289
8290         cache->key.objectid = chunk_offset;
8291         cache->key.offset = size;
8292         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
8293         cache->sectorsize = root->sectorsize;
8294         cache->fs_info = root->fs_info;
8295         cache->full_stripe_len = btrfs_full_stripe_len(root,
8296                                                &root->fs_info->mapping_tree,
8297                                                chunk_offset);
8298
8299         atomic_set(&cache->count, 1);
8300         spin_lock_init(&cache->lock);
8301         INIT_LIST_HEAD(&cache->list);
8302         INIT_LIST_HEAD(&cache->cluster_list);
8303         INIT_LIST_HEAD(&cache->new_bg_list);
8304
8305         btrfs_init_free_space_ctl(cache);
8306
8307         btrfs_set_block_group_used(&cache->item, bytes_used);
8308         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
8309         cache->flags = type;
8310         btrfs_set_block_group_flags(&cache->item, type);
8311
8312         cache->last_byte_to_unpin = (u64)-1;
8313         cache->cached = BTRFS_CACHE_FINISHED;
8314         ret = exclude_super_stripes(root, cache);
8315         if (ret) {
8316                 /*
8317                  * We may have excluded something, so call this just in
8318                  * case.
8319                  */
8320                 free_excluded_extents(root, cache);
8321                 kfree(cache->free_space_ctl);
8322                 kfree(cache);
8323                 return ret;
8324         }
8325
8326         add_new_free_space(cache, root->fs_info, chunk_offset,
8327                            chunk_offset + size);
8328
8329         free_excluded_extents(root, cache);
8330
8331         ret = btrfs_add_block_group_cache(root->fs_info, cache);
8332         if (ret) {
8333                 btrfs_remove_free_space_cache(cache);
8334                 btrfs_put_block_group(cache);
8335                 return ret;
8336         }
8337
8338         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
8339                                 &cache->space_info);
8340         if (ret) {
8341                 btrfs_remove_free_space_cache(cache);
8342                 spin_lock(&root->fs_info->block_group_cache_lock);
8343                 rb_erase(&cache->cache_node,
8344                          &root->fs_info->block_group_cache_tree);
8345                 spin_unlock(&root->fs_info->block_group_cache_lock);
8346                 btrfs_put_block_group(cache);
8347                 return ret;
8348         }
8349         update_global_block_rsv(root->fs_info);
8350
8351         spin_lock(&cache->space_info->lock);
8352         cache->space_info->bytes_readonly += cache->bytes_super;
8353         spin_unlock(&cache->space_info->lock);
8354
8355         __link_block_group(cache->space_info, cache);
8356
8357         list_add_tail(&cache->new_bg_list, &trans->new_bgs);
8358
8359         set_avail_alloc_bits(extent_root->fs_info, type);
8360
8361         return 0;
8362 }
8363
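/*
 * Clear the extended profile bits for @flags from the avail_*_alloc_bits
 * masks; the counterpart of set_avail_alloc_bits().
 */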
8364 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
8365 {
8366         u64 extra_flags = chunk_to_extended(flags) &
8367                                 BTRFS_EXTENDED_PROFILE_MASK;
8368
8369         write_seqlock(&fs_info->profiles_lock);
8370         if (flags & BTRFS_BLOCK_GROUP_DATA)
8371                 fs_info->avail_data_alloc_bits &= ~extra_flags;
8372         if (flags & BTRFS_BLOCK_GROUP_METADATA)
8373                 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
8374         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
8375                 fs_info->avail_system_alloc_bits &= ~extra_flags;
8376         write_sequnlock(&fs_info->profiles_lock);
8377 }
8378
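/*
 * Remove a (now read-only) block group: return any cluster space it holds,
 * drop its free space cache inode, delete the free space cache item and
 * the block group item on disk, and unlink the group from the in-memory
 * cache tree and its space_info.
 */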
8379 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
8380                              struct btrfs_root *root, u64 group_start)
8381 {
8382         struct btrfs_path *path;
8383         struct btrfs_block_group_cache *block_group;
8384         struct btrfs_free_cluster *cluster;
8385         struct btrfs_root *tree_root = root->fs_info->tree_root;
8386         struct btrfs_key key;
8387         struct inode *inode;
8388         int ret;
8389         int index;
8390         int factor;
8391
8392         root = root->fs_info->extent_root;
8393
8394         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
8395         BUG_ON(!block_group);
8396         BUG_ON(!block_group->ro);
8397
8398         /*
8399          * Free the reserved super bytes from this block group before
8400          * removing it.
8401          */
8402         free_excluded_extents(root, block_group);
8403
8404         memcpy(&key, &block_group->key, sizeof(key));
8405         index = get_block_group_index(block_group);
8406         if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
8407                                   BTRFS_BLOCK_GROUP_RAID1 |
8408                                   BTRFS_BLOCK_GROUP_RAID10))
8409                 factor = 2;
8410         else
8411                 factor = 1;
8412
8413         /* make sure this block group isn't part of an allocation cluster */
8414         cluster = &root->fs_info->data_alloc_cluster;
8415         spin_lock(&cluster->refill_lock);
8416         btrfs_return_cluster_to_free_space(block_group, cluster);
8417         spin_unlock(&cluster->refill_lock);
8418
8419         /*
8420          * make sure this block group isn't part of a metadata
8421          * allocation cluster
8422          */
8423         cluster = &root->fs_info->meta_alloc_cluster;
8424         spin_lock(&cluster->refill_lock);
8425         btrfs_return_cluster_to_free_space(block_group, cluster);
8426         spin_unlock(&cluster->refill_lock);
8427
8428         path = btrfs_alloc_path();
8429         if (!path) {
8430                 ret = -ENOMEM;
8431                 goto out;
8432         }
8433
8434         inode = lookup_free_space_inode(tree_root, block_group, path);
8435         if (!IS_ERR(inode)) {
8436                 ret = btrfs_orphan_add(trans, inode);
8437                 if (ret) {
8438                         btrfs_add_delayed_iput(inode);
8439                         goto out;
8440                 }
8441                 clear_nlink(inode);
8442                 /* One for the block group's ref */
8443                 spin_lock(&block_group->lock);
8444                 if (block_group->iref) {
8445                         block_group->iref = 0;
8446                         block_group->inode = NULL;
8447                         spin_unlock(&block_group->lock);
8448                         iput(inode);
8449                 } else {
8450                         spin_unlock(&block_group->lock);
8451                 }
8452                 /* One for our lookup ref */
8453                 btrfs_add_delayed_iput(inode);
8454         }
8455
8456         key.objectid = BTRFS_FREE_SPACE_OBJECTID;
8457         key.offset = block_group->key.objectid;
8458         key.type = 0;
8459
8460         ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
8461         if (ret < 0)
8462                 goto out;
8463         if (ret > 0)
8464                 btrfs_release_path(path);
8465         if (ret == 0) {
8466                 ret = btrfs_del_item(trans, tree_root, path);
8467                 if (ret)
8468                         goto out;
8469                 btrfs_release_path(path);
8470         }
8471
8472         spin_lock(&root->fs_info->block_group_cache_lock);
8473         rb_erase(&block_group->cache_node,
8474                  &root->fs_info->block_group_cache_tree);
8475
8476         if (root->fs_info->first_logical_byte == block_group->key.objectid)
8477                 root->fs_info->first_logical_byte = (u64)-1;
8478         spin_unlock(&root->fs_info->block_group_cache_lock);
8479
8480         down_write(&block_group->space_info->groups_sem);
8481         /*
8482          * We must use list_del_init so that callers can check whether the
8483          * block group is still on the list after taking the semaphore.
8484          */
8485         list_del_init(&block_group->list);
8486         if (list_empty(&block_group->space_info->block_groups[index]))
8487                 clear_avail_alloc_bits(root->fs_info, block_group->flags);
8488         up_write(&block_group->space_info->groups_sem);
8489
8490         if (block_group->cached == BTRFS_CACHE_STARTED)
8491                 wait_block_group_cache_done(block_group);
8492
8493         btrfs_remove_free_space_cache(block_group);
8494
8495         spin_lock(&block_group->space_info->lock);
8496         block_group->space_info->total_bytes -= block_group->key.offset;
8497         block_group->space_info->bytes_readonly -= block_group->key.offset;
8498         block_group->space_info->disk_total -= block_group->key.offset * factor;
8499         spin_unlock(&block_group->space_info->lock);
8500
8501         memcpy(&key, &block_group->key, sizeof(key));
8502
8503         btrfs_clear_space_info_full(root->fs_info);
8504
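        /*
         * Drop two references: the one taken by btrfs_lookup_block_group()
         * above, and the one that was held while the group sat in the
         * block group cache tree (removed via rb_erase above).
         */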
8505         btrfs_put_block_group(block_group);
8506         btrfs_put_block_group(block_group);
8507
8508         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
8509         if (ret > 0)
8510                 ret = -EIO;
8511         if (ret < 0)
8512                 goto out;
8513
8514         ret = btrfs_del_item(trans, root, path);
8515 out:
8516         btrfs_free_path(path);
8517         return ret;
8518 }
8519
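/*
 * Create the initial, empty space_info structures: SYSTEM plus either a
 * mixed METADATA|DATA space_info or separate METADATA and DATA ones,
 * depending on the MIXED_GROUPS incompat flag.
 */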
8520 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
8521 {
8522         struct btrfs_space_info *space_info;
8523         struct btrfs_super_block *disk_super;
8524         u64 features;
8525         u64 flags;
8526         int mixed = 0;
8527         int ret;
8528
8529         disk_super = fs_info->super_copy;
8530         if (!btrfs_super_root(disk_super))
8531                 return 1;
8532
8533         features = btrfs_super_incompat_flags(disk_super);
8534         if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
8535                 mixed = 1;
8536
8537         flags = BTRFS_BLOCK_GROUP_SYSTEM;
8538         ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8539         if (ret)
8540                 goto out;
8541
8542         if (mixed) {
8543                 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
8544                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8545         } else {
8546                 flags = BTRFS_BLOCK_GROUP_METADATA;
8547                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8548                 if (ret)
8549                         goto out;
8550
8551                 flags = BTRFS_BLOCK_GROUP_DATA;
8552                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8553         }
8554 out:
8555         return ret;
8556 }
8557
8558 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
8559 {
8560         return unpin_extent_range(root, start, end);
8561 }
8562
8563 int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
8564                                u64 num_bytes, u64 *actual_bytes)
8565 {
8566         return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
8567 }
8568
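/*
 * Discard the free space of every block group that overlaps the requested
 * range, honouring range->minlen, and report the total number of bytes
 * trimmed back through range->len.
 */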
8569 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
8570 {
8571         struct btrfs_fs_info *fs_info = root->fs_info;
8572         struct btrfs_block_group_cache *cache = NULL;
8573         u64 group_trimmed;
8574         u64 start;
8575         u64 end;
8576         u64 trimmed = 0;
8577         u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
8578         int ret = 0;
8579
8580         /*
8581          * Try to trim all FS space; our block group may start from a non-zero offset.
8582          */
8583         if (range->len == total_bytes)
8584                 cache = btrfs_lookup_first_block_group(fs_info, range->start);
8585         else
8586                 cache = btrfs_lookup_block_group(fs_info, range->start);
8587
8588         while (cache) {
8589                 if (cache->key.objectid >= (range->start + range->len)) {
8590                         btrfs_put_block_group(cache);
8591                         break;
8592                 }
8593
8594                 start = max(range->start, cache->key.objectid);
8595                 end = min(range->start + range->len,
8596                                 cache->key.objectid + cache->key.offset);
8597
8598                 if (end - start >= range->minlen) {
8599                         if (!block_group_cache_done(cache)) {
8600                                 ret = cache_block_group(cache, 0);
8601                                 if (!ret)
8602                                         wait_block_group_cache_done(cache);
8603                         }
8604                         ret = btrfs_trim_block_group(cache,
8605                                                      &group_trimmed,
8606                                                      start,
8607                                                      end,
8608                                                      range->minlen);
8609
8610                         trimmed += group_trimmed;
8611                         if (ret) {
8612                                 btrfs_put_block_group(cache);
8613                                 break;
8614                         }
8615                 }
8616
8617                 cache = next_block_group(fs_info->tree_root, cache);
8618         }
8619
8620         range->len = trimmed;
8621         return ret;
8622 }