fs/btrfs/extent_io.c
1 #include <linux/bitops.h>
2 #include <linux/slab.h>
3 #include <linux/bio.h>
4 #include <linux/mm.h>
5 #include <linux/pagemap.h>
6 #include <linux/page-flags.h>
7 #include <linux/spinlock.h>
8 #include <linux/blkdev.h>
9 #include <linux/swap.h>
10 #include <linux/writeback.h>
11 #include <linux/pagevec.h>
12 #include <linux/prefetch.h>
13 #include <linux/cleancache.h>
14 #include "extent_io.h"
15 #include "extent_map.h"
16 #include "ctree.h"
17 #include "btrfs_inode.h"
18 #include "volumes.h"
19 #include "check-integrity.h"
20 #include "locking.h"
21 #include "rcu-string.h"
22 #include "backref.h"
23
24 static struct kmem_cache *extent_state_cache;
25 static struct kmem_cache *extent_buffer_cache;
26 static struct bio_set *btrfs_bioset;
27
28 static inline bool extent_state_in_tree(const struct extent_state *state)
29 {
30         return !RB_EMPTY_NODE(&state->rb_node);
31 }
32
33 #ifdef CONFIG_BTRFS_DEBUG
34 static LIST_HEAD(buffers);
35 static LIST_HEAD(states);
36
37 static DEFINE_SPINLOCK(leak_lock);
38
39 static inline
40 void btrfs_leak_debug_add(struct list_head *new, struct list_head *head)
41 {
42         unsigned long flags;
43
44         spin_lock_irqsave(&leak_lock, flags);
45         list_add(new, head);
46         spin_unlock_irqrestore(&leak_lock, flags);
47 }
48
49 static inline
50 void btrfs_leak_debug_del(struct list_head *entry)
51 {
52         unsigned long flags;
53
54         spin_lock_irqsave(&leak_lock, flags);
55         list_del(entry);
56         spin_unlock_irqrestore(&leak_lock, flags);
57 }
58
59 static inline
60 void btrfs_leak_debug_check(void)
61 {
62         struct extent_state *state;
63         struct extent_buffer *eb;
64
65         while (!list_empty(&states)) {
66                 state = list_entry(states.next, struct extent_state, leak_list);
67                 pr_err("BTRFS: state leak: start %llu end %llu state %lu in tree %d refs %d\n",
68                        state->start, state->end, state->state,
69                        extent_state_in_tree(state),
70                        atomic_read(&state->refs));
71                 list_del(&state->leak_list);
72                 kmem_cache_free(extent_state_cache, state);
73         }
74
75         while (!list_empty(&buffers)) {
76                 eb = list_entry(buffers.next, struct extent_buffer, leak_list);
77                 printk(KERN_ERR "BTRFS: buffer leak start %llu len %lu "
78                        "refs %d\n",
79                        eb->start, eb->len, atomic_read(&eb->refs));
80                 list_del(&eb->leak_list);
81                 kmem_cache_free(extent_buffer_cache, eb);
82         }
83 }
84
85 #define btrfs_debug_check_extent_io_range(tree, start, end)             \
86         __btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
87 static inline void __btrfs_debug_check_extent_io_range(const char *caller,
88                 struct extent_io_tree *tree, u64 start, u64 end)
89 {
90         struct inode *inode;
91         u64 isize;
92
93         if (!tree->mapping)
94                 return;
95
96         inode = tree->mapping->host;
97         isize = i_size_read(inode);
98         if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
99                 printk_ratelimited(KERN_DEBUG
100                     "BTRFS: %s: ino %llu isize %llu odd range [%llu,%llu]\n",
101                                 caller, btrfs_ino(inode), isize, start, end);
102         }
103 }
104 #else
105 #define btrfs_leak_debug_add(new, head) do {} while (0)
106 #define btrfs_leak_debug_del(entry)     do {} while (0)
107 #define btrfs_leak_debug_check()        do {} while (0)
108 #define btrfs_debug_check_extent_io_range(c, s, e)      do {} while (0)
109 #endif
110
111 #define BUFFER_LRU_MAX 64
112
113 struct tree_entry {
114         u64 start;
115         u64 end;
116         struct rb_node rb_node;
117 };
118
119 struct extent_page_data {
120         struct bio *bio;
121         struct extent_io_tree *tree;
122         get_extent_t *get_extent;
123         unsigned long bio_flags;
124
125         /* tells writepage not to lock the state bits for this range;
126          * it still does the unlocking
127          */
128         unsigned int extent_locked:1;
129
130         /* tells the submit_bio code to use a WRITE_SYNC */
131         unsigned int sync_io:1;
132 };
133
134 static noinline void flush_write_bio(void *data);
135 static inline struct btrfs_fs_info *
136 tree_fs_info(struct extent_io_tree *tree)
137 {
138         if (!tree->mapping)
139                 return NULL;
140         return btrfs_sb(tree->mapping->host->i_sb);
141 }
142
143 int __init extent_io_init(void)
144 {
145         extent_state_cache = kmem_cache_create("btrfs_extent_state",
146                         sizeof(struct extent_state), 0,
147                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
148         if (!extent_state_cache)
149                 return -ENOMEM;
150
151         extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
152                         sizeof(struct extent_buffer), 0,
153                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
154         if (!extent_buffer_cache)
155                 goto free_state_cache;
156
157         btrfs_bioset = bioset_create(BIO_POOL_SIZE,
158                                      offsetof(struct btrfs_io_bio, bio));
159         if (!btrfs_bioset)
160                 goto free_buffer_cache;
161
162         if (bioset_integrity_create(btrfs_bioset, BIO_POOL_SIZE))
163                 goto free_bioset;
164
165         return 0;
166
167 free_bioset:
168         bioset_free(btrfs_bioset);
169         btrfs_bioset = NULL;
170
171 free_buffer_cache:
172         kmem_cache_destroy(extent_buffer_cache);
173         extent_buffer_cache = NULL;
174
175 free_state_cache:
176         kmem_cache_destroy(extent_state_cache);
177         extent_state_cache = NULL;
178         return -ENOMEM;
179 }
180
181 void extent_io_exit(void)
182 {
183         btrfs_leak_debug_check();
184
185         /*
186          * Make sure all delayed rcu free are flushed before we
187          * destroy caches.
188          */
189         rcu_barrier();
190         if (extent_state_cache)
191                 kmem_cache_destroy(extent_state_cache);
192         if (extent_buffer_cache)
193                 kmem_cache_destroy(extent_buffer_cache);
194         if (btrfs_bioset)
195                 bioset_free(btrfs_bioset);
196 }
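/*
 * A minimal sketch of the expected module-level usage, assuming the usual
 * init_btrfs_fs()/exit_btrfs_fs() entry points in fs/btrfs/super.c: the
 * caches and bioset above are created once at module load and torn down
 * again at module unload.
 *
 *	static int __init init_btrfs_fs(void)
 *	{
 *		int err;
 *
 *		err = extent_io_init();
 *		if (err)
 *			return err;
 *		...
 *		return 0;
 *	}
 *
 *	static void __exit exit_btrfs_fs(void)
 *	{
 *		...
 *		extent_io_exit();
 *	}
 */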
197
198 void extent_io_tree_init(struct extent_io_tree *tree,
199                          struct address_space *mapping)
200 {
201         tree->state = RB_ROOT;
202         tree->ops = NULL;
203         tree->dirty_bytes = 0;
204         spin_lock_init(&tree->lock);
205         tree->mapping = mapping;
206 }
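/*
 * A typical caller embeds an extent_io_tree in a larger structure and
 * initializes it against the owning inode's address space.  A minimal
 * sketch, assuming a btrfs_inode-style embedding:
 *
 *	struct btrfs_inode *ei = BTRFS_I(inode);
 *
 *	extent_io_tree_init(&ei->io_tree, &inode->i_data);
 *	extent_io_tree_init(&ei->io_failure_tree, &inode->i_data);
 */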
207
208 static struct extent_state *alloc_extent_state(gfp_t mask)
209 {
210         struct extent_state *state;
211
212         state = kmem_cache_alloc(extent_state_cache, mask);
213         if (!state)
214                 return state;
215         state->state = 0;
216         state->private = 0;
217         RB_CLEAR_NODE(&state->rb_node);
218         btrfs_leak_debug_add(&state->leak_list, &states);
219         atomic_set(&state->refs, 1);
220         init_waitqueue_head(&state->wq);
221         trace_alloc_extent_state(state, mask, _RET_IP_);
222         return state;
223 }
224
225 void free_extent_state(struct extent_state *state)
226 {
227         if (!state)
228                 return;
229         if (atomic_dec_and_test(&state->refs)) {
230                 WARN_ON(extent_state_in_tree(state));
231                 btrfs_leak_debug_del(&state->leak_list);
232                 trace_free_extent_state(state, _RET_IP_);
233                 kmem_cache_free(extent_state_cache, state);
234         }
235 }
236
237 static struct rb_node *tree_insert(struct rb_root *root,
238                                    struct rb_node *search_start,
239                                    u64 offset,
240                                    struct rb_node *node,
241                                    struct rb_node ***p_in,
242                                    struct rb_node **parent_in)
243 {
244         struct rb_node **p;
245         struct rb_node *parent = NULL;
246         struct tree_entry *entry;
247
248         if (p_in && parent_in) {
249                 p = *p_in;
250                 parent = *parent_in;
251                 goto do_insert;
252         }
253
254         p = search_start ? &search_start : &root->rb_node;
255         while (*p) {
256                 parent = *p;
257                 entry = rb_entry(parent, struct tree_entry, rb_node);
258
259                 if (offset < entry->start)
260                         p = &(*p)->rb_left;
261                 else if (offset > entry->end)
262                         p = &(*p)->rb_right;
263                 else
264                         return parent;
265         }
266
267 do_insert:
268         rb_link_node(node, parent, p);
269         rb_insert_color(node, root);
270         return NULL;
271 }
272
273 static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
274                                       struct rb_node **prev_ret,
275                                       struct rb_node **next_ret,
276                                       struct rb_node ***p_ret,
277                                       struct rb_node **parent_ret)
278 {
279         struct rb_root *root = &tree->state;
280         struct rb_node **n = &root->rb_node;
281         struct rb_node *prev = NULL;
282         struct rb_node *orig_prev = NULL;
283         struct tree_entry *entry;
284         struct tree_entry *prev_entry = NULL;
285
286         while (*n) {
287                 prev = *n;
288                 entry = rb_entry(prev, struct tree_entry, rb_node);
289                 prev_entry = entry;
290
291                 if (offset < entry->start)
292                         n = &(*n)->rb_left;
293                 else if (offset > entry->end)
294                         n = &(*n)->rb_right;
295                 else
296                         return *n;
297         }
298
299         if (p_ret)
300                 *p_ret = n;
301         if (parent_ret)
302                 *parent_ret = prev;
303
304         if (prev_ret) {
305                 orig_prev = prev;
306                 while (prev && offset > prev_entry->end) {
307                         prev = rb_next(prev);
308                         prev_entry = rb_entry(prev, struct tree_entry, rb_node);
309                 }
310                 *prev_ret = prev;
311                 prev = orig_prev;
312         }
313
314         if (next_ret) {
315                 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
316                 while (prev && offset < prev_entry->start) {
317                         prev = rb_prev(prev);
318                         prev_entry = rb_entry(prev, struct tree_entry, rb_node);
319                 }
320                 *next_ret = prev;
321         }
322         return NULL;
323 }
324
325 static inline struct rb_node *
326 tree_search_for_insert(struct extent_io_tree *tree,
327                        u64 offset,
328                        struct rb_node ***p_ret,
329                        struct rb_node **parent_ret)
330 {
331         struct rb_node *prev = NULL;
332         struct rb_node *ret;
333
334         ret = __etree_search(tree, offset, &prev, NULL, p_ret, parent_ret);
335         if (!ret)
336                 return prev;
337         return ret;
338 }
339
340 static inline struct rb_node *tree_search(struct extent_io_tree *tree,
341                                           u64 offset)
342 {
343         return tree_search_for_insert(tree, offset, NULL, NULL);
344 }
345
346 static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
347                      struct extent_state *other)
348 {
349         if (tree->ops && tree->ops->merge_extent_hook)
350                 tree->ops->merge_extent_hook(tree->mapping->host, new,
351                                              other);
352 }
353
354 /*
355  * utility function to look for merge candidates inside a given range.
356  * Any extents with matching state are merged together into a single
357  * extent in the tree.  Extents with EXTENT_IOBITS in their state field
358  * are not merged because the end_io handlers need to be able to do
359  * operations on them without sleeping (or doing allocations/splits).
360  *
361  * This should be called with the tree lock held.
362  */
363 static void merge_state(struct extent_io_tree *tree,
364                         struct extent_state *state)
365 {
366         struct extent_state *other;
367         struct rb_node *other_node;
368
369         if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
370                 return;
371
372         other_node = rb_prev(&state->rb_node);
373         if (other_node) {
374                 other = rb_entry(other_node, struct extent_state, rb_node);
375                 if (other->end == state->start - 1 &&
376                     other->state == state->state) {
377                         merge_cb(tree, state, other);
378                         state->start = other->start;
379                         rb_erase(&other->rb_node, &tree->state);
380                         RB_CLEAR_NODE(&other->rb_node);
381                         free_extent_state(other);
382                 }
383         }
384         other_node = rb_next(&state->rb_node);
385         if (other_node) {
386                 other = rb_entry(other_node, struct extent_state, rb_node);
387                 if (other->start == state->end + 1 &&
388                     other->state == state->state) {
389                         merge_cb(tree, state, other);
390                         state->end = other->end;
391                         rb_erase(&other->rb_node, &tree->state);
392                         RB_CLEAR_NODE(&other->rb_node);
393                         free_extent_state(other);
394                 }
395         }
396 }
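/*
 * Concretely: if the tree holds [0, 4095] and [4096, 8191] with identical
 * state bits, and neither carries EXTENT_IOBITS or EXTENT_BOUNDARY, the
 * checks above collapse them into a single [0, 8191] extent_state and free
 * the absorbed record.
 */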
397
398 static void set_state_cb(struct extent_io_tree *tree,
399                          struct extent_state *state, unsigned long *bits)
400 {
401         if (tree->ops && tree->ops->set_bit_hook)
402                 tree->ops->set_bit_hook(tree->mapping->host, state, bits);
403 }
404
405 static void clear_state_cb(struct extent_io_tree *tree,
406                            struct extent_state *state, unsigned long *bits)
407 {
408         if (tree->ops && tree->ops->clear_bit_hook)
409                 tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
410 }
411
412 static void set_state_bits(struct extent_io_tree *tree,
413                            struct extent_state *state, unsigned long *bits);
414
415 /*
416  * insert an extent_state struct into the tree.  'bits' are set on the
417  * struct before it is inserted.
418  *
419  * This may return -EEXIST if the extent is already there, in which case the
420  * state struct is freed.
421  *
422  * The tree lock is not taken internally.  This is a utility function and
423  * probably isn't what you want to call (see set/clear_extent_bit).
424  */
425 static int insert_state(struct extent_io_tree *tree,
426                         struct extent_state *state, u64 start, u64 end,
427                         struct rb_node ***p,
428                         struct rb_node **parent,
429                         unsigned long *bits)
430 {
431         struct rb_node *node;
432
433         if (end < start)
434                 WARN(1, KERN_ERR "BTRFS: end < start %llu %llu\n",
435                        end, start);
436         state->start = start;
437         state->end = end;
438
439         set_state_bits(tree, state, bits);
440
441         node = tree_insert(&tree->state, NULL, end, &state->rb_node, p, parent);
442         if (node) {
443                 struct extent_state *found;
444                 found = rb_entry(node, struct extent_state, rb_node);
445                 printk(KERN_ERR "BTRFS: found node %llu %llu on insert of "
446                        "%llu %llu\n",
447                        found->start, found->end, start, end);
448                 return -EEXIST;
449         }
450         merge_state(tree, state);
451         return 0;
452 }
453
454 static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
455                      u64 split)
456 {
457         if (tree->ops && tree->ops->split_extent_hook)
458                 tree->ops->split_extent_hook(tree->mapping->host, orig, split);
459 }
460
461 /*
462  * split a given extent state struct in two, inserting the preallocated
463  * struct 'prealloc' as the newly created second half.  'split' indicates an
464  * offset inside 'orig' where it should be split.
465  *
466  * Before calling,
467  * the tree has 'orig' at [orig->start, orig->end].  After calling, there
468  * are two extent state structs in the tree:
469  * prealloc: [orig->start, split - 1]
470  * orig: [ split, orig->end ]
471  *
472  * The tree locks are not taken by this function. They need to be held
473  * by the caller.
474  */
475 static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
476                        struct extent_state *prealloc, u64 split)
477 {
478         struct rb_node *node;
479
480         split_cb(tree, orig, split);
481
482         prealloc->start = orig->start;
483         prealloc->end = split - 1;
484         prealloc->state = orig->state;
485         orig->start = split;
486
487         node = tree_insert(&tree->state, &orig->rb_node, prealloc->end,
488                            &prealloc->rb_node, NULL, NULL);
489         if (node) {
490                 free_extent_state(prealloc);
491                 return -EEXIST;
492         }
493         return 0;
494 }
495
496 static struct extent_state *next_state(struct extent_state *state)
497 {
498         struct rb_node *next = rb_next(&state->rb_node);
499         if (next)
500                 return rb_entry(next, struct extent_state, rb_node);
501         else
502                 return NULL;
503 }
504
505 /*
506  * utility function to clear some bits in an extent state struct.
507  * it will optionally wake up anyone waiting on this state (wake == 1).
508  *
509  * If no bits are set on the state struct after clearing things, the
510  * struct is freed and removed from the tree
511  */
512 static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
513                                             struct extent_state *state,
514                                             unsigned long *bits, int wake)
515 {
516         struct extent_state *next;
517         unsigned long bits_to_clear = *bits & ~EXTENT_CTLBITS;
518
519         if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
520                 u64 range = state->end - state->start + 1;
521                 WARN_ON(range > tree->dirty_bytes);
522                 tree->dirty_bytes -= range;
523         }
524         clear_state_cb(tree, state, bits);
525         state->state &= ~bits_to_clear;
526         if (wake)
527                 wake_up(&state->wq);
528         if (state->state == 0) {
529                 next = next_state(state);
530                 if (extent_state_in_tree(state)) {
531                         rb_erase(&state->rb_node, &tree->state);
532                         RB_CLEAR_NODE(&state->rb_node);
533                         free_extent_state(state);
534                 } else {
535                         WARN_ON(1);
536                 }
537         } else {
538                 merge_state(tree, state);
539                 next = next_state(state);
540         }
541         return next;
542 }
543
544 static struct extent_state *
545 alloc_extent_state_atomic(struct extent_state *prealloc)
546 {
547         if (!prealloc)
548                 prealloc = alloc_extent_state(GFP_ATOMIC);
549
550         return prealloc;
551 }
552
553 static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
554 {
555         btrfs_panic(tree_fs_info(tree), err, "Locking error: "
556                     "Extent tree was modified by another "
557                     "thread while locked.");
558 }
559
560 /*
561  * clear some bits on a range in the tree.  This may require splitting
562  * or inserting elements in the tree, so the gfp mask is used to
563  * indicate which allocations or sleeping are allowed.
564  *
565  * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
566  * the given range from the tree regardless of state (ie for truncate).
567  *
568  * the range [start, end] is inclusive.
569  *
570  * This takes the tree lock, and returns 0 on success and < 0 on error.
571  */
572 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
573                      unsigned long bits, int wake, int delete,
574                      struct extent_state **cached_state,
575                      gfp_t mask)
576 {
577         struct extent_state *state;
578         struct extent_state *cached;
579         struct extent_state *prealloc = NULL;
580         struct rb_node *node;
581         u64 last_end;
582         int err;
583         int clear = 0;
584
585         btrfs_debug_check_extent_io_range(tree, start, end);
586
587         if (bits & EXTENT_DELALLOC)
588                 bits |= EXTENT_NORESERVE;
589
590         if (delete)
591                 bits |= ~EXTENT_CTLBITS;
592         bits |= EXTENT_FIRST_DELALLOC;
593
594         if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
595                 clear = 1;
596 again:
597         if (!prealloc && (mask & __GFP_WAIT)) {
598                 /*
599                  * Don't care for allocation failure here because we might end
600                  * up not needing the pre-allocated extent state at all, which
601                  * is the case if we only have in the tree extent states that
602                  * cover our input range and don't cover any other range.
603                  * If we end up needing a new extent state we allocate it later.
604                  */
605                 prealloc = alloc_extent_state(mask);
606         }
607
608         spin_lock(&tree->lock);
609         if (cached_state) {
610                 cached = *cached_state;
611
612                 if (clear) {
613                         *cached_state = NULL;
614                         cached_state = NULL;
615                 }
616
617                 if (cached && extent_state_in_tree(cached) &&
618                     cached->start <= start && cached->end > start) {
619                         if (clear)
620                                 atomic_dec(&cached->refs);
621                         state = cached;
622                         goto hit_next;
623                 }
624                 if (clear)
625                         free_extent_state(cached);
626         }
627         /*
628          * this search will find the extents that end after
629          * our range starts
630          */
631         node = tree_search(tree, start);
632         if (!node)
633                 goto out;
634         state = rb_entry(node, struct extent_state, rb_node);
635 hit_next:
636         if (state->start > end)
637                 goto out;
638         WARN_ON(state->end < start);
639         last_end = state->end;
640
641         /* the state doesn't have the wanted bits, go ahead */
642         if (!(state->state & bits)) {
643                 state = next_state(state);
644                 goto next;
645         }
646
647         /*
648          *     | ---- desired range ---- |
649          *  | state | or
650          *  | ------------- state -------------- |
651          *
652          * We need to split the extent we found, and may flip
653          * bits on second half.
654          *
655          * If the extent we found extends past our range, we
656          * just split and search again.  It'll get split again
657          * the next time though.
658          *
659          * If the extent we found is inside our range, we clear
660          * the desired bit on it.
661          */
662
663         if (state->start < start) {
664                 prealloc = alloc_extent_state_atomic(prealloc);
665                 BUG_ON(!prealloc);
666                 err = split_state(tree, state, prealloc, start);
667                 if (err)
668                         extent_io_tree_panic(tree, err);
669
670                 prealloc = NULL;
671                 if (err)
672                         goto out;
673                 if (state->end <= end) {
674                         state = clear_state_bit(tree, state, &bits, wake);
675                         goto next;
676                 }
677                 goto search_again;
678         }
679         /*
680          * | ---- desired range ---- |
681          *                        | state |
682          * We need to split the extent, and clear the bit
683          * on the first half
684          */
685         if (state->start <= end && state->end > end) {
686                 prealloc = alloc_extent_state_atomic(prealloc);
687                 BUG_ON(!prealloc);
688                 err = split_state(tree, state, prealloc, end + 1);
689                 if (err)
690                         extent_io_tree_panic(tree, err);
691
692                 if (wake)
693                         wake_up(&state->wq);
694
695                 clear_state_bit(tree, prealloc, &bits, wake);
696
697                 prealloc = NULL;
698                 goto out;
699         }
700
701         state = clear_state_bit(tree, state, &bits, wake);
702 next:
703         if (last_end == (u64)-1)
704                 goto out;
705         start = last_end + 1;
706         if (start <= end && state && !need_resched())
707                 goto hit_next;
708         goto search_again;
709
710 out:
711         spin_unlock(&tree->lock);
712         if (prealloc)
713                 free_extent_state(prealloc);
714
715         return 0;
716
717 search_again:
718         if (start > end)
719                 goto out;
720         spin_unlock(&tree->lock);
721         if (mask & __GFP_WAIT)
722                 cond_resched();
723         goto again;
724 }
725
726 static void wait_on_state(struct extent_io_tree *tree,
727                           struct extent_state *state)
728                 __releases(tree->lock)
729                 __acquires(tree->lock)
730 {
731         DEFINE_WAIT(wait);
732         prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
733         spin_unlock(&tree->lock);
734         schedule();
735         spin_lock(&tree->lock);
736         finish_wait(&state->wq, &wait);
737 }
738
739 /*
740  * waits for one or more bits to clear on a range in the state tree.
741  * The range [start, end] is inclusive.
742  * The tree lock is taken by this function
743  */
744 static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
745                             unsigned long bits)
746 {
747         struct extent_state *state;
748         struct rb_node *node;
749
750         btrfs_debug_check_extent_io_range(tree, start, end);
751
752         spin_lock(&tree->lock);
753 again:
754         while (1) {
755                 /*
756                  * this search will find all the extents that end after
757                  * our range starts
758                  */
759                 node = tree_search(tree, start);
760 process_node:
761                 if (!node)
762                         break;
763
764                 state = rb_entry(node, struct extent_state, rb_node);
765
766                 if (state->start > end)
767                         goto out;
768
769                 if (state->state & bits) {
770                         start = state->start;
771                         atomic_inc(&state->refs);
772                         wait_on_state(tree, state);
773                         free_extent_state(state);
774                         goto again;
775                 }
776                 start = state->end + 1;
777
778                 if (start > end)
779                         break;
780
781                 if (!cond_resched_lock(&tree->lock)) {
782                         node = rb_next(node);
783                         goto process_node;
784                 }
785         }
786 out:
787         spin_unlock(&tree->lock);
788 }
789
790 static void set_state_bits(struct extent_io_tree *tree,
791                            struct extent_state *state,
792                            unsigned long *bits)
793 {
794         unsigned long bits_to_set = *bits & ~EXTENT_CTLBITS;
795
796         set_state_cb(tree, state, bits);
797         if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
798                 u64 range = state->end - state->start + 1;
799                 tree->dirty_bytes += range;
800         }
801         state->state |= bits_to_set;
802 }
803
804 static void cache_state_if_flags(struct extent_state *state,
805                                  struct extent_state **cached_ptr,
806                                  const u64 flags)
807 {
808         if (cached_ptr && !(*cached_ptr)) {
809                 if (!flags || (state->state & flags)) {
810                         *cached_ptr = state;
811                         atomic_inc(&state->refs);
812                 }
813         }
814 }
815
816 static void cache_state(struct extent_state *state,
817                         struct extent_state **cached_ptr)
818 {
819         return cache_state_if_flags(state, cached_ptr,
820                                     EXTENT_IOBITS | EXTENT_BOUNDARY);
821 }
822
823 /*
824  * set some bits on a range in the tree.  This may require allocations or
825  * sleeping, so the gfp mask is used to indicate what is allowed.
826  *
827  * If any of the exclusive bits are set, this will fail with -EEXIST if some
828  * part of the range already has the desired bits set.  The start of the
829  * existing range is returned in failed_start in this case.
830  *
831  * [start, end] is inclusive.  This takes the tree lock.
832  */
833
834 static int __must_check
835 __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
836                  unsigned long bits, unsigned long exclusive_bits,
837                  u64 *failed_start, struct extent_state **cached_state,
838                  gfp_t mask)
839 {
840         struct extent_state *state;
841         struct extent_state *prealloc = NULL;
842         struct rb_node *node;
843         struct rb_node **p;
844         struct rb_node *parent;
845         int err = 0;
846         u64 last_start;
847         u64 last_end;
848
849         btrfs_debug_check_extent_io_range(tree, start, end);
850
851         bits |= EXTENT_FIRST_DELALLOC;
852 again:
853         if (!prealloc && (mask & __GFP_WAIT)) {
854                 prealloc = alloc_extent_state(mask);
855                 BUG_ON(!prealloc);
856         }
857
858         spin_lock(&tree->lock);
859         if (cached_state && *cached_state) {
860                 state = *cached_state;
861                 if (state->start <= start && state->end > start &&
862                     extent_state_in_tree(state)) {
863                         node = &state->rb_node;
864                         goto hit_next;
865                 }
866         }
867         /*
868          * this search will find all the extents that end after
869          * our range starts.
870          */
871         node = tree_search_for_insert(tree, start, &p, &parent);
872         if (!node) {
873                 prealloc = alloc_extent_state_atomic(prealloc);
874                 BUG_ON(!prealloc);
875                 err = insert_state(tree, prealloc, start, end,
876                                    &p, &parent, &bits);
877                 if (err)
878                         extent_io_tree_panic(tree, err);
879
880                 cache_state(prealloc, cached_state);
881                 prealloc = NULL;
882                 goto out;
883         }
884         state = rb_entry(node, struct extent_state, rb_node);
885 hit_next:
886         last_start = state->start;
887         last_end = state->end;
888
889         /*
890          * | ---- desired range ---- |
891          * | state |
892          *
893          * Just lock what we found and keep going
894          */
895         if (state->start == start && state->end <= end) {
896                 if (state->state & exclusive_bits) {
897                         *failed_start = state->start;
898                         err = -EEXIST;
899                         goto out;
900                 }
901
902                 set_state_bits(tree, state, &bits);
903                 cache_state(state, cached_state);
904                 merge_state(tree, state);
905                 if (last_end == (u64)-1)
906                         goto out;
907                 start = last_end + 1;
908                 state = next_state(state);
909                 if (start < end && state && state->start == start &&
910                     !need_resched())
911                         goto hit_next;
912                 goto search_again;
913         }
914
915         /*
916          *     | ---- desired range ---- |
917          * | state |
918          *   or
919          * | ------------- state -------------- |
920          *
921          * We need to split the extent we found, and may flip bits on
922          * second half.
923          *
924          * If the extent we found extends past our
925          * range, we just split and search again.  It'll get split
926          * again the next time though.
927          *
928          * If the extent we found is inside our range, we set the
929          * desired bit on it.
930          */
931         if (state->start < start) {
932                 if (state->state & exclusive_bits) {
933                         *failed_start = start;
934                         err = -EEXIST;
935                         goto out;
936                 }
937
938                 prealloc = alloc_extent_state_atomic(prealloc);
939                 BUG_ON(!prealloc);
940                 err = split_state(tree, state, prealloc, start);
941                 if (err)
942                         extent_io_tree_panic(tree, err);
943
944                 prealloc = NULL;
945                 if (err)
946                         goto out;
947                 if (state->end <= end) {
948                         set_state_bits(tree, state, &bits);
949                         cache_state(state, cached_state);
950                         merge_state(tree, state);
951                         if (last_end == (u64)-1)
952                                 goto out;
953                         start = last_end + 1;
954                         state = next_state(state);
955                         if (start < end && state && state->start == start &&
956                             !need_resched())
957                                 goto hit_next;
958                 }
959                 goto search_again;
960         }
961         /*
962          * | ---- desired range ---- |
963          *     | state | or               | state |
964          *
965          * There's a hole, we need to insert something in it and
966          * ignore the extent we found.
967          */
968         if (state->start > start) {
969                 u64 this_end;
970                 if (end < last_start)
971                         this_end = end;
972                 else
973                         this_end = last_start - 1;
974
975                 prealloc = alloc_extent_state_atomic(prealloc);
976                 BUG_ON(!prealloc);
977
978                 /*
979                  * Avoid freeing 'prealloc' if it can be merged with
980                  * the later extent.
981                  */
982                 err = insert_state(tree, prealloc, start, this_end,
983                                    NULL, NULL, &bits);
984                 if (err)
985                         extent_io_tree_panic(tree, err);
986
987                 cache_state(prealloc, cached_state);
988                 prealloc = NULL;
989                 start = this_end + 1;
990                 goto search_again;
991         }
992         /*
993          * | ---- desired range ---- |
994          *                        | state |
995          * We need to split the extent, and set the bit
996          * on the first half
997          */
998         if (state->start <= end && state->end > end) {
999                 if (state->state & exclusive_bits) {
1000                         *failed_start = start;
1001                         err = -EEXIST;
1002                         goto out;
1003                 }
1004
1005                 prealloc = alloc_extent_state_atomic(prealloc);
1006                 BUG_ON(!prealloc);
1007                 err = split_state(tree, state, prealloc, end + 1);
1008                 if (err)
1009                         extent_io_tree_panic(tree, err);
1010
1011                 set_state_bits(tree, prealloc, &bits);
1012                 cache_state(prealloc, cached_state);
1013                 merge_state(tree, prealloc);
1014                 prealloc = NULL;
1015                 goto out;
1016         }
1017
1018         goto search_again;
1019
1020 out:
1021         spin_unlock(&tree->lock);
1022         if (prealloc)
1023                 free_extent_state(prealloc);
1024
1025         return err;
1026
1027 search_again:
1028         if (start > end)
1029                 goto out;
1030         spin_unlock(&tree->lock);
1031         if (mask & __GFP_WAIT)
1032                 cond_resched();
1033         goto again;
1034 }
1035
1036 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1037                    unsigned long bits, u64 * failed_start,
1038                    struct extent_state **cached_state, gfp_t mask)
1039 {
1040         return __set_extent_bit(tree, start, end, bits, 0, failed_start,
1041                                 cached_state, mask);
1042 }
1043
1044
1045 /**
1046  * convert_extent_bit - convert all bits in a given range from one bit to
1047  *                      another
1048  * @tree:       the io tree to search
1049  * @start:      the start offset in bytes
1050  * @end:        the end offset in bytes (inclusive)
1051  * @bits:       the bits to set in this range
1052  * @clear_bits: the bits to clear in this range
1053  * @cached_state:       state that we're going to cache
1054  * @mask:       the allocation mask
1055  *
1056  * This will go through and set bits for the given range.  If any states exist
1057  * already in this range they are set with the given bit and cleared of the
1058  * clear_bits.  This is only meant to be used by things that are mergeable, ie
1059  * converting from say DELALLOC to DIRTY.  This is not meant to be used with
1060  * boundary bits like LOCK.
1061  */
1062 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1063                        unsigned long bits, unsigned long clear_bits,
1064                        struct extent_state **cached_state, gfp_t mask)
1065 {
1066         struct extent_state *state;
1067         struct extent_state *prealloc = NULL;
1068         struct rb_node *node;
1069         struct rb_node **p;
1070         struct rb_node *parent;
1071         int err = 0;
1072         u64 last_start;
1073         u64 last_end;
1074         bool first_iteration = true;
1075
1076         btrfs_debug_check_extent_io_range(tree, start, end);
1077
1078 again:
1079         if (!prealloc && (mask & __GFP_WAIT)) {
1080                 /*
1081                  * Best effort, don't worry if extent state allocation fails
1082                  * here for the first iteration. We might have a cached state
1083                  * that matches exactly the target range, in which case no
1084                  * extent state allocations are needed. We'll only know this
1085                  * after locking the tree.
1086                  */
1087                 prealloc = alloc_extent_state(mask);
1088                 if (!prealloc && !first_iteration)
1089                         return -ENOMEM;
1090         }
1091
1092         spin_lock(&tree->lock);
1093         if (cached_state && *cached_state) {
1094                 state = *cached_state;
1095                 if (state->start <= start && state->end > start &&
1096                     extent_state_in_tree(state)) {
1097                         node = &state->rb_node;
1098                         goto hit_next;
1099                 }
1100         }
1101
1102         /*
1103          * this search will find all the extents that end after
1104          * our range starts.
1105          */
1106         node = tree_search_for_insert(tree, start, &p, &parent);
1107         if (!node) {
1108                 prealloc = alloc_extent_state_atomic(prealloc);
1109                 if (!prealloc) {
1110                         err = -ENOMEM;
1111                         goto out;
1112                 }
1113                 err = insert_state(tree, prealloc, start, end,
1114                                    &p, &parent, &bits);
1115                 if (err)
1116                         extent_io_tree_panic(tree, err);
1117                 cache_state(prealloc, cached_state);
1118                 prealloc = NULL;
1119                 goto out;
1120         }
1121         state = rb_entry(node, struct extent_state, rb_node);
1122 hit_next:
1123         last_start = state->start;
1124         last_end = state->end;
1125
1126         /*
1127          * | ---- desired range ---- |
1128          * | state |
1129          *
1130          * Just lock what we found and keep going
1131          */
1132         if (state->start == start && state->end <= end) {
1133                 set_state_bits(tree, state, &bits);
1134                 cache_state(state, cached_state);
1135                 state = clear_state_bit(tree, state, &clear_bits, 0);
1136                 if (last_end == (u64)-1)
1137                         goto out;
1138                 start = last_end + 1;
1139                 if (start < end && state && state->start == start &&
1140                     !need_resched())
1141                         goto hit_next;
1142                 goto search_again;
1143         }
1144
1145         /*
1146          *     | ---- desired range ---- |
1147          * | state |
1148          *   or
1149          * | ------------- state -------------- |
1150          *
1151          * We need to split the extent we found, and may flip bits on
1152          * second half.
1153          *
1154          * If the extent we found extends past our
1155          * range, we just split and search again.  It'll get split
1156          * again the next time though.
1157          *
1158          * If the extent we found is inside our range, we set the
1159          * desired bit on it.
1160          */
1161         if (state->start < start) {
1162                 prealloc = alloc_extent_state_atomic(prealloc);
1163                 if (!prealloc) {
1164                         err = -ENOMEM;
1165                         goto out;
1166                 }
1167                 err = split_state(tree, state, prealloc, start);
1168                 if (err)
1169                         extent_io_tree_panic(tree, err);
1170                 prealloc = NULL;
1171                 if (err)
1172                         goto out;
1173                 if (state->end <= end) {
1174                         set_state_bits(tree, state, &bits);
1175                         cache_state(state, cached_state);
1176                         state = clear_state_bit(tree, state, &clear_bits, 0);
1177                         if (last_end == (u64)-1)
1178                                 goto out;
1179                         start = last_end + 1;
1180                         if (start < end && state && state->start == start &&
1181                             !need_resched())
1182                                 goto hit_next;
1183                 }
1184                 goto search_again;
1185         }
1186         /*
1187          * | ---- desired range ---- |
1188          *     | state | or               | state |
1189          *
1190          * There's a hole, we need to insert something in it and
1191          * ignore the extent we found.
1192          */
1193         if (state->start > start) {
1194                 u64 this_end;
1195                 if (end < last_start)
1196                         this_end = end;
1197                 else
1198                         this_end = last_start - 1;
1199
1200                 prealloc = alloc_extent_state_atomic(prealloc);
1201                 if (!prealloc) {
1202                         err = -ENOMEM;
1203                         goto out;
1204                 }
1205
1206                 /*
1207                  * Avoid freeing 'prealloc' if it can be merged with
1208                  * the later extent.
1209                  */
1210                 err = insert_state(tree, prealloc, start, this_end,
1211                                    NULL, NULL, &bits);
1212                 if (err)
1213                         extent_io_tree_panic(tree, err);
1214                 cache_state(prealloc, cached_state);
1215                 prealloc = NULL;
1216                 start = this_end + 1;
1217                 goto search_again;
1218         }
1219         /*
1220          * | ---- desired range ---- |
1221          *                        | state |
1222          * We need to split the extent, and set the bit
1223          * on the first half
1224          */
1225         if (state->start <= end && state->end > end) {
1226                 prealloc = alloc_extent_state_atomic(prealloc);
1227                 if (!prealloc) {
1228                         err = -ENOMEM;
1229                         goto out;
1230                 }
1231
1232                 err = split_state(tree, state, prealloc, end + 1);
1233                 if (err)
1234                         extent_io_tree_panic(tree, err);
1235
1236                 set_state_bits(tree, prealloc, &bits);
1237                 cache_state(prealloc, cached_state);
1238                 clear_state_bit(tree, prealloc, &clear_bits, 0);
1239                 prealloc = NULL;
1240                 goto out;
1241         }
1242
1243         goto search_again;
1244
1245 out:
1246         spin_unlock(&tree->lock);
1247         if (prealloc)
1248                 free_extent_state(prealloc);
1249
1250         return err;
1251
1252 search_again:
1253         if (start > end)
1254                 goto out;
1255         spin_unlock(&tree->lock);
1256         if (mask & __GFP_WAIT)
1257                 cond_resched();
1258         first_iteration = false;
1259         goto again;
1260 }
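/*
 * A minimal usage sketch, assuming a transaction-commit style caller with a
 * 'dirty_pages' io tree that wants to remember which ranges were dirty while
 * clearing the dirty bit (a mergeable-bit conversion of the kind described
 * above):
 *
 *	struct extent_state *cached = NULL;
 *
 *	convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
 *			   EXTENT_DIRTY, &cached, GFP_NOFS);
 */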
1261
1262 /* wrappers around set/clear extent bit */
1263 int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
1264                      gfp_t mask)
1265 {
1266         return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
1267                               NULL, mask);
1268 }
1269
1270 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1271                     unsigned long bits, gfp_t mask)
1272 {
1273         return set_extent_bit(tree, start, end, bits, NULL,
1274                               NULL, mask);
1275 }
1276
1277 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1278                       unsigned long bits, gfp_t mask)
1279 {
1280         return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
1281 }
1282
1283 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
1284                         struct extent_state **cached_state, gfp_t mask)
1285 {
1286         return set_extent_bit(tree, start, end,
1287                               EXTENT_DELALLOC | EXTENT_UPTODATE,
1288                               NULL, cached_state, mask);
1289 }
1290
1291 int set_extent_defrag(struct extent_io_tree *tree, u64 start, u64 end,
1292                       struct extent_state **cached_state, gfp_t mask)
1293 {
1294         return set_extent_bit(tree, start, end,
1295                               EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
1296                               NULL, cached_state, mask);
1297 }
1298
1299 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
1300                        gfp_t mask)
1301 {
1302         return clear_extent_bit(tree, start, end,
1303                                 EXTENT_DIRTY | EXTENT_DELALLOC |
1304                                 EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask);
1305 }
1306
1307 int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
1308                      gfp_t mask)
1309 {
1310         return set_extent_bit(tree, start, end, EXTENT_NEW, NULL,
1311                               NULL, mask);
1312 }
1313
1314 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
1315                         struct extent_state **cached_state, gfp_t mask)
1316 {
1317         return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL,
1318                               cached_state, mask);
1319 }
1320
1321 int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
1322                           struct extent_state **cached_state, gfp_t mask)
1323 {
1324         return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
1325                                 cached_state, mask);
1326 }
1327
1328 /*
1329  * either insert or lock state struct between start and end.  Waits for any
1330  * conflicting locked range to clear before returning.
1331  */
1332 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1333                      unsigned long bits, struct extent_state **cached_state)
1334 {
1335         int err;
1336         u64 failed_start;
1337         while (1) {
1338                 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
1339                                        EXTENT_LOCKED, &failed_start,
1340                                        cached_state, GFP_NOFS);
1341                 if (err == -EEXIST) {
1342                         wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
1343                         start = failed_start;
1344                 } else
1345                         break;
1346                 WARN_ON(start > end);
1347         }
1348         return err;
1349 }
1350
1351 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1352 {
1353         return lock_extent_bits(tree, start, end, 0, NULL);
1354 }
1355
1356 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1357 {
1358         int err;
1359         u64 failed_start;
1360
1361         err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
1362                                &failed_start, NULL, GFP_NOFS);
1363         if (err == -EEXIST) {
1364                 if (failed_start > start)
1365                         clear_extent_bit(tree, start, failed_start - 1,
1366                                          EXTENT_LOCKED, 1, 0, NULL, GFP_NOFS);
1367                 return 0;
1368         }
1369         return 1;
1370 }
1371
1372 int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
1373                          struct extent_state **cached, gfp_t mask)
1374 {
1375         return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
1376                                 mask);
1377 }
1378
1379 int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1380 {
1381         return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
1382                                 GFP_NOFS);
1383 }
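/*
 * The usual calling pattern pairs lock_extent()/lock_extent_bits() with
 * unlock_extent()/unlock_extent_cached() around work on a byte range.  A
 * minimal sketch, assuming an inode's io_tree and a cached state:
 *
 *	struct extent_state *cached_state = NULL;
 *
 *	lock_extent_bits(&BTRFS_I(inode)->io_tree, start, end, 0,
 *			 &cached_state);
 *	... operate on [start, end] ...
 *	unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, end,
 *			     &cached_state, GFP_NOFS);
 */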
1384
1385 int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
1386 {
1387         unsigned long index = start >> PAGE_CACHE_SHIFT;
1388         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1389         struct page *page;
1390
1391         while (index <= end_index) {
1392                 page = find_get_page(inode->i_mapping, index);
1393                 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1394                 clear_page_dirty_for_io(page);
1395                 page_cache_release(page);
1396                 index++;
1397         }
1398         return 0;
1399 }
1400
1401 int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
1402 {
1403         unsigned long index = start >> PAGE_CACHE_SHIFT;
1404         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1405         struct page *page;
1406
1407         while (index <= end_index) {
1408                 page = find_get_page(inode->i_mapping, index);
1409                 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1410                 account_page_redirty(page);
1411                 __set_page_dirty_nobuffers(page);
1412                 page_cache_release(page);
1413                 index++;
1414         }
1415         return 0;
1416 }
1417
1418 /*
1419  * helper function to set both pages and extents in the tree writeback
1420  */
1421 static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
1422 {
1423         unsigned long index = start >> PAGE_CACHE_SHIFT;
1424         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1425         struct page *page;
1426
1427         while (index <= end_index) {
1428                 page = find_get_page(tree->mapping, index);
1429                 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1430                 set_page_writeback(page);
1431                 page_cache_release(page);
1432                 index++;
1433         }
1434         return 0;
1435 }
1436
1437 /* find the first state struct with 'bits' set after 'start', and
1438  * return it.  tree->lock must be held.  NULL will be returned if
1439  * nothing was found after 'start'
1440  */
1441 static struct extent_state *
1442 find_first_extent_bit_state(struct extent_io_tree *tree,
1443                             u64 start, unsigned long bits)
1444 {
1445         struct rb_node *node;
1446         struct extent_state *state;
1447
1448         /*
1449          * this search will find all the extents that end after
1450          * our range starts.
1451          */
1452         node = tree_search(tree, start);
1453         if (!node)
1454                 goto out;
1455
1456         while (1) {
1457                 state = rb_entry(node, struct extent_state, rb_node);
1458                 if (state->end >= start && (state->state & bits))
1459                         return state;
1460
1461                 node = rb_next(node);
1462                 if (!node)
1463                         break;
1464         }
1465 out:
1466         return NULL;
1467 }
1468
1469 /*
1470  * find the first offset in the io tree with 'bits' set. zero is
1471  * returned if we find something, and *start_ret and *end_ret are
1472  * set to reflect the state struct that was found.
1473  *
1474  * If nothing was found, 1 is returned; if something was found, 0 is returned.
1475  */
1476 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1477                           u64 *start_ret, u64 *end_ret, unsigned long bits,
1478                           struct extent_state **cached_state)
1479 {
1480         struct extent_state *state;
1481         struct rb_node *n;
1482         int ret = 1;
1483
1484         spin_lock(&tree->lock);
1485         if (cached_state && *cached_state) {
1486                 state = *cached_state;
1487                 if (state->end == start - 1 && extent_state_in_tree(state)) {
1488                         n = rb_next(&state->rb_node);
1489                         while (n) {
1490                                 state = rb_entry(n, struct extent_state,
1491                                                  rb_node);
1492                                 if (state->state & bits)
1493                                         goto got_it;
1494                                 n = rb_next(n);
1495                         }
1496                         free_extent_state(*cached_state);
1497                         *cached_state = NULL;
1498                         goto out;
1499                 }
1500                 free_extent_state(*cached_state);
1501                 *cached_state = NULL;
1502         }
1503
1504         state = find_first_extent_bit_state(tree, start, bits);
1505 got_it:
1506         if (state) {
1507                 cache_state_if_flags(state, cached_state, 0);
1508                 *start_ret = state->start;
1509                 *end_ret = state->end;
1510                 ret = 0;
1511         }
1512 out:
1513         spin_unlock(&tree->lock);
1514         return ret;
1515 }
1516
1517 /*
1518  * find a contiguous range of bytes in the file marked as delalloc, not
1519  * more than 'max_bytes'.  start and end are used to return the range.
1520  *
1521  * 1 is returned if we find something, 0 if nothing was in the tree
1522  */
1523 static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
1524                                         u64 *start, u64 *end, u64 max_bytes,
1525                                         struct extent_state **cached_state)
1526 {
1527         struct rb_node *node;
1528         struct extent_state *state;
1529         u64 cur_start = *start;
1530         u64 found = 0;
1531         u64 total_bytes = 0;
1532
1533         spin_lock(&tree->lock);
1534
1535         /*
1536          * this search will find all the extents that end after
1537          * our range starts.
1538          */
1539         node = tree_search(tree, cur_start);
1540         if (!node) {
1541                 if (!found)
1542                         *end = (u64)-1;
1543                 goto out;
1544         }
1545
1546         while (1) {
1547                 state = rb_entry(node, struct extent_state, rb_node);
1548                 if (found && (state->start != cur_start ||
1549                               (state->state & EXTENT_BOUNDARY))) {
1550                         goto out;
1551                 }
1552                 if (!(state->state & EXTENT_DELALLOC)) {
1553                         if (!found)
1554                                 *end = state->end;
1555                         goto out;
1556                 }
1557                 if (!found) {
1558                         *start = state->start;
1559                         *cached_state = state;
1560                         atomic_inc(&state->refs);
1561                 }
1562                 found++;
1563                 *end = state->end;
1564                 cur_start = state->end + 1;
1565                 node = rb_next(node);
1566                 total_bytes += state->end - state->start + 1;
1567                 if (total_bytes >= max_bytes)
1568                         break;
1569                 if (!node)
1570                         break;
1571         }
1572 out:
1573         spin_unlock(&tree->lock);
1574         return found;
1575 }
1576
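/*
 * helper to unlock every page in [start, end] except locked_page, which
 * the caller keeps locked; used when delalloc processing has to back out
 * of a range it only partially locked.
 */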
1577 static noinline void __unlock_for_delalloc(struct inode *inode,
1578                                            struct page *locked_page,
1579                                            u64 start, u64 end)
1580 {
1581         int ret;
1582         struct page *pages[16];
1583         unsigned long index = start >> PAGE_CACHE_SHIFT;
1584         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1585         unsigned long nr_pages = end_index - index + 1;
1586         int i;
1587
1588         if (index == locked_page->index && end_index == index)
1589                 return;
1590
1591         while (nr_pages > 0) {
1592                 ret = find_get_pages_contig(inode->i_mapping, index,
1593                                      min_t(unsigned long, nr_pages,
1594                                      ARRAY_SIZE(pages)), pages);
1595                 for (i = 0; i < ret; i++) {
1596                         if (pages[i] != locked_page)
1597                                 unlock_page(pages[i]);
1598                         page_cache_release(pages[i]);
1599                 }
1600                 nr_pages -= ret;
1601                 index += ret;
1602                 cond_resched();
1603         }
1604 }
1605
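/*
 * lock every page in [delalloc_start, delalloc_end] except locked_page,
 * which the caller already holds.  Returns 0 on success or -EAGAIN if a
 * page disappeared or was cleaned behind our back, in which case the
 * pages locked so far are unlocked again before returning.
 */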
1606 static noinline int lock_delalloc_pages(struct inode *inode,
1607                                         struct page *locked_page,
1608                                         u64 delalloc_start,
1609                                         u64 delalloc_end)
1610 {
1611         unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
1612         unsigned long start_index = index;
1613         unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
1614         unsigned long pages_locked = 0;
1615         struct page *pages[16];
1616         unsigned long nrpages;
1617         int ret;
1618         int i;
1619
1620         /* the caller is responsible for locking the start index */
1621         if (index == locked_page->index && index == end_index)
1622                 return 0;
1623
1624         /* skip the page at the start index */
1625         nrpages = end_index - index + 1;
1626         while (nrpages > 0) {
1627                 ret = find_get_pages_contig(inode->i_mapping, index,
1628                                      min_t(unsigned long,
1629                                      nrpages, ARRAY_SIZE(pages)), pages);
1630                 if (ret == 0) {
1631                         ret = -EAGAIN;
1632                         goto done;
1633                 }
1634                 /* now we have an array of pages, lock them all */
1635                 for (i = 0; i < ret; i++) {
1636                         /*
1637                          * the caller is taking responsibility for
1638                          * locked_page
1639                          */
1640                         if (pages[i] != locked_page) {
1641                                 lock_page(pages[i]);
1642                                 if (!PageDirty(pages[i]) ||
1643                                     pages[i]->mapping != inode->i_mapping) {
1644                                         ret = -EAGAIN;
1645                                         unlock_page(pages[i]);
1646                                         page_cache_release(pages[i]);
1647                                         goto done;
1648                                 }
1649                         }
1650                         page_cache_release(pages[i]);
1651                         pages_locked++;
1652                 }
1653                 nrpages -= ret;
1654                 index += ret;
1655                 cond_resched();
1656         }
1657         ret = 0;
1658 done:
1659         if (ret && pages_locked) {
1660                 __unlock_for_delalloc(inode, locked_page,
1661                               delalloc_start,
1662                               ((u64)(start_index + pages_locked - 1)) <<
1663                               PAGE_CACHE_SHIFT);
1664         }
1665         return ret;
1666 }
1667
1668 /*
1669  * find a contiguous range of bytes in the file marked as delalloc, not
1670  * more than 'max_bytes'.  start and end are used to return the range.
1671  *
1672  * 1 is returned if we find something, 0 if nothing was in the tree
1673  */
1674 STATIC u64 find_lock_delalloc_range(struct inode *inode,
1675                                     struct extent_io_tree *tree,
1676                                     struct page *locked_page, u64 *start,
1677                                     u64 *end, u64 max_bytes)
1678 {
1679         u64 delalloc_start;
1680         u64 delalloc_end;
1681         u64 found;
1682         struct extent_state *cached_state = NULL;
1683         int ret;
1684         int loops = 0;
1685
1686 again:
1687         /* step one, find a bunch of delalloc bytes starting at start */
1688         delalloc_start = *start;
1689         delalloc_end = 0;
1690         found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
1691                                     max_bytes, &cached_state);
1692         if (!found || delalloc_end <= *start) {
1693                 *start = delalloc_start;
1694                 *end = delalloc_end;
1695                 free_extent_state(cached_state);
1696                 return 0;
1697         }
1698
1699         /*
1700          * start comes from the offset of locked_page.  We have to lock
1701          * pages in order, so we can't process delalloc bytes before
1702          * locked_page
1703          */
1704         if (delalloc_start < *start)
1705                 delalloc_start = *start;
1706
1707         /*
1708          * make sure to limit the number of pages we try to lock down
1709          */
1710         if (delalloc_end + 1 - delalloc_start > max_bytes)
1711                 delalloc_end = delalloc_start + max_bytes - 1;
1712
1713         /* step two, lock all the pages after the page that has start */
1714         ret = lock_delalloc_pages(inode, locked_page,
1715                                   delalloc_start, delalloc_end);
1716         if (ret == -EAGAIN) {
1717                 /* some of the pages are gone, lets avoid looping by
1718                  * shortening the size of the delalloc range we're searching
1719                  */
1720                 free_extent_state(cached_state);
1721                 cached_state = NULL;
1722                 if (!loops) {
1723                         max_bytes = PAGE_CACHE_SIZE;
1724                         loops = 1;
1725                         goto again;
1726                 } else {
1727                         found = 0;
1728                         goto out_failed;
1729                 }
1730         }
1731         BUG_ON(ret); /* Only valid values are 0 and -EAGAIN */
1732
1733         /* step three, lock the state bits for the whole range */
1734         lock_extent_bits(tree, delalloc_start, delalloc_end, 0, &cached_state);
1735
1736         /* then test to make sure it is all still delalloc */
1737         ret = test_range_bit(tree, delalloc_start, delalloc_end,
1738                              EXTENT_DELALLOC, 1, cached_state);
1739         if (!ret) {
1740                 unlock_extent_cached(tree, delalloc_start, delalloc_end,
1741                                      &cached_state, GFP_NOFS);
1742                 __unlock_for_delalloc(inode, locked_page,
1743                               delalloc_start, delalloc_end);
1744                 cond_resched();
1745                 goto again;
1746         }
1747         free_extent_state(cached_state);
1748         *start = delalloc_start;
1749         *end = delalloc_end;
1750 out_failed:
1751         return found;
1752 }
1753
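/*
 * clear the given bits from the io tree for [start, end], then walk the
 * pages in the range and apply the requested page_ops (private2, clear
 * dirty, set writeback, set error, end writeback, unlock).  locked_page
 * is skipped so the caller stays in control of it.
 */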
1754 int extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
1755                                  struct page *locked_page,
1756                                  unsigned long clear_bits,
1757                                  unsigned long page_ops)
1758 {
1759         struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
1760         int ret;
1761         struct page *pages[16];
1762         unsigned long index = start >> PAGE_CACHE_SHIFT;
1763         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1764         unsigned long nr_pages = end_index - index + 1;
1765         int i;
1766
1767         clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
1768         if (page_ops == 0)
1769                 return 0;
1770
1771         if ((page_ops & PAGE_SET_ERROR) && nr_pages > 0)
1772                 mapping_set_error(inode->i_mapping, -EIO);
1773
1774         while (nr_pages > 0) {
1775                 ret = find_get_pages_contig(inode->i_mapping, index,
1776                                      min_t(unsigned long,
1777                                      nr_pages, ARRAY_SIZE(pages)), pages);
1778                 for (i = 0; i < ret; i++) {
1779
1780                         if (page_ops & PAGE_SET_PRIVATE2)
1781                                 SetPagePrivate2(pages[i]);
1782
1783                         if (pages[i] == locked_page) {
1784                                 page_cache_release(pages[i]);
1785                                 continue;
1786                         }
1787                         if (page_ops & PAGE_CLEAR_DIRTY)
1788                                 clear_page_dirty_for_io(pages[i]);
1789                         if (page_ops & PAGE_SET_WRITEBACK)
1790                                 set_page_writeback(pages[i]);
1791                         if (page_ops & PAGE_SET_ERROR)
1792                                 SetPageError(pages[i]);
1793                         if (page_ops & PAGE_END_WRITEBACK)
1794                                 end_page_writeback(pages[i]);
1795                         if (page_ops & PAGE_UNLOCK)
1796                                 unlock_page(pages[i]);
1797                         page_cache_release(pages[i]);
1798                 }
1799                 nr_pages -= ret;
1800                 index += ret;
1801                 cond_resched();
1802         }
1803         return 0;
1804 }
1805
1806 /*
1807  * count the number of bytes in the tree that have the given bit(s)
1808  * set.  This can be fairly slow, except for EXTENT_DIRTY which is
1809  * cached.  The total number found is returned.
1810  */
1811 u64 count_range_bits(struct extent_io_tree *tree,
1812                      u64 *start, u64 search_end, u64 max_bytes,
1813                      unsigned long bits, int contig)
1814 {
1815         struct rb_node *node;
1816         struct extent_state *state;
1817         u64 cur_start = *start;
1818         u64 total_bytes = 0;
1819         u64 last = 0;
1820         int found = 0;
1821
1822         if (WARN_ON(search_end <= cur_start))
1823                 return 0;
1824
1825         spin_lock(&tree->lock);
1826         if (cur_start == 0 && bits == EXTENT_DIRTY) {
1827                 total_bytes = tree->dirty_bytes;
1828                 goto out;
1829         }
1830         /*
1831          * this search will find all the extents that end after
1832          * our range starts.
1833          */
1834         node = tree_search(tree, cur_start);
1835         if (!node)
1836                 goto out;
1837
1838         while (1) {
1839                 state = rb_entry(node, struct extent_state, rb_node);
1840                 if (state->start > search_end)
1841                         break;
1842                 if (contig && found && state->start > last + 1)
1843                         break;
1844                 if (state->end >= cur_start && (state->state & bits) == bits) {
1845                         total_bytes += min(search_end, state->end) + 1 -
1846                                        max(cur_start, state->start);
1847                         if (total_bytes >= max_bytes)
1848                                 break;
1849                         if (!found) {
1850                                 *start = max(cur_start, state->start);
1851                                 found = 1;
1852                         }
1853                         last = state->end;
1854                 } else if (contig && found) {
1855                         break;
1856                 }
1857                 node = rb_next(node);
1858                 if (!node)
1859                         break;
1860         }
1861 out:
1862         spin_unlock(&tree->lock);
1863         return total_bytes;
1864 }
1865
1866 /*
1867  * set the private field for a given byte offset in the tree.  If there isn't
1868  * an extent_state there already, this does nothing.
1869  */
1870 static int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
1871 {
1872         struct rb_node *node;
1873         struct extent_state *state;
1874         int ret = 0;
1875
1876         spin_lock(&tree->lock);
1877         /*
1878          * this search will find all the extents that end after
1879          * our range starts.
1880          */
1881         node = tree_search(tree, start);
1882         if (!node) {
1883                 ret = -ENOENT;
1884                 goto out;
1885         }
1886         state = rb_entry(node, struct extent_state, rb_node);
1887         if (state->start != start) {
1888                 ret = -ENOENT;
1889                 goto out;
1890         }
1891         state->private = private;
1892 out:
1893         spin_unlock(&tree->lock);
1894         return ret;
1895 }
1896
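/*
 * read back the private value stored at byte offset 'start'.  -ENOENT is
 * returned if no extent_state starts exactly at that offset.
 */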
1897 int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1898 {
1899         struct rb_node *node;
1900         struct extent_state *state;
1901         int ret = 0;
1902
1903         spin_lock(&tree->lock);
1904         /*
1905          * this search will find all the extents that end after
1906          * our range starts.
1907          */
1908         node = tree_search(tree, start);
1909         if (!node) {
1910                 ret = -ENOENT;
1911                 goto out;
1912         }
1913         state = rb_entry(node, struct extent_state, rb_node);
1914         if (state->start != start) {
1915                 ret = -ENOENT;
1916                 goto out;
1917         }
1918         *private = state->private;
1919 out:
1920         spin_unlock(&tree->lock);
1921         return ret;
1922 }
1923
1924 /*
1925  * searches a range in the state tree for a given mask.
1926  * If 'filled' == 1, this returns 1 only if every extent in the range
1927  * has the bits set.  Otherwise, 1 is returned if any bit in the
1928  * range is found set.
1929  */
1930 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1931                    unsigned long bits, int filled, struct extent_state *cached)
1932 {
1933         struct extent_state *state = NULL;
1934         struct rb_node *node;
1935         int bitset = 0;
1936
1937         spin_lock(&tree->lock);
1938         if (cached && extent_state_in_tree(cached) && cached->start <= start &&
1939             cached->end > start)
1940                 node = &cached->rb_node;
1941         else
1942                 node = tree_search(tree, start);
1943         while (node && start <= end) {
1944                 state = rb_entry(node, struct extent_state, rb_node);
1945
1946                 if (filled && state->start > start) {
1947                         bitset = 0;
1948                         break;
1949                 }
1950
1951                 if (state->start > end)
1952                         break;
1953
1954                 if (state->state & bits) {
1955                         bitset = 1;
1956                         if (!filled)
1957                                 break;
1958                 } else if (filled) {
1959                         bitset = 0;
1960                         break;
1961                 }
1962
1963                 if (state->end == (u64)-1)
1964                         break;
1965
1966                 start = state->end + 1;
1967                 if (start > end)
1968                         break;
1969                 node = rb_next(node);
1970                 if (!node) {
1971                         if (filled)
1972                                 bitset = 0;
1973                         break;
1974                 }
1975         }
1976         spin_unlock(&tree->lock);
1977         return bitset;
1978 }
1979
1980 /*
1981  * helper function to set a given page up to date if all the
1982  * extents in the tree for that page are up to date
1983  */
1984 static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
1985 {
1986         u64 start = page_offset(page);
1987         u64 end = start + PAGE_CACHE_SIZE - 1;
1988         if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
1989                 SetPageUptodate(page);
1990 }
1991
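/*
 * release an io_failure_record: clear its bits in the failure tree and
 * the EXTENT_DAMAGED bit in the inode's io tree, then free the record.
 */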
1992 int free_io_failure(struct inode *inode, struct io_failure_record *rec)
1993 {
1994         int ret;
1995         int err = 0;
1996         struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
1997
1998         set_state_private(failure_tree, rec->start, 0);
1999         ret = clear_extent_bits(failure_tree, rec->start,
2000                                 rec->start + rec->len - 1,
2001                                 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
2002         if (ret)
2003                 err = ret;
2004
2005         ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
2006                                 rec->start + rec->len - 1,
2007                                 EXTENT_DAMAGED, GFP_NOFS);
2008         if (ret && !err)
2009                 err = ret;
2010
2011         kfree(rec);
2012         return err;
2013 }
2014
2015 /*
2016  * this bypasses the standard btrfs submit functions deliberately, as
2017  * the standard behavior is to write all copies in a raid setup. here we only
2018  * want to write the one bad copy. so we do the mapping for ourselves and issue
2019  * submit_bio directly.
2020  * to avoid any synchronization issues, wait for the data after writing, which
2021  * actually prevents the read that triggered the error from finishing.
2022  * currently, there can be no more than two copies of every data bit. thus,
2023  * exactly one rewrite is required.
2024  */
2025 int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
2026                       struct page *page, unsigned int pg_offset, int mirror_num)
2027 {
2028         struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2029         struct bio *bio;
2030         struct btrfs_device *dev;
2031         u64 map_length = 0;
2032         u64 sector;
2033         struct btrfs_bio *bbio = NULL;
2034         struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
2035         int ret;
2036
2037         ASSERT(!(fs_info->sb->s_flags & MS_RDONLY));
2038         BUG_ON(!mirror_num);
2039
2040         /* we can't repair anything in raid56 yet */
2041         if (btrfs_is_parity_mirror(map_tree, logical, length, mirror_num))
2042                 return 0;
2043
2044         bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
2045         if (!bio)
2046                 return -EIO;
2047         bio->bi_iter.bi_size = 0;
2048         map_length = length;
2049
2050         ret = btrfs_map_block(fs_info, WRITE, logical,
2051                               &map_length, &bbio, mirror_num);
2052         if (ret) {
2053                 bio_put(bio);
2054                 return -EIO;
2055         }
2056         BUG_ON(mirror_num != bbio->mirror_num);
2057         sector = bbio->stripes[mirror_num-1].physical >> 9;
2058         bio->bi_iter.bi_sector = sector;
2059         dev = bbio->stripes[mirror_num-1].dev;
2060         kfree(bbio);
2061         if (!dev || !dev->bdev || !dev->writeable) {
2062                 bio_put(bio);
2063                 return -EIO;
2064         }
2065         bio->bi_bdev = dev->bdev;
2066         bio_add_page(bio, page, length, pg_offset);
2067
2068         if (btrfsic_submit_bio_wait(WRITE_SYNC, bio)) {
2069                 /* try to remap that extent elsewhere? */
2070                 bio_put(bio);
2071                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
2072                 return -EIO;
2073         }
2074
2075         printk_ratelimited_in_rcu(KERN_INFO
2076                                   "BTRFS: read error corrected: ino %llu off %llu (dev %s sector %llu)\n",
2077                                   btrfs_ino(inode), start,
2078                                   rcu_str_deref(dev->name), sector);
2079         bio_put(bio);
2080         return 0;
2081 }
2082
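/*
 * write the in-memory copy of an extent buffer back over the given mirror,
 * one page at a time, using repair_io_failure().
 */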
2083 int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
2084                          int mirror_num)
2085 {
2086         u64 start = eb->start;
2087         unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
2088         int ret = 0;
2089
2090         if (root->fs_info->sb->s_flags & MS_RDONLY)
2091                 return -EROFS;
2092
2093         for (i = 0; i < num_pages; i++) {
2094                 struct page *p = eb->pages[i];
2095
2096                 ret = repair_io_failure(root->fs_info->btree_inode, start,
2097                                         PAGE_CACHE_SIZE, start, p,
2098                                         start - page_offset(p), mirror_num);
2099                 if (ret)
2100                         break;
2101                 start += PAGE_CACHE_SIZE;
2102         }
2103
2104         return ret;
2105 }
2106
2107 /*
2108  * each time an IO finishes, we do a fast check in the IO failure tree
2109  * to see if we need to process or clean up an io_failure_record
2110  */
2111 int clean_io_failure(struct inode *inode, u64 start, struct page *page,
2112                      unsigned int pg_offset)
2113 {
2114         u64 private;
2115         u64 private_failure;
2116         struct io_failure_record *failrec;
2117         struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2118         struct extent_state *state;
2119         int num_copies;
2120         int ret;
2121
2122         private = 0;
2123         ret = count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
2124                                 (u64)-1, 1, EXTENT_DIRTY, 0);
2125         if (!ret)
2126                 return 0;
2127
2128         ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, start,
2129                                 &private_failure);
2130         if (ret)
2131                 return 0;
2132
2133         failrec = (struct io_failure_record *)(unsigned long) private_failure;
2134         BUG_ON(!failrec->this_mirror);
2135
2136         if (failrec->in_validation) {
2137                 /* there was no real error, just free the record */
2138                 pr_debug("clean_io_failure: freeing dummy error at %llu\n",
2139                          failrec->start);
2140                 goto out;
2141         }
2142         if (fs_info->sb->s_flags & MS_RDONLY)
2143                 goto out;
2144
2145         spin_lock(&BTRFS_I(inode)->io_tree.lock);
2146         state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
2147                                             failrec->start,
2148                                             EXTENT_LOCKED);
2149         spin_unlock(&BTRFS_I(inode)->io_tree.lock);
2150
2151         if (state && state->start <= failrec->start &&
2152             state->end >= failrec->start + failrec->len - 1) {
2153                 num_copies = btrfs_num_copies(fs_info, failrec->logical,
2154                                               failrec->len);
2155                 if (num_copies > 1)  {
2156                         repair_io_failure(inode, start, failrec->len,
2157                                           failrec->logical, page,
2158                                           pg_offset, failrec->failed_mirror);
2159                 }
2160         }
2161
2162 out:
2163         free_io_failure(inode, failrec);
2164
2165         return 0;
2166 }
2167
2168 /*
2169  * Can be called when
2170  * - hold extent lock
2171  * - under ordered extent
2172  * - the inode is freeing
2173  */
2174 void btrfs_free_io_failure_record(struct inode *inode, u64 start, u64 end)
2175 {
2176         struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2177         struct io_failure_record *failrec;
2178         struct extent_state *state, *next;
2179
2180         if (RB_EMPTY_ROOT(&failure_tree->state))
2181                 return;
2182
2183         spin_lock(&failure_tree->lock);
2184         state = find_first_extent_bit_state(failure_tree, start, EXTENT_DIRTY);
2185         while (state) {
2186                 if (state->start > end)
2187                         break;
2188
2189                 ASSERT(state->end <= end);
2190
2191                 next = next_state(state);
2192
2193                 failrec = (struct io_failure_record *)(unsigned long)state->private;
2194                 free_extent_state(state);
2195                 kfree(failrec);
2196
2197                 state = next;
2198         }
2199         spin_unlock(&failure_tree->lock);
2200 }
2201
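/*
 * look up the io_failure_record for 'start' in the failure tree, or create
 * a new one (recording length, logical address and compression flags from
 * the extent map) if this is the first error seen for the range.
 */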
2202 int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
2203                                 struct io_failure_record **failrec_ret)
2204 {
2205         struct io_failure_record *failrec;
2206         u64 private;
2207         struct extent_map *em;
2208         struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2209         struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2210         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2211         int ret;
2212         u64 logical;
2213
2214         ret = get_state_private(failure_tree, start, &private);
2215         if (ret) {
2216                 failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
2217                 if (!failrec)
2218                         return -ENOMEM;
2219
2220                 failrec->start = start;
2221                 failrec->len = end - start + 1;
2222                 failrec->this_mirror = 0;
2223                 failrec->bio_flags = 0;
2224                 failrec->in_validation = 0;
2225
2226                 read_lock(&em_tree->lock);
2227                 em = lookup_extent_mapping(em_tree, start, failrec->len);
2228                 if (!em) {
2229                         read_unlock(&em_tree->lock);
2230                         kfree(failrec);
2231                         return -EIO;
2232                 }
2233
2234                 if (em->start > start || em->start + em->len <= start) {
2235                         free_extent_map(em);
2236                         em = NULL;
2237                 }
2238                 read_unlock(&em_tree->lock);
2239                 if (!em) {
2240                         kfree(failrec);
2241                         return -EIO;
2242                 }
2243
2244                 logical = start - em->start;
2245                 logical = em->block_start + logical;
2246                 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2247                         logical = em->block_start;
2248                         failrec->bio_flags = EXTENT_BIO_COMPRESSED;
2249                         extent_set_compress_type(&failrec->bio_flags,
2250                                                  em->compress_type);
2251                 }
2252
2253                 pr_debug("Get IO Failure Record: (new) logical=%llu, start=%llu, len=%llu\n",
2254                          logical, start, failrec->len);
2255
2256                 failrec->logical = logical;
2257                 free_extent_map(em);
2258
2259                 /* set the bits in the private failure tree */
2260                 ret = set_extent_bits(failure_tree, start, end,
2261                                         EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
2262                 if (ret >= 0)
2263                         ret = set_state_private(failure_tree, start,
2264                                                 (u64)(unsigned long)failrec);
2265                 /* set the bits in the inode's tree */
2266                 if (ret >= 0)
2267                         ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED,
2268                                                 GFP_NOFS);
2269                 if (ret < 0) {
2270                         kfree(failrec);
2271                         return ret;
2272                 }
2273         } else {
2274                 failrec = (struct io_failure_record *)(unsigned long)private;
2275                 pr_debug("Get IO Failure Record: (found) logical=%llu, start=%llu, len=%llu, validation=%d\n",
2276                          failrec->logical, failrec->start, failrec->len,
2277                          failrec->in_validation);
2278                 /*
2279                  * when data can be on disk more than twice, add to failrec here
2280                  * (e.g. with a list for failed_mirror) to make
2281                  * clean_io_failure() clean all those errors at once.
2282                  */
2283         }
2284
2285         *failrec_ret = failrec;
2286
2287         return 0;
2288 }
2289
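/*
 * decide whether a failed read is worth retrying from another mirror.
 * Returns 1 and picks the next mirror in failrec->this_mirror if so,
 * 0 when there is only one copy or every mirror has been tried.
 */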
2290 int btrfs_check_repairable(struct inode *inode, struct bio *failed_bio,
2291                            struct io_failure_record *failrec, int failed_mirror)
2292 {
2293         int num_copies;
2294
2295         num_copies = btrfs_num_copies(BTRFS_I(inode)->root->fs_info,
2296                                       failrec->logical, failrec->len);
2297         if (num_copies == 1) {
2298                 /*
2299                  * we only have a single copy of the data, so don't bother with
2300                  * all the retry and error correction code that follows. no
2301                  * matter what the error is, it is very likely to persist.
2302                  */
2303                 pr_debug("Check Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d\n",
2304                          num_copies, failrec->this_mirror, failed_mirror);
2305                 return 0;
2306         }
2307
2308         /*
2309          * there are two premises:
2310          *      a) deliver good data to the caller
2311          *      b) correct the bad sectors on disk
2312          */
2313         if (failed_bio->bi_vcnt > 1) {
2314                 /*
2315                  * to fulfill b), we need to know the exact failing sectors, as
2316                  * we don't want to rewrite any more than the failed ones. thus,
2317                  * we need separate read requests for the failed bio
2318                  *
2319                  * if the following BUG_ON triggers, our validation request got
2320                  * merged. we need separate requests for our algorithm to work.
2321                  */
2322                 BUG_ON(failrec->in_validation);
2323                 failrec->in_validation = 1;
2324                 failrec->this_mirror = failed_mirror;
2325         } else {
2326                 /*
2327                  * we're ready to fulfill a) and b) alongside. get a good copy
2328                  * of the failed sector and if we succeed, we have setup
2329                  * everything for repair_io_failure to do the rest for us.
2330                  */
2331                 if (failrec->in_validation) {
2332                         BUG_ON(failrec->this_mirror != failed_mirror);
2333                         failrec->in_validation = 0;
2334                         failrec->this_mirror = 0;
2335                 }
2336                 failrec->failed_mirror = failed_mirror;
2337                 failrec->this_mirror++;
2338                 if (failrec->this_mirror == failed_mirror)
2339                         failrec->this_mirror++;
2340         }
2341
2342         if (failrec->this_mirror > num_copies) {
2343                 pr_debug("Check Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d\n",
2344                          num_copies, failrec->this_mirror, failed_mirror);
2345                 return 0;
2346         }
2347
2348         return 1;
2349 }
2350
2351
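/*
 * build a one-page bio that re-reads the failed range from the logical
 * address stored in the failrec, copying the expected csum (if any) from
 * the original bio so the completion hook can verify the new data.
 */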
2352 struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
2353                                     struct io_failure_record *failrec,
2354                                     struct page *page, int pg_offset, int icsum,
2355                                     bio_end_io_t *endio_func, void *data)
2356 {
2357         struct bio *bio;
2358         struct btrfs_io_bio *btrfs_failed_bio;
2359         struct btrfs_io_bio *btrfs_bio;
2360
2361         bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
2362         if (!bio)
2363                 return NULL;
2364
2365         bio->bi_end_io = endio_func;
2366         bio->bi_iter.bi_sector = failrec->logical >> 9;
2367         bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
2368         bio->bi_iter.bi_size = 0;
2369         bio->bi_private = data;
2370
2371         btrfs_failed_bio = btrfs_io_bio(failed_bio);
2372         if (btrfs_failed_bio->csum) {
2373                 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2374                 u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
2375
2376                 btrfs_bio = btrfs_io_bio(bio);
2377                 btrfs_bio->csum = btrfs_bio->csum_inline;
2378                 icsum *= csum_size;
2379                 memcpy(btrfs_bio->csum, btrfs_failed_bio->csum + icsum,
2380                        csum_size);
2381         }
2382
2383         bio_add_page(bio, page, failrec->len, pg_offset);
2384
2385         return bio;
2386 }
2387
2388 /*
2389  * this is a generic handler for readpage errors (default
2390  * readpage_io_failed_hook). if other copies exist, read those and write back
2391  * good data to the failed position. does not investigate remapping the
2392  * failed extent elsewhere, hoping the device will be smart enough to do this as
2393  * needed
2394  */
2395
2396 static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
2397                               struct page *page, u64 start, u64 end,
2398                               int failed_mirror)
2399 {
2400         struct io_failure_record *failrec;
2401         struct inode *inode = page->mapping->host;
2402         struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2403         struct bio *bio;
2404         int read_mode;
2405         int ret;
2406
2407         BUG_ON(failed_bio->bi_rw & REQ_WRITE);
2408
2409         ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
2410         if (ret)
2411                 return ret;
2412
2413         ret = btrfs_check_repairable(inode, failed_bio, failrec, failed_mirror);
2414         if (!ret) {
2415                 free_io_failure(inode, failrec);
2416                 return -EIO;
2417         }
2418
2419         if (failed_bio->bi_vcnt > 1)
2420                 read_mode = READ_SYNC | REQ_FAILFAST_DEV;
2421         else
2422                 read_mode = READ_SYNC;
2423
2424         phy_offset >>= inode->i_sb->s_blocksize_bits;
2425         bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
2426                                       start - page_offset(page),
2427                                       (int)phy_offset, failed_bio->bi_end_io,
2428                                       NULL);
2429         if (!bio) {
2430                 free_io_failure(inode, failrec);
2431                 return -EIO;
2432         }
2433
2434         pr_debug("Repair Read Error: submitting new read[%#x] to this_mirror=%d, in_validation=%d\n",
2435                  read_mode, failrec->this_mirror, failrec->in_validation);
2436
2437         ret = tree->ops->submit_bio_hook(inode, read_mode, bio,
2438                                          failrec->this_mirror,
2439                                          failrec->bio_flags, 0);
2440         if (ret) {
2441                 free_io_failure(inode, failrec);
2442                 bio_put(bio);
2443         }
2444
2445         return ret;
2446 }
2447
2448 /* lots and lots of room for performance fixes in the end_bio funcs */
2449
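/*
 * finish one page of a write: run the writepage_end_io_hook and, on
 * error, mark the page and its mapping so the failure is reported.
 */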
2450 int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2451 {
2452         int uptodate = (err == 0);
2453         struct extent_io_tree *tree;
2454         int ret = 0;
2455
2456         tree = &BTRFS_I(page->mapping->host)->io_tree;
2457
2458         if (tree->ops && tree->ops->writepage_end_io_hook) {
2459                 ret = tree->ops->writepage_end_io_hook(page, start,
2460                                                end, NULL, uptodate);
2461                 if (ret)
2462                         uptodate = 0;
2463         }
2464
2465         if (!uptodate) {
2466                 ClearPageUptodate(page);
2467                 SetPageError(page);
2468                 ret = ret < 0 ? ret : -EIO;
2469                 mapping_set_error(page->mapping, ret);
2470         }
2471         return 0;
2472 }
2473
2474 /*
2475  * after a writepage IO is done, we need to:
2476  * clear the uptodate bits on error
2477  * clear the writeback bits in the extent tree for this IO
2478  * end_page_writeback if the page has no more pending IO
2479  *
2480  * Scheduling is not allowed, so the extent state tree is expected
2481  * to have one and only one object corresponding to this IO.
2482  */
2483 static void end_bio_extent_writepage(struct bio *bio, int err)
2484 {
2485         struct bio_vec *bvec;
2486         u64 start;
2487         u64 end;
2488         int i;
2489
2490         bio_for_each_segment_all(bvec, bio, i) {
2491                 struct page *page = bvec->bv_page;
2492
2493                 /* We always issue full-page writes, but if some block
2494                  * in a page fails to write, blk_update_request() will
2495                  * advance bv_offset and adjust bv_len to compensate.
2496                  * Print a warning for nonzero offsets, and an error
2497                  * if they don't add up to a full page.  */
2498                 if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) {
2499                         if (bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE)
2500                                 btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
2501                                    "partial page write in btrfs with offset %u and length %u",
2502                                         bvec->bv_offset, bvec->bv_len);
2503                         else
2504                                 btrfs_info(BTRFS_I(page->mapping->host)->root->fs_info,
2505                                    "incomplete page write in btrfs with offset %u and "
2506                                    "length %u",
2507                                         bvec->bv_offset, bvec->bv_len);
2508                 }
2509
2510                 start = page_offset(page);
2511                 end = start + bvec->bv_offset + bvec->bv_len - 1;
2512
2513                 if (end_extent_writepage(page, err, start, end))
2514                         continue;
2515
2516                 end_page_writeback(page);
2517         }
2518
2519         bio_put(bio);
2520 }
2521
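/*
 * mark a contiguous run of bytes uptodate (if the read succeeded and the
 * tree tracks uptodate state) and unlock that part of the io tree.
 */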
2522 static void
2523 endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len,
2524                               int uptodate)
2525 {
2526         struct extent_state *cached = NULL;
2527         u64 end = start + len - 1;
2528
2529         if (uptodate && tree->track_uptodate)
2530                 set_extent_uptodate(tree, start, end, &cached, GFP_ATOMIC);
2531         unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
2532 }
2533
2534 /*
2535  * after a readpage IO is done, we need to:
2536  * clear the uptodate bits on error
2537  * set the uptodate bits if things worked
2538  * set the page up to date if all extents in the tree are uptodate
2539  * clear the lock bit in the extent tree
2540  * unlock the page if there are no other extents locked for it
2541  *
2542  * Scheduling is not allowed, so the extent state tree is expected
2543  * to have one and only one object corresponding to this IO.
2544  */
2545 static void end_bio_extent_readpage(struct bio *bio, int err)
2546 {
2547         struct bio_vec *bvec;
2548         int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
2549         struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
2550         struct extent_io_tree *tree;
2551         u64 offset = 0;
2552         u64 start;
2553         u64 end;
2554         u64 len;
2555         u64 extent_start = 0;
2556         u64 extent_len = 0;
2557         int mirror;
2558         int ret;
2559         int i;
2560
2561         if (err)
2562                 uptodate = 0;
2563
2564         bio_for_each_segment_all(bvec, bio, i) {
2565                 struct page *page = bvec->bv_page;
2566                 struct inode *inode = page->mapping->host;
2567
2568                 pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, "
2569                          "mirror=%u\n", (u64)bio->bi_iter.bi_sector, err,
2570                          io_bio->mirror_num);
2571                 tree = &BTRFS_I(inode)->io_tree;
2572
2573                 /* We always issue full-page reads, but if some block
2574                  * in a page fails to read, blk_update_request() will
2575                  * advance bv_offset and adjust bv_len to compensate.
2576                  * Print a warning for nonzero offsets, and an error
2577                  * if they don't add up to a full page.  */
2578                 if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) {
2579                         if (bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE)
2580                                 btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
2581                                    "partial page read in btrfs with offset %u and length %u",
2582                                         bvec->bv_offset, bvec->bv_len);
2583                         else
2584                                 btrfs_info(BTRFS_I(page->mapping->host)->root->fs_info,
2585                                    "incomplete page read in btrfs with offset %u and "
2586                                    "length %u",
2587                                         bvec->bv_offset, bvec->bv_len);
2588                 }
2589
2590                 start = page_offset(page);
2591                 end = start + bvec->bv_offset + bvec->bv_len - 1;
2592                 len = bvec->bv_len;
2593
2594                 mirror = io_bio->mirror_num;
2595                 if (likely(uptodate && tree->ops &&
2596                            tree->ops->readpage_end_io_hook)) {
2597                         ret = tree->ops->readpage_end_io_hook(io_bio, offset,
2598                                                               page, start, end,
2599                                                               mirror);
2600                         if (ret)
2601                                 uptodate = 0;
2602                         else
2603                                 clean_io_failure(inode, start, page, 0);
2604                 }
2605
2606                 if (likely(uptodate))
2607                         goto readpage_ok;
2608
2609                 if (tree->ops && tree->ops->readpage_io_failed_hook) {
2610                         ret = tree->ops->readpage_io_failed_hook(page, mirror);
2611                         if (!ret && !err &&
2612                             test_bit(BIO_UPTODATE, &bio->bi_flags))
2613                                 uptodate = 1;
2614                 } else {
2615                         /*
2616                          * The generic bio_readpage_error handles errors the
2617                          * following way: If possible, new read requests are
2618                          * created and submitted and will end up in
2619                          * end_bio_extent_readpage as well (if we're lucky, not
2620                          * in the !uptodate case). In that case it returns 0 and
2621                          * we just go on with the next page in our bio. If it
2622                          * can't handle the error it will return -EIO and we
2623                          * remain responsible for that page.
2624                          */
2625                         ret = bio_readpage_error(bio, offset, page, start, end,
2626                                                  mirror);
2627                         if (ret == 0) {
2628                                 uptodate =
2629                                         test_bit(BIO_UPTODATE, &bio->bi_flags);
2630                                 if (err)
2631                                         uptodate = 0;
2632                                 offset += len;
2633                                 continue;
2634                         }
2635                 }
2636 readpage_ok:
2637                 if (likely(uptodate)) {
2638                         loff_t i_size = i_size_read(inode);
2639                         pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2640                         unsigned off;
2641
2642                         /* Zero out the end if this page straddles i_size */
2643                         off = i_size & (PAGE_CACHE_SIZE-1);
2644                         if (page->index == end_index && off)
2645                                 zero_user_segment(page, off, PAGE_CACHE_SIZE);
2646                         SetPageUptodate(page);
2647                 } else {
2648                         ClearPageUptodate(page);
2649                         SetPageError(page);
2650                 }
2651                 unlock_page(page);
2652                 offset += len;
2653
2654                 if (unlikely(!uptodate)) {
2655                         if (extent_len) {
2656                                 endio_readpage_release_extent(tree,
2657                                                               extent_start,
2658                                                               extent_len, 1);
2659                                 extent_start = 0;
2660                                 extent_len = 0;
2661                         }
2662                         endio_readpage_release_extent(tree, start,
2663                                                       end - start + 1, 0);
2664                 } else if (!extent_len) {
2665                         extent_start = start;
2666                         extent_len = end + 1 - start;
2667                 } else if (extent_start + extent_len == start) {
2668                         extent_len += end + 1 - start;
2669                 } else {
2670                         endio_readpage_release_extent(tree, extent_start,
2671                                                       extent_len, uptodate);
2672                         extent_start = start;
2673                         extent_len = end + 1 - start;
2674                 }
2675         }
2676
2677         if (extent_len)
2678                 endio_readpage_release_extent(tree, extent_start, extent_len,
2679                                               uptodate);
2680         if (io_bio->end_io)
2681                 io_bio->end_io(io_bio, err);
2682         bio_put(bio);
2683 }
2684
2685 /*
2686  * this allocates from the btrfs_bioset.  We're returning a bio right now
2687  * but you can call btrfs_io_bio for the appropriate container_of magic
2688  */
2689 struct bio *
2690 btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
2691                 gfp_t gfp_flags)
2692 {
2693         struct btrfs_io_bio *btrfs_bio;
2694         struct bio *bio;
2695
2696         bio = bio_alloc_bioset(gfp_flags, nr_vecs, btrfs_bioset);
2697
2698         if (bio == NULL && (current->flags & PF_MEMALLOC)) {
2699                 while (!bio && (nr_vecs /= 2)) {
2700                         bio = bio_alloc_bioset(gfp_flags,
2701                                                nr_vecs, btrfs_bioset);
2702                 }
2703         }
2704
2705         if (bio) {
2706                 bio->bi_bdev = bdev;
2707                 bio->bi_iter.bi_sector = first_sector;
2708                 btrfs_bio = btrfs_io_bio(bio);
2709                 btrfs_bio->csum = NULL;
2710                 btrfs_bio->csum_allocated = NULL;
2711                 btrfs_bio->end_io = NULL;
2712         }
2713         return bio;
2714 }
2715
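/* clone a bio from the btrfs_bioset and reset its btrfs_io_bio csum fields */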
2716 struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask)
2717 {
2718         struct btrfs_io_bio *btrfs_bio;
2719         struct bio *new;
2720
2721         new = bio_clone_bioset(bio, gfp_mask, btrfs_bioset);
2722         if (new) {
2723                 btrfs_bio = btrfs_io_bio(new);
2724                 btrfs_bio->csum = NULL;
2725                 btrfs_bio->csum_allocated = NULL;
2726                 btrfs_bio->end_io = NULL;
2727         }
2728         return new;
2729 }
2730
2731 /* this also allocates from the btrfs_bioset */
2732 struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
2733 {
2734         struct btrfs_io_bio *btrfs_bio;
2735         struct bio *bio;
2736
2737         bio = bio_alloc_bioset(gfp_mask, nr_iovecs, btrfs_bioset);
2738         if (bio) {
2739                 btrfs_bio = btrfs_io_bio(bio);
2740                 btrfs_bio->csum = NULL;
2741                 btrfs_bio->csum_allocated = NULL;
2742                 btrfs_bio->end_io = NULL;
2743         }
2744         return bio;
2745 }
2746
2747
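/*
 * hand a fully built bio to the fs provided submit_bio_hook (which takes
 * care of checksumming and mirroring), or straight to the block layer if
 * no hook is registered.
 */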
2748 static int __must_check submit_one_bio(int rw, struct bio *bio,
2749                                        int mirror_num, unsigned long bio_flags)
2750 {
2751         int ret = 0;
2752         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
2753         struct page *page = bvec->bv_page;
2754         struct extent_io_tree *tree = bio->bi_private;
2755         u64 start;
2756
2757         start = page_offset(page) + bvec->bv_offset;
2758
2759         bio->bi_private = NULL;
2760
2761         bio_get(bio);
2762
2763         if (tree->ops && tree->ops->submit_bio_hook)
2764                 ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
2765                                            mirror_num, bio_flags, start);
2766         else
2767                 btrfsic_submit_bio(rw, bio);
2768
2769         if (bio_flagged(bio, BIO_EOPNOTSUPP))
2770                 ret = -EOPNOTSUPP;
2771         bio_put(bio);
2772         return ret;
2773 }
2774
2775 static int merge_bio(int rw, struct extent_io_tree *tree, struct page *page,
2776                      unsigned long offset, size_t size, struct bio *bio,
2777                      unsigned long bio_flags)
2778 {
2779         int ret = 0;
2780         if (tree->ops && tree->ops->merge_bio_hook)
2781                 ret = tree->ops->merge_bio_hook(rw, page, offset, size, bio,
2782                                                 bio_flags);
2783         BUG_ON(ret < 0);
2784         return ret;
2785
2786 }
2787
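/*
 * add one page range to the bio being built in *bio_ret.  If the range is
 * not contiguous with the current bio, the flags differ, or the bio is
 * full, the current bio is submitted and a new one is started.
 */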
2788 static int submit_extent_page(int rw, struct extent_io_tree *tree,
2789                               struct page *page, sector_t sector,
2790                               size_t size, unsigned long offset,
2791                               struct block_device *bdev,
2792                               struct bio **bio_ret,
2793                               unsigned long max_pages,
2794                               bio_end_io_t end_io_func,
2795                               int mirror_num,
2796                               unsigned long prev_bio_flags,
2797                               unsigned long bio_flags)
2798 {
2799         int ret = 0;
2800         struct bio *bio;
2801         int nr;
2802         int contig = 0;
2803         int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
2804         int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
2805         size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);
2806
2807         if (bio_ret && *bio_ret) {
2808                 bio = *bio_ret;
2809                 if (old_compressed)
2810                         contig = bio->bi_iter.bi_sector == sector;
2811                 else
2812                         contig = bio_end_sector(bio) == sector;
2813
2814                 if (prev_bio_flags != bio_flags || !contig ||
2815                     merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) ||
2816                     bio_add_page(bio, page, page_size, offset) < page_size) {
2817                         ret = submit_one_bio(rw, bio, mirror_num,
2818                                              prev_bio_flags);
2819                         if (ret < 0)
2820                                 return ret;
2821                         bio = NULL;
2822                 } else {
2823                         return 0;
2824                 }
2825         }
2826         if (this_compressed)
2827                 nr = BIO_MAX_PAGES;
2828         else
2829                 nr = bio_get_nr_vecs(bdev);
2830
2831         bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
2832         if (!bio)
2833                 return -ENOMEM;
2834
2835         bio_add_page(bio, page, page_size, offset);
2836         bio->bi_end_io = end_io_func;
2837         bio->bi_private = tree;
2838
2839         if (bio_ret)
2840                 *bio_ret = bio;
2841         else
2842                 ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
2843
2844         return ret;
2845 }
2846
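/* tie a page to its extent buffer via page->private, grabbing a page ref once */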
2847 static void attach_extent_buffer_page(struct extent_buffer *eb,
2848                                       struct page *page)
2849 {
2850         if (!PagePrivate(page)) {
2851                 SetPagePrivate(page);
2852                 page_cache_get(page);
2853                 set_page_private(page, (unsigned long)eb);
2854         } else {
2855                 WARN_ON(page->private != (unsigned long)eb);
2856         }
2857 }
2858
2859 void set_page_extent_mapped(struct page *page)
2860 {
2861         if (!PagePrivate(page)) {
2862                 SetPagePrivate(page);
2863                 page_cache_get(page);
2864                 set_page_private(page, EXTENT_PAGE_PRIVATE);
2865         }
2866 }
2867
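/*
 * return the cached extent map in *@em_cached if it is still in the tree and
 * covers @start; otherwise drop the stale cache entry, look the mapping up
 * again via @get_extent and cache the new result for the next call.
 */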
2868 static struct extent_map *
2869 __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
2870                  u64 start, u64 len, get_extent_t *get_extent,
2871                  struct extent_map **em_cached)
2872 {
2873         struct extent_map *em;
2874
2875         if (em_cached && *em_cached) {
2876                 em = *em_cached;
2877                 if (extent_map_in_tree(em) && start >= em->start &&
2878                     start < extent_map_end(em)) {
2879                         atomic_inc(&em->refs);
2880                         return em;
2881                 }
2882
2883                 free_extent_map(em);
2884                 *em_cached = NULL;
2885         }
2886
2887         em = get_extent(inode, page, pg_offset, start, len, 0);
2888         if (em_cached && !IS_ERR_OR_NULL(em)) {
2889                 BUG_ON(*em_cached);
2890                 atomic_inc(&em->refs);
2891                 *em_cached = em;
2892         }
2893         return em;
2894 }
2895 /*
2896  * basic readpage implementation.  Locked extent state structs are inserted
2897  * into the tree and removed again when the IO is done (by the end_io
2898  * handlers).
2899  * XXX JDM: This needs looking at to ensure proper page locking
2900  */
2901 static int __do_readpage(struct extent_io_tree *tree,
2902                          struct page *page,
2903                          get_extent_t *get_extent,
2904                          struct extent_map **em_cached,
2905                          struct bio **bio, int mirror_num,
2906                          unsigned long *bio_flags, int rw)
2907 {
2908         struct inode *inode = page->mapping->host;
2909         u64 start = page_offset(page);
2910         u64 page_end = start + PAGE_CACHE_SIZE - 1;
2911         u64 end;
2912         u64 cur = start;
2913         u64 extent_offset;
2914         u64 last_byte = i_size_read(inode);
2915         u64 block_start;
2916         u64 cur_end;
2917         sector_t sector;
2918         struct extent_map *em;
2919         struct block_device *bdev;
2920         int ret;
2921         int nr = 0;
2922         int parent_locked = *bio_flags & EXTENT_BIO_PARENT_LOCKED;
2923         size_t pg_offset = 0;
2924         size_t iosize;
2925         size_t disk_io_size;
2926         size_t blocksize = inode->i_sb->s_blocksize;
2927         unsigned long this_bio_flag = *bio_flags & EXTENT_BIO_PARENT_LOCKED;
2928
2929         set_page_extent_mapped(page);
2930
2931         end = page_end;
2932         if (!PageUptodate(page)) {
2933                 if (cleancache_get_page(page) == 0) {
2934                         BUG_ON(blocksize != PAGE_SIZE);
2935                         unlock_extent(tree, start, end);
2936                         goto out;
2937                 }
2938         }
2939
2940         if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
2941                 char *userpage;
2942                 size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
2943
2944                 if (zero_offset) {
2945                         iosize = PAGE_CACHE_SIZE - zero_offset;
2946                         userpage = kmap_atomic(page);
2947                         memset(userpage + zero_offset, 0, iosize);
2948                         flush_dcache_page(page);
2949                         kunmap_atomic(userpage);
2950                 }
2951         }
2952         while (cur <= end) {
2953                 unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
2954
2955                 if (cur >= last_byte) {
2956                         char *userpage;
2957                         struct extent_state *cached = NULL;
2958
2959                         iosize = PAGE_CACHE_SIZE - pg_offset;
2960                         userpage = kmap_atomic(page);
2961                         memset(userpage + pg_offset, 0, iosize);
2962                         flush_dcache_page(page);
2963                         kunmap_atomic(userpage);
2964                         set_extent_uptodate(tree, cur, cur + iosize - 1,
2965                                             &cached, GFP_NOFS);
2966                         if (!parent_locked)
2967                                 unlock_extent_cached(tree, cur,
2968                                                      cur + iosize - 1,
2969                                                      &cached, GFP_NOFS);
2970                         break;
2971                 }
2972                 em = __get_extent_map(inode, page, pg_offset, cur,
2973                                       end - cur + 1, get_extent, em_cached);
2974                 if (IS_ERR_OR_NULL(em)) {
2975                         SetPageError(page);
2976                         if (!parent_locked)
2977                                 unlock_extent(tree, cur, end);
2978                         break;
2979                 }
2980                 extent_offset = cur - em->start;
2981                 BUG_ON(extent_map_end(em) <= cur);
2982                 BUG_ON(end < cur);
2983
2984                 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2985                         this_bio_flag |= EXTENT_BIO_COMPRESSED;
2986                         extent_set_compress_type(&this_bio_flag,
2987                                                  em->compress_type);
2988                 }
2989
2990                 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2991                 cur_end = min(extent_map_end(em) - 1, end);
2992                 iosize = ALIGN(iosize, blocksize);
2993                 if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
2994                         disk_io_size = em->block_len;
2995                         sector = em->block_start >> 9;
2996                 } else {
2997                         sector = (em->block_start + extent_offset) >> 9;
2998                         disk_io_size = iosize;
2999                 }
3000                 bdev = em->bdev;
3001                 block_start = em->block_start;
3002                 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
3003                         block_start = EXTENT_MAP_HOLE;
3004                 free_extent_map(em);
3005                 em = NULL;
3006
3007                 /* we've found a hole, just zero and go on */
3008                 if (block_start == EXTENT_MAP_HOLE) {
3009                         char *userpage;
3010                         struct extent_state *cached = NULL;
3011
3012                         userpage = kmap_atomic(page);
3013                         memset(userpage + pg_offset, 0, iosize);
3014                         flush_dcache_page(page);
3015                         kunmap_atomic(userpage);
3016
3017                         set_extent_uptodate(tree, cur, cur + iosize - 1,
3018                                             &cached, GFP_NOFS);
3019                         unlock_extent_cached(tree, cur, cur + iosize - 1,
3020                                              &cached, GFP_NOFS);
3021                         cur = cur + iosize;
3022                         pg_offset += iosize;
3023                         continue;
3024                 }
3025                 /* the get_extent function already copied into the page */
3026                 if (test_range_bit(tree, cur, cur_end,
3027                                    EXTENT_UPTODATE, 1, NULL)) {
3028                         check_page_uptodate(tree, page);
3029                         if (!parent_locked)
3030                                 unlock_extent(tree, cur, cur + iosize - 1);
3031                         cur = cur + iosize;
3032                         pg_offset += iosize;
3033                         continue;
3034                 }
3035                 /* we have an inline extent but it didn't get marked
3036                  * up to date.  Error out.
3037                  */
3038                 if (block_start == EXTENT_MAP_INLINE) {
3039                         SetPageError(page);
3040                         if (!parent_locked)
3041                                 unlock_extent(tree, cur, cur + iosize - 1);
3042                         cur = cur + iosize;
3043                         pg_offset += iosize;
3044                         continue;
3045                 }
3046
3047                 pnr -= page->index;
3048                 ret = submit_extent_page(rw, tree, page,
3049                                          sector, disk_io_size, pg_offset,
3050                                          bdev, bio, pnr,
3051                                          end_bio_extent_readpage, mirror_num,
3052                                          *bio_flags,
3053                                          this_bio_flag);
3054                 if (!ret) {
3055                         nr++;
3056                         *bio_flags = this_bio_flag;
3057                 } else {
3058                         SetPageError(page);
3059                         if (!parent_locked)
3060                                 unlock_extent(tree, cur, cur + iosize - 1);
3061                 }
3062                 cur = cur + iosize;
3063                 pg_offset += iosize;
3064         }
3065 out:
3066         if (!nr) {
3067                 if (!PageError(page))
3068                         SetPageUptodate(page);
3069                 unlock_page(page);
3070         }
3071         return 0;
3072 }
3073
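/*
 * lock the extent range covering a run of contiguous pages, waiting for any
 * ordered extents overlapping that range to finish, then read each page in
 * the run and drop the page reference taken by the caller.
 */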
3074 static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
3075                                              struct page *pages[], int nr_pages,
3076                                              u64 start, u64 end,
3077                                              get_extent_t *get_extent,
3078                                              struct extent_map **em_cached,
3079                                              struct bio **bio, int mirror_num,
3080                                              unsigned long *bio_flags, int rw)
3081 {
3082         struct inode *inode;
3083         struct btrfs_ordered_extent *ordered;
3084         int index;
3085
3086         inode = pages[0]->mapping->host;
3087         while (1) {
3088                 lock_extent(tree, start, end);
3089                 ordered = btrfs_lookup_ordered_range(inode, start,
3090                                                      end - start + 1);
3091                 if (!ordered)
3092                         break;
3093                 unlock_extent(tree, start, end);
3094                 btrfs_start_ordered_extent(inode, ordered, 1);
3095                 btrfs_put_ordered_extent(ordered);
3096         }
3097
3098         for (index = 0; index < nr_pages; index++) {
3099                 __do_readpage(tree, pages[index], get_extent, em_cached, bio,
3100                               mirror_num, bio_flags, rw);
3101                 page_cache_release(pages[index]);
3102         }
3103 }
3104
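/*
 * walk the page array and group runs of pages that are contiguous in file
 * offset, handing each run to __do_contiguous_readpages() so the extent
 * range only has to be locked once per run.
 */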
3105 static void __extent_readpages(struct extent_io_tree *tree,
3106                                struct page *pages[],
3107                                int nr_pages, get_extent_t *get_extent,
3108                                struct extent_map **em_cached,
3109                                struct bio **bio, int mirror_num,
3110                                unsigned long *bio_flags, int rw)
3111 {
3112         u64 start = 0;
3113         u64 end = 0;
3114         u64 page_start;
3115         int index;
3116         int first_index = 0;
3117
3118         for (index = 0; index < nr_pages; index++) {
3119                 page_start = page_offset(pages[index]);
3120                 if (!end) {
3121                         start = page_start;
3122                         end = start + PAGE_CACHE_SIZE - 1;
3123                         first_index = index;
3124                 } else if (end + 1 == page_start) {
3125                         end += PAGE_CACHE_SIZE;
3126                 } else {
3127                         __do_contiguous_readpages(tree, &pages[first_index],
3128                                                   index - first_index, start,
3129                                                   end, get_extent, em_cached,
3130                                                   bio, mirror_num, bio_flags,
3131                                                   rw);
3132                         start = page_start;
3133                         end = start + PAGE_CACHE_SIZE - 1;
3134                         first_index = index;
3135                 }
3136         }
3137
3138         if (end)
3139                 __do_contiguous_readpages(tree, &pages[first_index],
3140                                           index - first_index, start,
3141                                           end, get_extent, em_cached, bio,
3142                                           mirror_num, bio_flags, rw);
3143 }
3144
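/*
 * lock the extent range of a single page, waiting for any ordered extent
 * covering its start offset to finish, then read the page.  The caller is
 * responsible for submitting the bio that may be left behind in *@bio.
 */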
3145 static int __extent_read_full_page(struct extent_io_tree *tree,
3146                                    struct page *page,
3147                                    get_extent_t *get_extent,
3148                                    struct bio **bio, int mirror_num,
3149                                    unsigned long *bio_flags, int rw)
3150 {
3151         struct inode *inode = page->mapping->host;
3152         struct btrfs_ordered_extent *ordered;
3153         u64 start = page_offset(page);
3154         u64 end = start + PAGE_CACHE_SIZE - 1;
3155         int ret;
3156
3157         while (1) {
3158                 lock_extent(tree, start, end);
3159                 ordered = btrfs_lookup_ordered_extent(inode, start);
3160                 if (!ordered)
3161                         break;
3162                 unlock_extent(tree, start, end);
3163                 btrfs_start_ordered_extent(inode, ordered, 1);
3164                 btrfs_put_ordered_extent(ordered);
3165         }
3166
3167         ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num,
3168                             bio_flags, rw);
3169         return ret;
3170 }
3171
3172 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
3173                             get_extent_t *get_extent, int mirror_num)
3174 {
3175         struct bio *bio = NULL;
3176         unsigned long bio_flags = 0;
3177         int ret;
3178
3179         ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num,
3180                                       &bio_flags, READ);
3181         if (bio)
3182                 ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
3183         return ret;
3184 }
3185
3186 int extent_read_full_page_nolock(struct extent_io_tree *tree, struct page *page,
3187                                  get_extent_t *get_extent, int mirror_num)
3188 {
3189         struct bio *bio = NULL;
3190         unsigned long bio_flags = EXTENT_BIO_PARENT_LOCKED;
3191         int ret;
3192
3193         ret = __do_readpage(tree, page, get_extent, NULL, &bio, mirror_num,
3194                                       &bio_flags, READ);
3195         if (bio)
3196                 ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
3197         return ret;
3198 }
3199
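/*
 * account @nr_written pages against wbc->nr_to_write and, for cyclic or
 * whole-file writeback, remember where to pick up next time by advancing
 * the mapping's writeback_index past the pages just written.
 */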
3200 static noinline void update_nr_written(struct page *page,
3201                                       struct writeback_control *wbc,
3202                                       unsigned long nr_written)
3203 {
3204         wbc->nr_to_write -= nr_written;
3205         if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
3206             wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
3207                 page->mapping->writeback_index = page->index + nr_written;
3208 }
3209
3210 /*
3211  * helper for __extent_writepage, doing all of the delayed allocation setup.
3212  *
3213  * This returns 1 if our fill_delalloc function did all the work required
3214  * to write the page (copy into inline extent).  In this case the IO has
3215  * been started and the page is already unlocked.
3216  *
3217  * This returns 0 if all went well (page still locked)
3218  * This returns < 0 if there were errors (page still locked)
3219  */
3220 static noinline_for_stack int writepage_delalloc(struct inode *inode,
3221                               struct page *page, struct writeback_control *wbc,
3222                               struct extent_page_data *epd,
3223                               u64 delalloc_start,
3224                               unsigned long *nr_written)
3225 {
3226         struct extent_io_tree *tree = epd->tree;
3227         u64 page_end = delalloc_start + PAGE_CACHE_SIZE - 1;
3228         u64 nr_delalloc;
3229         u64 delalloc_to_write = 0;
3230         u64 delalloc_end = 0;
3231         int ret;
3232         int page_started = 0;
3233
3234         if (epd->extent_locked || !tree->ops || !tree->ops->fill_delalloc)
3235                 return 0;
3236
3237         while (delalloc_end < page_end) {
3238                 nr_delalloc = find_lock_delalloc_range(inode, tree,
3239                                                page,
3240                                                &delalloc_start,
3241                                                &delalloc_end,
3242                                                128 * 1024 * 1024);
3243                 if (nr_delalloc == 0) {
3244                         delalloc_start = delalloc_end + 1;
3245                         continue;
3246                 }
3247                 ret = tree->ops->fill_delalloc(inode, page,
3248                                                delalloc_start,
3249                                                delalloc_end,
3250                                                &page_started,
3251                                                nr_written);
3252                 /* File system has been set read-only */
3253                 if (ret) {
3254                         SetPageError(page);
3255                         /* fill_delalloc should return < 0 for error
3256                          * but just in case, we use > 0 here meaning the
3257                          * IO is started, so we don't want to return > 0
3258                          * unless things are going well.
3259                          */
3260                         ret = ret < 0 ? ret : -EIO;
3261                         goto done;
3262                 }
3263                 /*
3264                  * delalloc_end is already one less than the total
3265                  * length, so we don't subtract one from
3266                  * PAGE_CACHE_SIZE
3267                  */
3268                 delalloc_to_write += (delalloc_end - delalloc_start +
3269                                       PAGE_CACHE_SIZE) >>
3270                                       PAGE_CACHE_SHIFT;
3271                 delalloc_start = delalloc_end + 1;
3272         }
3273         if (wbc->nr_to_write < delalloc_to_write) {
3274                 int thresh = 8192;
3275
3276                 if (delalloc_to_write < thresh * 2)
3277                         thresh = delalloc_to_write;
3278                 wbc->nr_to_write = min_t(u64, delalloc_to_write,
3279                                          thresh);
3280         }
3281
3282         /* did the fill delalloc function already unlock and start
3283          * the IO?
3284          */
3285         if (page_started) {
3286                 /*
3287                  * we've unlocked the page, so we can't update
3288                  * the mapping's writeback index, just update
3289                  * nr_to_write.
3290                  */
3291                 wbc->nr_to_write -= *nr_written;
3292                 return 1;
3293         }
3294
3295         ret = 0;
3296
3297 done:
3298         return ret;
3299 }
3300
3301 /*
3302  * helper for __extent_writepage.  This calls the writepage start hooks,
3303  * and does the loop to map the page into extents and bios.
3304  *
3305  * We return 1 if the IO is started and the page is unlocked,
3306  * 0 if all went well (page still locked)
3307  * < 0 if there were errors (page still locked)
3308  */
3309 static noinline_for_stack int __extent_writepage_io(struct inode *inode,
3310                                  struct page *page,
3311                                  struct writeback_control *wbc,
3312                                  struct extent_page_data *epd,
3313                                  loff_t i_size,
3314                                  unsigned long nr_written,
3315                                  int write_flags, int *nr_ret)
3316 {
3317         struct extent_io_tree *tree = epd->tree;
3318         u64 start = page_offset(page);
3319         u64 page_end = start + PAGE_CACHE_SIZE - 1;
3320         u64 end;
3321         u64 cur = start;
3322         u64 extent_offset;
3323         u64 block_start;
3324         u64 iosize;
3325         sector_t sector;
3326         struct extent_state *cached_state = NULL;
3327         struct extent_map *em;
3328         struct block_device *bdev;
3329         size_t pg_offset = 0;
3330         size_t blocksize;
3331         int ret = 0;
3332         int nr = 0;
3333         bool compressed;
3334
3335         if (tree->ops && tree->ops->writepage_start_hook) {
3336                 ret = tree->ops->writepage_start_hook(page, start,
3337                                                       page_end);
3338                 if (ret) {
3339                         /* Fixup worker will requeue */
3340                         if (ret == -EBUSY)
3341                                 wbc->pages_skipped++;
3342                         else
3343                                 redirty_page_for_writepage(wbc, page);
3344
3345                         update_nr_written(page, wbc, nr_written);
3346                         unlock_page(page);
3347                         ret = 1;
3348                         goto done_unlocked;
3349                 }
3350         }
3351
3352         /*
3353          * we don't want to touch the inode after unlocking the page,
3354          * so we update the mapping writeback index now
3355          */
3356         update_nr_written(page, wbc, nr_written + 1);
3357
3358         end = page_end;
3359         if (i_size <= start) {
3360                 if (tree->ops && tree->ops->writepage_end_io_hook)
3361                         tree->ops->writepage_end_io_hook(page, start,
3362                                                          page_end, NULL, 1);
3363                 goto done;
3364         }
3365
3366         blocksize = inode->i_sb->s_blocksize;
3367
3368         while (cur <= end) {
3369                 u64 em_end;
3370                 if (cur >= i_size) {
3371                         if (tree->ops && tree->ops->writepage_end_io_hook)
3372                                 tree->ops->writepage_end_io_hook(page, cur,
3373                                                          page_end, NULL, 1);
3374                         break;
3375                 }
3376                 em = epd->get_extent(inode, page, pg_offset, cur,
3377                                      end - cur + 1, 1);
3378                 if (IS_ERR_OR_NULL(em)) {
3379                         SetPageError(page);
3380                         ret = PTR_ERR_OR_ZERO(em);
3381                         break;
3382                 }
3383
3384                 extent_offset = cur - em->start;
3385                 em_end = extent_map_end(em);
3386                 BUG_ON(em_end <= cur);
3387                 BUG_ON(end < cur);
3388                 iosize = min(em_end - cur, end - cur + 1);
3389                 iosize = ALIGN(iosize, blocksize);
3390                 sector = (em->block_start + extent_offset) >> 9;
3391                 bdev = em->bdev;
3392                 block_start = em->block_start;
3393                 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
3394                 free_extent_map(em);
3395                 em = NULL;
3396
3397                 /*
3398                  * compressed and inline extents are written through other
3399                  * paths in the FS
3400                  */
3401                 if (compressed || block_start == EXTENT_MAP_HOLE ||
3402                     block_start == EXTENT_MAP_INLINE) {
3403                         /*
3404                          * end_io notification does not happen here for
3405                          * compressed extents
3406                          */
3407                         if (!compressed && tree->ops &&
3408                             tree->ops->writepage_end_io_hook)
3409                                 tree->ops->writepage_end_io_hook(page, cur,
3410                                                          cur + iosize - 1,
3411                                                          NULL, 1);
3412                         else if (compressed) {
3413                                 /* we don't want to end_page_writeback on
3414                                  * a compressed extent.  this happens
3415                                  * elsewhere
3416                                  */
3417                                 nr++;
3418                         }
3419
3420                         cur += iosize;
3421                         pg_offset += iosize;
3422                         continue;
3423                 }
3424
3425                 if (tree->ops && tree->ops->writepage_io_hook) {
3426                         ret = tree->ops->writepage_io_hook(page, cur,
3427                                                 cur + iosize - 1);
3428                 } else {
3429                         ret = 0;
3430                 }
3431                 if (ret) {
3432                         SetPageError(page);
3433                 } else {
3434                         unsigned long max_nr = (i_size >> PAGE_CACHE_SHIFT) + 1;
3435
3436                         set_range_writeback(tree, cur, cur + iosize - 1);
3437                         if (!PageWriteback(page)) {
3438                                 btrfs_err(BTRFS_I(inode)->root->fs_info,
3439                                            "page %lu not writeback, cur %llu end %llu",
3440                                        page->index, cur, end);
3441                         }
3442
3443                         ret = submit_extent_page(write_flags, tree, page,
3444                                                  sector, iosize, pg_offset,
3445                                                  bdev, &epd->bio, max_nr,
3446                                                  end_bio_extent_writepage,
3447                                                  0, 0, 0);
3448                         if (ret)
3449                                 SetPageError(page);
3450                 }
3451                 cur = cur + iosize;
3452                 pg_offset += iosize;
3453                 nr++;
3454         }
3455 done:
3456         *nr_ret = nr;
3457
3458 done_unlocked:
3459
3460         /* drop our reference on any cached states */
3461         free_extent_state(cached_state);
3462         return ret;
3463 }
3464
3465 /*
3466  * the writepage semantics are similar to regular writepage.  extent
3467  * records are inserted to lock ranges in the tree, and as dirty areas
3468  * are found, they are marked writeback.  Then the lock bits are removed
3469  * and the end_io handler clears the writeback ranges
3470  */
3471 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
3472                               void *data)
3473 {
3474         struct inode *inode = page->mapping->host;
3475         struct extent_page_data *epd = data;
3476         u64 start = page_offset(page);
3477         u64 page_end = start + PAGE_CACHE_SIZE - 1;
3478         int ret;
3479         int nr = 0;
3480         size_t pg_offset = 0;
3481         loff_t i_size = i_size_read(inode);
3482         unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
3483         int write_flags;
3484         unsigned long nr_written = 0;
3485
3486         if (wbc->sync_mode == WB_SYNC_ALL)
3487                 write_flags = WRITE_SYNC;
3488         else
3489                 write_flags = WRITE;
3490
3491         trace___extent_writepage(page, inode, wbc);
3492
3493         WARN_ON(!PageLocked(page));
3494
3495         ClearPageError(page);
3496
3497         pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
3498         if (page->index > end_index ||
3499            (page->index == end_index && !pg_offset)) {
3500                 page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
3501                 unlock_page(page);
3502                 return 0;
3503         }
3504
3505         if (page->index == end_index) {
3506                 char *userpage;
3507
3508                 userpage = kmap_atomic(page);
3509                 memset(userpage + pg_offset, 0,
3510                        PAGE_CACHE_SIZE - pg_offset);
3511                 kunmap_atomic(userpage);
3512                 flush_dcache_page(page);
3513         }
3514
3515         pg_offset = 0;
3516
3517         set_page_extent_mapped(page);
3518
3519         ret = writepage_delalloc(inode, page, wbc, epd, start, &nr_written);
3520         if (ret == 1)
3521                 goto done_unlocked;
3522         if (ret)
3523                 goto done;
3524
3525         ret = __extent_writepage_io(inode, page, wbc, epd,
3526                                     i_size, nr_written, write_flags, &nr);
3527         if (ret == 1)
3528                 goto done_unlocked;
3529
3530 done:
3531         if (nr == 0) {
3532                 /* make sure the mapping tag for page dirty gets cleared */
3533                 set_page_writeback(page);
3534                 end_page_writeback(page);
3535         }
3536         if (PageError(page)) {
3537                 ret = ret < 0 ? ret : -EIO;
3538                 end_extent_writepage(page, ret, start, page_end);
3539         }
3540         unlock_page(page);
3541         return ret;
3542
3543 done_unlocked:
3544         return 0;
3545 }
3546
3547 void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
3548 {
3549         wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
3550                        TASK_UNINTERRUPTIBLE);
3551 }
3552
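/*
 * take the tree write lock on @eb, flushing the pending bio first if the
 * lock is contended, wait for any writeback already in flight when this is
 * a synchronous write, and move the buffer from dirty to writeback state.
 * Returns 1 with all of the eb's pages locked when the buffer must be
 * written out, 0 when it is clean or someone else is writing it back.
 */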
3553 static noinline_for_stack int
3554 lock_extent_buffer_for_io(struct extent_buffer *eb,
3555                           struct btrfs_fs_info *fs_info,
3556                           struct extent_page_data *epd)
3557 {
3558         unsigned long i, num_pages;
3559         int flush = 0;
3560         int ret = 0;
3561
3562         if (!btrfs_try_tree_write_lock(eb)) {
3563                 flush = 1;
3564                 flush_write_bio(epd);
3565                 btrfs_tree_lock(eb);
3566         }
3567
3568         if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
3569                 btrfs_tree_unlock(eb);
3570                 if (!epd->sync_io)
3571                         return 0;
3572                 if (!flush) {
3573                         flush_write_bio(epd);
3574                         flush = 1;
3575                 }
3576                 while (1) {
3577                         wait_on_extent_buffer_writeback(eb);
3578                         btrfs_tree_lock(eb);
3579                         if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
3580                                 break;
3581                         btrfs_tree_unlock(eb);
3582                 }
3583         }
3584
3585         /*
3586          * We need to do this to prevent races with anyone checking whether the
3587          * eb is under IO, since we can end up having no IO bits set for a short
3588          * period of time.
3589          */
3590         spin_lock(&eb->refs_lock);
3591         if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3592                 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3593                 spin_unlock(&eb->refs_lock);
3594                 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
3595                 __percpu_counter_add(&fs_info->dirty_metadata_bytes,
3596                                      -eb->len,
3597                                      fs_info->dirty_metadata_batch);
3598                 ret = 1;
3599         } else {
3600                 spin_unlock(&eb->refs_lock);
3601         }
3602
3603         btrfs_tree_unlock(eb);
3604
3605         if (!ret)
3606                 return ret;
3607
3608         num_pages = num_extent_pages(eb->start, eb->len);
3609         for (i = 0; i < num_pages; i++) {
3610                 struct page *p = eb->pages[i];
3611
3612                 if (!trylock_page(p)) {
3613                         if (!flush) {
3614                                 flush_write_bio(epd);
3615                                 flush = 1;
3616                         }
3617                         lock_page(p);
3618                 }
3619         }
3620
3621         return ret;
3622 }
3623
3624 static void end_extent_buffer_writeback(struct extent_buffer *eb)
3625 {
3626         clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3627         smp_mb__after_atomic();
3628         wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
3629 }
3630
3631 static void set_btree_ioerr(struct page *page)
3632 {
3633         struct extent_buffer *eb = (struct extent_buffer *)page->private;
3634         struct btrfs_inode *btree_ino = BTRFS_I(eb->fs_info->btree_inode);
3635
3636         SetPageError(page);
3637         if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
3638                 return;
3639
3640         /*
3641          * If writeback for a btree extent that doesn't belong to a log tree
3642          * failed, increment the counter transaction->eb_write_errors.
3643          * We do this because while the transaction is running and before it's
3644          * committing (when we call filemap_fdata[write|wait]_range against
3645          * the btree inode), we might have
3646          * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it
3647          * returns an error or an error happens during writeback, when we're
3648          * committing the transaction we wouldn't know about it, since the pages
3649          * may no longer be dirty nor marked for writeback (if a
3650          * subsequent modification to the extent buffer didn't happen before the
3651          * transaction commit), which makes filemap_fdata[write|wait]_range not
3652          * able to find the pages tagged with SetPageError at transaction
3653          * commit time. So if this happens we must abort the transaction,
3654          * otherwise we commit a super block with btree roots that point to
3655          * btree nodes/leafs whose content on disk is invalid - either garbage
3656          * or the content of some node/leaf from a past generation that got
3657          * cowed or deleted and is no longer valid.
3658          *
3659          * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would
3660          * not be enough - we need to distinguish between log tree extents vs
3661          * non-log tree extents, and the next filemap_fdatawait_range() call
3662          * will catch and clear such errors in the mapping - and that call might
3663          * be from a log sync and not from a transaction commit. Also, checking
3664          * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is
3665          * not done and would not be reliable - the eb might have been released
3666          * from memory and reading it back again means that flag would not be
3667          * set (since it's a runtime flag, not persisted on disk).
3668          *
3669          * Using the flags below in the btree inode also covers the case
3670          * where writepages() returns success after having started writeback
3671          * for all dirty pages, but before filemap_fdatawait_range() is called
3672          * the writeback for all those pages has already finished with
3673          * errors - because we were not using AS_EIO/AS_ENOSPC,
3674          * filemap_fdatawait_range() would return success, as it could not know
3675          * that writeback errors happened (the pages were no longer tagged for
3676          * writeback).
3677          */
3678         switch (eb->log_index) {
3679         case -1:
3680                 set_bit(BTRFS_INODE_BTREE_ERR, &btree_ino->runtime_flags);
3681                 break;
3682         case 0:
3683                 set_bit(BTRFS_INODE_BTREE_LOG1_ERR, &btree_ino->runtime_flags);
3684                 break;
3685         case 1:
3686                 set_bit(BTRFS_INODE_BTREE_LOG2_ERR, &btree_ino->runtime_flags);
3687                 break;
3688         default:
3689                 BUG(); /* unexpected, logic error */
3690         }
3691 }
3692
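/*
 * end_io handler for extent buffer writes: record any error on each page via
 * set_btree_ioerr(), end the page writeback, and once the last outstanding
 * page of a buffer completes wake up waiters on EXTENT_BUFFER_WRITEBACK.
 */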
3693 static void end_bio_extent_buffer_writepage(struct bio *bio, int err)
3694 {
3695         struct bio_vec *bvec;
3696         struct extent_buffer *eb;
3697         int i, done;
3698
3699         bio_for_each_segment_all(bvec, bio, i) {
3700                 struct page *page = bvec->bv_page;
3701
3702                 eb = (struct extent_buffer *)page->private;
3703                 BUG_ON(!eb);
3704                 done = atomic_dec_and_test(&eb->io_pages);
3705
3706                 if (err || test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) {
3707                         ClearPageUptodate(page);
3708                         set_btree_ioerr(page);
3709                 }
3710
3711                 end_page_writeback(page);
3712
3713                 if (!done)
3714                         continue;
3715
3716                 end_extent_buffer_writeback(eb);
3717         }
3718
3719         bio_put(bio);
3720 }
3721
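/*
 * submit every page of @eb for write.  Each page is marked writeback before
 * it is handed to submit_extent_page(); if a submission fails, the error is
 * recorded with set_btree_ioerr(), the outstanding io_pages count is dropped
 * and any pages not yet submitted are cleaned and unlocked again.
 */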
3722 static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
3723                         struct btrfs_fs_info *fs_info,
3724                         struct writeback_control *wbc,
3725                         struct extent_page_data *epd)
3726 {
3727         struct block_device *bdev = fs_info->fs_devices->latest_bdev;
3728         struct extent_io_tree *tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
3729         u64 offset = eb->start;
3730         unsigned long i, num_pages;
3731         unsigned long bio_flags = 0;
3732         int rw = (epd->sync_io ? WRITE_SYNC : WRITE) | REQ_META;
3733         int ret = 0;
3734
3735         clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
3736         num_pages = num_extent_pages(eb->start, eb->len);
3737         atomic_set(&eb->io_pages, num_pages);
3738         if (btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID)
3739                 bio_flags = EXTENT_BIO_TREE_LOG;
3740
3741         for (i = 0; i < num_pages; i++) {
3742                 struct page *p = eb->pages[i];
3743
3744                 clear_page_dirty_for_io(p);
3745                 set_page_writeback(p);
3746                 ret = submit_extent_page(rw, tree, p, offset >> 9,
3747                                          PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
3748                                          -1, end_bio_extent_buffer_writepage,
3749                                          0, epd->bio_flags, bio_flags);
3750                 epd->bio_flags = bio_flags;
3751                 if (ret) {
3752                         set_btree_ioerr(p);
3753                         end_page_writeback(p);
3754                         if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
3755                                 end_extent_buffer_writeback(eb);
3756                         ret = -EIO;
3757                         break;
3758                 }
3759                 offset += PAGE_CACHE_SIZE;
3760                 update_nr_written(p, wbc, 1);
3761                 unlock_page(p);
3762         }
3763
3764         if (unlikely(ret)) {
3765                 for (; i < num_pages; i++) {
3766                         struct page *p = eb->pages[i];
3767                         clear_page_dirty_for_io(p);
3768                         unlock_page(p);
3769                 }
3770         }
3771
3772         return ret;
3773 }
3774
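/*
 * write back dirty extent buffer pages of the btree inode.  The dirty page
 * walk mirrors extent_write_cache_pages() below, but pages are grouped by
 * the extent buffer that owns them and each buffer is written out as a
 * unit via write_one_eb().
 */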
3775 int btree_write_cache_pages(struct address_space *mapping,
3776                                    struct writeback_control *wbc)
3777 {
3778         struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree;
3779         struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
3780         struct extent_buffer *eb, *prev_eb = NULL;
3781         struct extent_page_data epd = {
3782                 .bio = NULL,
3783                 .tree = tree,
3784                 .extent_locked = 0,
3785                 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3786                 .bio_flags = 0,
3787         };
3788         int ret = 0;
3789         int done = 0;
3790         int nr_to_write_done = 0;
3791         struct pagevec pvec;
3792         int nr_pages;
3793         pgoff_t index;
3794         pgoff_t end;            /* Inclusive */
3795         int scanned = 0;
3796         int tag;
3797
3798         pagevec_init(&pvec, 0);
3799         if (wbc->range_cyclic) {
3800                 index = mapping->writeback_index; /* Start from prev offset */
3801                 end = -1;
3802         } else {
3803                 index = wbc->range_start >> PAGE_CACHE_SHIFT;
3804                 end = wbc->range_end >> PAGE_CACHE_SHIFT;
3805                 scanned = 1;
3806         }
3807         if (wbc->sync_mode == WB_SYNC_ALL)
3808                 tag = PAGECACHE_TAG_TOWRITE;
3809         else
3810                 tag = PAGECACHE_TAG_DIRTY;
3811 retry:
3812         if (wbc->sync_mode == WB_SYNC_ALL)
3813                 tag_pages_for_writeback(mapping, index, end);
3814         while (!done && !nr_to_write_done && (index <= end) &&
3815                (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3816                         min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
3817                 unsigned i;
3818
3819                 scanned = 1;
3820                 for (i = 0; i < nr_pages; i++) {
3821                         struct page *page = pvec.pages[i];
3822
3823                         if (!PagePrivate(page))
3824                                 continue;
3825
3826                         if (!wbc->range_cyclic && page->index > end) {
3827                                 done = 1;
3828                                 break;
3829                         }
3830
3831                         spin_lock(&mapping->private_lock);
3832                         if (!PagePrivate(page)) {
3833                                 spin_unlock(&mapping->private_lock);
3834                                 continue;
3835                         }
3836
3837                         eb = (struct extent_buffer *)page->private;
3838
3839                         /*
3840                          * Shouldn't happen and normally this would be a BUG_ON
3841                          * but no sense in crashing the user's box for something
3842                          * we can survive anyway.
3843                          */
3844                         if (WARN_ON(!eb)) {
3845                                 spin_unlock(&mapping->private_lock);
3846                                 continue;
3847                         }
3848
3849                         if (eb == prev_eb) {
3850                                 spin_unlock(&mapping->private_lock);
3851                                 continue;
3852                         }
3853
3854                         ret = atomic_inc_not_zero(&eb->refs);
3855                         spin_unlock(&mapping->private_lock);
3856                         if (!ret)
3857                                 continue;
3858
3859                         prev_eb = eb;
3860                         ret = lock_extent_buffer_for_io(eb, fs_info, &epd);
3861                         if (!ret) {
3862                                 free_extent_buffer(eb);
3863                                 continue;
3864                         }
3865
3866                         ret = write_one_eb(eb, fs_info, wbc, &epd);
3867                         if (ret) {
3868                                 done = 1;
3869                                 free_extent_buffer(eb);
3870                                 break;
3871                         }
3872                         free_extent_buffer(eb);
3873
3874                         /*
3875                          * the filesystem may choose to bump up nr_to_write.
3876                          * We have to make sure to honor the new nr_to_write
3877                          * at any time
3878                          */
3879                         nr_to_write_done = wbc->nr_to_write <= 0;
3880                 }
3881                 pagevec_release(&pvec);
3882                 cond_resched();
3883         }
3884         if (!scanned && !done) {
3885                 /*
3886                  * We hit the last page and there is more work to be done: wrap
3887                  * back to the start of the file
3888                  */
3889                 scanned = 1;
3890                 index = 0;
3891                 goto retry;
3892         }
3893         flush_write_bio(&epd);
3894         return ret;
3895 }
3896
3897 /**
3898  * extent_write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
3899  * @mapping: address space structure to write
3900  * @wbc: subtract the number of written pages from *@wbc->nr_to_write
3901  * @writepage: function called for each page
3902  * @data: data passed to writepage function
3903  *
3904  * If a page is already under I/O, write_cache_pages() skips it, even
3905  * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
3906  * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
3907  * and msync() need to guarantee that all the data which was dirty at the time
3908  * the call was made get new I/O started against them.  If wbc->sync_mode is
3909  * WB_SYNC_ALL then we were called for data integrity and we must wait for
3910  * existing IO to complete.
3911  */
3912 static int extent_write_cache_pages(struct extent_io_tree *tree,
3913                              struct address_space *mapping,
3914                              struct writeback_control *wbc,
3915                              writepage_t writepage, void *data,
3916                              void (*flush_fn)(void *))
3917 {
3918         struct inode *inode = mapping->host;
3919         int ret = 0;
3920         int done = 0;
3921         int err = 0;
3922         int nr_to_write_done = 0;
3923         struct pagevec pvec;
3924         int nr_pages;
3925         pgoff_t index;
3926         pgoff_t end;            /* Inclusive */
3927         int scanned = 0;
3928         int tag;
3929
3930         /*
3931          * We have to hold onto the inode so that ordered extents can do their
3932          * work when the IO finishes.  The alternative to this is failing to add
3933          * an ordered extent if the igrab() fails there and that is a huge pain
3934          * to deal with, so instead just hold onto the inode throughout the
3935          * writepages operation.  If it fails here we are freeing up the inode
3936          * anyway and we'd rather not waste our time writing out stuff that is
3937          * going to be truncated anyway.
3938          */
3939         if (!igrab(inode))
3940                 return 0;
3941
3942         pagevec_init(&pvec, 0);
3943         if (wbc->range_cyclic) {
3944                 index = mapping->writeback_index; /* Start from prev offset */
3945                 end = -1;
3946         } else {
3947                 index = wbc->range_start >> PAGE_CACHE_SHIFT;
3948                 end = wbc->range_end >> PAGE_CACHE_SHIFT;
3949                 scanned = 1;
3950         }
3951         if (wbc->sync_mode == WB_SYNC_ALL)
3952                 tag = PAGECACHE_TAG_TOWRITE;
3953         else
3954                 tag = PAGECACHE_TAG_DIRTY;
3955 retry:
3956         if (wbc->sync_mode == WB_SYNC_ALL)
3957                 tag_pages_for_writeback(mapping, index, end);
3958         while (!done && !nr_to_write_done && (index <= end) &&
3959                (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3960                         min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
3961                 unsigned i;
3962
3963                 scanned = 1;
3964                 for (i = 0; i < nr_pages; i++) {
3965                         struct page *page = pvec.pages[i];
3966
3967                         /*
3968                          * At this point we hold neither mapping->tree_lock nor
3969                          * lock on the page itself: the page may be truncated or
3970                          * invalidated (changing page->mapping to NULL), or even
3971                          * swizzled back from swapper_space to tmpfs file
3972                          * mapping
3973                          */
3974                         if (!trylock_page(page)) {
3975                                 flush_fn(data);
3976                                 lock_page(page);
3977                         }
3978
3979                         if (unlikely(page->mapping != mapping)) {
3980                                 unlock_page(page);
3981                                 continue;
3982                         }
3983
3984                         if (!wbc->range_cyclic && page->index > end) {
3985                                 done = 1;
3986                                 unlock_page(page);
3987                                 continue;
3988                         }
3989
3990                         if (wbc->sync_mode != WB_SYNC_NONE) {
3991                                 if (PageWriteback(page))
3992                                         flush_fn(data);
3993                                 wait_on_page_writeback(page);
3994                         }
3995
3996                         if (PageWriteback(page) ||
3997                             !clear_page_dirty_for_io(page)) {
3998                                 unlock_page(page);
3999                                 continue;
4000                         }
4001
4002                         ret = (*writepage)(page, wbc, data);
4003
4004                         if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
4005                                 unlock_page(page);
4006                                 ret = 0;
4007                         }
4008                         if (!err && ret < 0)
4009                                 err = ret;
4010
4011                         /*
4012                          * the filesystem may choose to bump up nr_to_write.
4013                          * We have to make sure to honor the new nr_to_write
4014                          * at any time
4015                          */
4016                         nr_to_write_done = wbc->nr_to_write <= 0;
4017                 }
4018                 pagevec_release(&pvec);
4019                 cond_resched();
4020         }
4021         if (!scanned && !done && !err) {
4022                 /*
4023                  * We hit the last page and there is more work to be done: wrap
4024                  * back to the start of the file
4025                  */
4026                 scanned = 1;
4027                 index = 0;
4028                 goto retry;
4029         }
4030         btrfs_add_delayed_iput(inode);
4031         return err;
4032 }
4033
4034 static void flush_epd_write_bio(struct extent_page_data *epd)
4035 {
4036         if (epd->bio) {
4037                 int rw = WRITE;
4038                 int ret;
4039
4040                 if (epd->sync_io)
4041                         rw = WRITE_SYNC;
4042
4043                 ret = submit_one_bio(rw, epd->bio, 0, epd->bio_flags);
4044                 BUG_ON(ret < 0); /* -ENOMEM */
4045                 epd->bio = NULL;
4046         }
4047 }
4048
4049 static noinline void flush_write_bio(void *data)
4050 {
4051         struct extent_page_data *epd = data;
4052         flush_epd_write_bio(epd);
4053 }
4054
4055 int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
4056                           get_extent_t *get_extent,
4057                           struct writeback_control *wbc)
4058 {
4059         int ret;
4060         struct extent_page_data epd = {
4061                 .bio = NULL,
4062                 .tree = tree,
4063                 .get_extent = get_extent,
4064                 .extent_locked = 0,
4065                 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
4066                 .bio_flags = 0,
4067         };
4068
4069         ret = __extent_writepage(page, wbc, &epd);
4070
4071         flush_epd_write_bio(&epd);
4072         return ret;
4073 }
4074
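/*
 * write out the pages covering a [start, end] range whose extent state is
 * already locked by the caller (epd.extent_locked is set).  Pages that are
 * no longer dirty still get the writepage_end_io_hook called with the
 * uptodate flag set before being unlocked.
 */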
4075 int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
4076                               u64 start, u64 end, get_extent_t *get_extent,
4077                               int mode)
4078 {
4079         int ret = 0;
4080         struct address_space *mapping = inode->i_mapping;
4081         struct page *page;
4082         unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
4083                 PAGE_CACHE_SHIFT;
4084
4085         struct extent_page_data epd = {
4086                 .bio = NULL,
4087                 .tree = tree,
4088                 .get_extent = get_extent,
4089                 .extent_locked = 1,
4090                 .sync_io = mode == WB_SYNC_ALL,
4091                 .bio_flags = 0,
4092         };
4093         struct writeback_control wbc_writepages = {
4094                 .sync_mode      = mode,
4095                 .nr_to_write    = nr_pages * 2,
4096                 .range_start    = start,
4097                 .range_end      = end + 1,
4098         };
4099
4100         while (start <= end) {
4101                 page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
4102                 if (clear_page_dirty_for_io(page))
4103                         ret = __extent_writepage(page, &wbc_writepages, &epd);
4104                 else {
4105                         if (tree->ops && tree->ops->writepage_end_io_hook)
4106                                 tree->ops->writepage_end_io_hook(page, start,
4107                                                  start + PAGE_CACHE_SIZE - 1,
4108                                                  NULL, 1);
4109                         unlock_page(page);
4110                 }
4111                 page_cache_release(page);
4112                 start += PAGE_CACHE_SIZE;
4113         }
4114
4115         flush_epd_write_bio(&epd);
4116         return ret;
4117 }
4118
4119 int extent_writepages(struct extent_io_tree *tree,
4120                       struct address_space *mapping,
4121                       get_extent_t *get_extent,
4122                       struct writeback_control *wbc)
4123 {
4124         int ret = 0;
4125         struct extent_page_data epd = {
4126                 .bio = NULL,
4127                 .tree = tree,
4128                 .get_extent = get_extent,
4129                 .extent_locked = 0,
4130                 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
4131                 .bio_flags = 0,
4132         };
4133
4134         ret = extent_write_cache_pages(tree, mapping, wbc,
4135                                        __extent_writepage, &epd,
4136                                        flush_write_bio);
4137         flush_epd_write_bio(&epd);
4138         return ret;
4139 }
4140
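/*
 * ->readpages() helper: add the pages to the page cache in batches of up to
 * 16, hand each batch to __extent_readpages() and submit whatever bio is
 * still open once all pages have been queued.
 */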
4141 int extent_readpages(struct extent_io_tree *tree,
4142                      struct address_space *mapping,
4143                      struct list_head *pages, unsigned nr_pages,
4144                      get_extent_t get_extent)
4145 {
4146         struct bio *bio = NULL;
4147         unsigned page_idx;
4148         unsigned long bio_flags = 0;
4149         struct page *pagepool[16];
4150         struct page *page;
4151         struct extent_map *em_cached = NULL;
4152         int nr = 0;
4153
4154         for (page_idx = 0; page_idx < nr_pages; page_idx++) {
4155                 page = list_entry(pages->prev, struct page, lru);
4156
4157                 prefetchw(&page->flags);
4158                 list_del(&page->lru);
4159                 if (add_to_page_cache_lru(page, mapping,
4160                                         page->index, GFP_NOFS)) {
4161                         page_cache_release(page);
4162                         continue;
4163                 }
4164
4165                 pagepool[nr++] = page;
4166                 if (nr < ARRAY_SIZE(pagepool))
4167                         continue;
4168                 __extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
4169                                    &bio, 0, &bio_flags, READ);
4170                 nr = 0;
4171         }
4172         if (nr)
4173                 __extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
4174                                    &bio, 0, &bio_flags, READ);
4175
4176         if (em_cached)
4177                 free_extent_map(em_cached);
4178
4179         BUG_ON(!list_empty(pages));
4180         if (bio)
4181                 return submit_one_bio(READ, bio, 0, bio_flags);
4182         return 0;
4183 }
4184
4185 /*
4186  * basic invalidatepage code; this waits on any locked or writeback
4187  * ranges corresponding to the page, and then deletes any extent state
4188  * records from the tree
4189  */
4190 int extent_invalidatepage(struct extent_io_tree *tree,
4191                           struct page *page, unsigned long offset)
4192 {
4193         struct extent_state *cached_state = NULL;
4194         u64 start = page_offset(page);
4195         u64 end = start + PAGE_CACHE_SIZE - 1;
4196         size_t blocksize = page->mapping->host->i_sb->s_blocksize;
4197
4198         start += ALIGN(offset, blocksize);
4199         if (start > end)
4200                 return 0;
4201
4202         lock_extent_bits(tree, start, end, 0, &cached_state);
4203         wait_on_page_writeback(page);
4204         clear_extent_bit(tree, start, end,
4205                          EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
4206                          EXTENT_DO_ACCOUNTING,
4207                          1, 1, &cached_state, GFP_NOFS);
4208         return 0;
4209 }
4210
4211 /*
4212  * a helper for releasepage; this tests for areas of the page that
4213  * are locked or under IO and drops the related state bits if it is safe
4214  * to drop the page.
4215  */
4216 static int try_release_extent_state(struct extent_map_tree *map,
4217                                     struct extent_io_tree *tree,
4218                                     struct page *page, gfp_t mask)
4219 {
4220         u64 start = page_offset(page);
4221         u64 end = start + PAGE_CACHE_SIZE - 1;
4222         int ret = 1;
4223
4224         if (test_range_bit(tree, start, end,
4225                            EXTENT_IOBITS, 0, NULL))
4226                 ret = 0;
4227         else {
4228                 if ((mask & GFP_NOFS) == GFP_NOFS)
4229                         mask = GFP_NOFS;
4230                 /*
4231                  * at this point we can safely clear everything except the
4232                  * locked bit and the nodatasum bit
4233                  */
4234                 ret = clear_extent_bit(tree, start, end,
4235                                  ~(EXTENT_LOCKED | EXTENT_NODATASUM),
4236                                  0, 0, NULL, mask);
4237
4238                 /* if clear_extent_bit failed for enomem reasons,
4239                  * we can't allow the release to continue.
4240                  */
4241                 if (ret < 0)
4242                         ret = 0;
4243                 else
4244                         ret = 1;
4245         }
4246         return ret;
4247 }
4248
4249 /*
4250  * a helper for releasepage.  As long as there are no locked extents
4251  * in the range corresponding to the page, both state records and extent
4252  * map records are removed
4253  */
4254 int try_release_extent_mapping(struct extent_map_tree *map,
4255                                struct extent_io_tree *tree, struct page *page,
4256                                gfp_t mask)
4257 {
4258         struct extent_map *em;
4259         u64 start = page_offset(page);
4260         u64 end = start + PAGE_CACHE_SIZE - 1;
4261
4262         if ((mask & __GFP_WAIT) &&
4263             page->mapping->host->i_size > 16 * 1024 * 1024) {
4264                 u64 len;
4265                 while (start <= end) {
4266                         len = end - start + 1;
4267                         write_lock(&map->lock);
4268                         em = lookup_extent_mapping(map, start, len);
4269                         if (!em) {
4270                                 write_unlock(&map->lock);
4271                                 break;
4272                         }
4273                         if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
4274                             em->start != start) {
4275                                 write_unlock(&map->lock);
4276                                 free_extent_map(em);
4277                                 break;
4278                         }
4279                         if (!test_range_bit(tree, em->start,
4280                                             extent_map_end(em) - 1,
4281                                             EXTENT_LOCKED | EXTENT_WRITEBACK,
4282                                             0, NULL)) {
4283                                 remove_extent_mapping(map, em);
4284                                 /* once for the rb tree */
4285                                 free_extent_map(em);
4286                         }
4287                         start = extent_map_end(em);
4288                         write_unlock(&map->lock);
4289
4290                         /* once for us */
4291                         free_extent_map(em);
4292                 }
4293         }
4294         return try_release_extent_state(map, tree, page, mask);
4295 }
4296
4297 /*
4298  * helper function for fiemap, which doesn't want to see any holes.
4299  * This maps until we find something past 'last'
4300  */
4301 static struct extent_map *get_extent_skip_holes(struct inode *inode,
4302                                                 u64 offset,
4303                                                 u64 last,
4304                                                 get_extent_t *get_extent)
4305 {
4306         u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
4307         struct extent_map *em;
4308         u64 len;
4309
4310         if (offset >= last)
4311                 return NULL;
4312
4313         while (1) {
4314                 len = last - offset;
4315                 if (len == 0)
4316                         break;
4317                 len = ALIGN(len, sectorsize);
4318                 em = get_extent(inode, NULL, 0, offset, len, 0);
4319                 if (IS_ERR_OR_NULL(em))
4320                         return em;
4321
4322                 /* if this isn't a hole return it */
4323                 if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
4324                     em->block_start != EXTENT_MAP_HOLE) {
4325                         return em;
4326                 }
4327
4328                 /* this is a hole, advance to the next extent */
4329                 offset = extent_map_end(em);
4330                 free_extent_map(em);
4331                 if (offset >= last)
4332                         break;
4333         }
4334         return NULL;
4335 }
4336
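/*
 * Fiemap implementation: walk the file's extent maps from 'start' for 'len'
 * bytes, skipping holes, and report each extent to userspace via
 * fiemap_fill_next_extent().  The final extent is flagged FIEMAP_EXTENT_LAST.
 */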
4337 int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4338                 __u64 start, __u64 len, get_extent_t *get_extent)
4339 {
4340         int ret = 0;
4341         u64 off = start;
4342         u64 max = start + len;
4343         u32 flags = 0;
4344         u32 found_type;
4345         u64 last;
4346         u64 last_for_get_extent = 0;
4347         u64 disko = 0;
4348         u64 isize = i_size_read(inode);
4349         struct btrfs_key found_key;
4350         struct extent_map *em = NULL;
4351         struct extent_state *cached_state = NULL;
4352         struct btrfs_path *path;
4353         struct btrfs_root *root = BTRFS_I(inode)->root;
4354         int end = 0;
4355         u64 em_start = 0;
4356         u64 em_len = 0;
4357         u64 em_end = 0;
4358
4359         if (len == 0)
4360                 return -EINVAL;
4361
4362         path = btrfs_alloc_path();
4363         if (!path)
4364                 return -ENOMEM;
4365         path->leave_spinning = 1;
4366
4367         start = round_down(start, BTRFS_I(inode)->root->sectorsize);
4368         len = round_up(max, BTRFS_I(inode)->root->sectorsize) - start;
4369
4370         /*
4371          * lookup the last file extent.  We're not using i_size here
4372          * because there might be preallocation past i_size
4373          */
4374         ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode), -1,
4375                                        0);
4376         if (ret < 0) {
4377                 btrfs_free_path(path);
4378                 return ret;
4379         }
4380         WARN_ON(!ret);
4381         path->slots[0]--;
4382         btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
4383         found_type = found_key.type;
4384
4385         /* No extents, but there might be delalloc bits */
4386         if (found_key.objectid != btrfs_ino(inode) ||
4387             found_type != BTRFS_EXTENT_DATA_KEY) {
4388                 /* have to trust i_size as the end */
4389                 last = (u64)-1;
4390                 last_for_get_extent = isize;
4391         } else {
4392                 /*
4393                  * remember the start of the last extent.  There are a
4394                  * bunch of different factors that go into the length of the
4395  * extent, so it's much less complex to remember where it started
4396                  */
4397                 last = found_key.offset;
4398                 last_for_get_extent = last + 1;
4399         }
4400         btrfs_release_path(path);
4401
4402         /*
4403          * we might have some extents allocated but more delalloc past those
4404  * extents.  So we trust isize unless the start of the last extent is
4405          * beyond isize
4406          */
4407         if (last < isize) {
4408                 last = (u64)-1;
4409                 last_for_get_extent = isize;
4410         }
4411
4412         lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len - 1, 0,
4413                          &cached_state);
4414
4415         em = get_extent_skip_holes(inode, start, last_for_get_extent,
4416                                    get_extent);
4417         if (!em)
4418                 goto out;
4419         if (IS_ERR(em)) {
4420                 ret = PTR_ERR(em);
4421                 goto out;
4422         }
4423
4424         while (!end) {
4425                 u64 offset_in_extent = 0;
4426
4427                 /* break if the extent we found is outside the range */
4428                 if (em->start >= max || extent_map_end(em) < off)
4429                         break;
4430
4431                 /*
4432                  * get_extent may return an extent that starts before our
4433                  * requested range.  We have to make sure the ranges
4434                  * we return to fiemap always move forward and don't
4435                  * overlap, so adjust the offsets here
4436                  */
4437                 em_start = max(em->start, off);
4438
4439                 /*
4440                  * record the offset from the start of the extent
4441                  * for adjusting the disk offset below.  Only do this if the
4442                  * extent isn't compressed since our in ram offset may be past
4443                  * what we have actually allocated on disk.
4444                  */
4445                 if (!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
4446                         offset_in_extent = em_start - em->start;
4447                 em_end = extent_map_end(em);
4448                 em_len = em_end - em_start;
4449                 disko = 0;
4450                 flags = 0;
4451
4452                 /*
4453                  * bump off for our next call to get_extent
4454                  */
4455                 off = extent_map_end(em);
4456                 if (off >= max)
4457                         end = 1;
4458
4459                 if (em->block_start == EXTENT_MAP_LAST_BYTE) {
4460                         end = 1;
4461                         flags |= FIEMAP_EXTENT_LAST;
4462                 } else if (em->block_start == EXTENT_MAP_INLINE) {
4463                         flags |= (FIEMAP_EXTENT_DATA_INLINE |
4464                                   FIEMAP_EXTENT_NOT_ALIGNED);
4465                 } else if (em->block_start == EXTENT_MAP_DELALLOC) {
4466                         flags |= (FIEMAP_EXTENT_DELALLOC |
4467                                   FIEMAP_EXTENT_UNKNOWN);
4468                 } else if (fieinfo->fi_extents_max) {
4469                         u64 bytenr = em->block_start -
4470                                 (em->start - em->orig_start);
4471
4472                         disko = em->block_start + offset_in_extent;
4473
4474                         /*
4475                          * As btrfs supports shared space, this information
4476                          * can be exported to userspace tools via
4477                          * flag FIEMAP_EXTENT_SHARED.  If fi_extents_max == 0
4478                          * then we're just getting a count and we can skip the
4479                          * lookup stuff.
4480                          */
4481                         ret = btrfs_check_shared(NULL, root->fs_info,
4482                                                  root->objectid,
4483                                                  btrfs_ino(inode), bytenr);
4484                         if (ret < 0)
4485                                 goto out_free;
4486                         if (ret)
4487                                 flags |= FIEMAP_EXTENT_SHARED;
4488                         ret = 0;
4489                 }
4490                 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
4491                         flags |= FIEMAP_EXTENT_ENCODED;
4492
4493                 free_extent_map(em);
4494                 em = NULL;
4495                 if ((em_start >= last) || em_len == (u64)-1 ||
4496                    (last == (u64)-1 && isize <= em_end)) {
4497                         flags |= FIEMAP_EXTENT_LAST;
4498                         end = 1;
4499                 }
4500
4501                 /* now scan forward to see if this is really the last extent. */
4502                 em = get_extent_skip_holes(inode, off, last_for_get_extent,
4503                                            get_extent);
4504                 if (IS_ERR(em)) {
4505                         ret = PTR_ERR(em);
4506                         goto out;
4507                 }
4508                 if (!em) {
4509                         flags |= FIEMAP_EXTENT_LAST;
4510                         end = 1;
4511                 }
4512                 ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
4513                                               em_len, flags);
4514                 if (ret)
4515                         goto out_free;
4516         }
4517 out_free:
4518         free_extent_map(em);
4519 out:
4520         btrfs_free_path(path);
4521         unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len - 1,
4522                              &cached_state, GFP_NOFS);
4523         return ret;
4524 }
4525
4526 static void __free_extent_buffer(struct extent_buffer *eb)
4527 {
4528         btrfs_leak_debug_del(&eb->leak_list);
4529         kmem_cache_free(extent_buffer_cache, eb);
4530 }
4531
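/*
 * Return true while the extent buffer has pages with I/O pending, or while
 * it is dirty or under writeback.
 */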
4532 int extent_buffer_under_io(struct extent_buffer *eb)
4533 {
4534         return (atomic_read(&eb->io_pages) ||
4535                 test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
4536                 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4537 }
4538
4539 /*
4540  * Helper for releasing the extent buffer's pages.
4541  */
4542 static void btrfs_release_extent_buffer_page(struct extent_buffer *eb)
4543 {
4544         unsigned long index;
4545         struct page *page;
4546         int mapped = !test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
4547
4548         BUG_ON(extent_buffer_under_io(eb));
4549
4550         index = num_extent_pages(eb->start, eb->len);
4551         if (index == 0)
4552                 return;
4553
4554         do {
4555                 index--;
4556                 page = eb->pages[index];
4557                 if (page && mapped) {
4558                         spin_lock(&page->mapping->private_lock);
4559                         /*
4560                          * We do this since we'll remove the pages after we've
4561                          * removed the eb from the radix tree, so we could race
4562                          * and have this page now attached to the new eb.  So
4563                          * only clear page_private if it's still connected to
4564                          * this eb.
4565                          */
4566                         if (PagePrivate(page) &&
4567                             page->private == (unsigned long)eb) {
4568                                 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4569                                 BUG_ON(PageDirty(page));
4570                                 BUG_ON(PageWriteback(page));
4571                                 /*
4572                                  * We need to make sure we haven't been attached
4573                                  * to a new eb.
4574                                  */
4575                                 ClearPagePrivate(page);
4576                                 set_page_private(page, 0);
4577                                 /* One for the page private */
4578                                 page_cache_release(page);
4579                         }
4580                         spin_unlock(&page->mapping->private_lock);
4581
4582                 }
4583                 if (page) {
4584                         /* One for when we allocated the page */
4585                         page_cache_release(page);
4586                 }
4587         } while (index != 0);
4588 }
4589
4590 /*
4591  * Helper for releasing the extent buffer.
4592  */
4593 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
4594 {
4595         btrfs_release_extent_buffer_page(eb);
4596         __free_extent_buffer(eb);
4597 }
4598
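/*
 * Allocate and initialize a bare extent buffer: set up the locks, wait
 * queues and reference counts.  No pages are attached here; callers fill in
 * eb->pages themselves.
 */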
4599 static struct extent_buffer *
4600 __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
4601                       unsigned long len, gfp_t mask)
4602 {
4603         struct extent_buffer *eb = NULL;
4604
4605         eb = kmem_cache_zalloc(extent_buffer_cache, mask);
4606         if (eb == NULL)
4607                 return NULL;
4608         eb->start = start;
4609         eb->len = len;
4610         eb->fs_info = fs_info;
4611         eb->bflags = 0;
4612         rwlock_init(&eb->lock);
4613         atomic_set(&eb->write_locks, 0);
4614         atomic_set(&eb->read_locks, 0);
4615         atomic_set(&eb->blocking_readers, 0);
4616         atomic_set(&eb->blocking_writers, 0);
4617         atomic_set(&eb->spinning_readers, 0);
4618         atomic_set(&eb->spinning_writers, 0);
4619         eb->lock_nested = 0;
4620         init_waitqueue_head(&eb->write_lock_wq);
4621         init_waitqueue_head(&eb->read_lock_wq);
4622
4623         btrfs_leak_debug_add(&eb->leak_list, &buffers);
4624
4625         spin_lock_init(&eb->refs_lock);
4626         atomic_set(&eb->refs, 1);
4627         atomic_set(&eb->io_pages, 0);
4628
4629         /*
4630          * Sanity checks, currently the maximum is 64k covered by 16x 4k pages
4631          */
4632         BUILD_BUG_ON(BTRFS_MAX_METADATA_BLOCKSIZE
4633                 > MAX_INLINE_EXTENT_BUFFER_SIZE);
4634         BUG_ON(len > MAX_INLINE_EXTENT_BUFFER_SIZE);
4635
4636         return eb;
4637 }
4638
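/*
 * Make a private copy of an extent buffer.  The clone is backed by freshly
 * allocated pages, carries a copy of the source's contents and is marked
 * uptodate and DUMMY, and it is not inserted into the buffer radix tree.
 */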
4639 struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
4640 {
4641         unsigned long i;
4642         struct page *p;
4643         struct extent_buffer *new;
4644         unsigned long num_pages = num_extent_pages(src->start, src->len);
4645
4646         new = __alloc_extent_buffer(NULL, src->start, src->len, GFP_NOFS);
4647         if (new == NULL)
4648                 return NULL;
4649
4650         for (i = 0; i < num_pages; i++) {
4651                 p = alloc_page(GFP_NOFS);
4652                 if (!p) {
4653                         btrfs_release_extent_buffer(new);
4654                         return NULL;
4655                 }
4656                 attach_extent_buffer_page(new, p);
4657                 WARN_ON(PageDirty(p));
4658                 SetPageUptodate(p);
4659                 new->pages[i] = p;
4660         }
4661
4662         copy_extent_buffer(new, src, 0, 0, src->len);
4663         set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags);
4664         set_bit(EXTENT_BUFFER_DUMMY, &new->bflags);
4665
4666         return new;
4667 }
4668
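/*
 * Allocate an extent buffer backed by newly allocated pages instead of the
 * btree inode's page cache.  The buffer is marked uptodate and DUMMY and is
 * not inserted into the buffer radix tree.
 */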
4669 struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len)
4670 {
4671         struct extent_buffer *eb;
4672         unsigned long num_pages = num_extent_pages(0, len);
4673         unsigned long i;
4674
4675         eb = __alloc_extent_buffer(NULL, start, len, GFP_NOFS);
4676         if (!eb)
4677                 return NULL;
4678
4679         for (i = 0; i < num_pages; i++) {
4680                 eb->pages[i] = alloc_page(GFP_NOFS);
4681                 if (!eb->pages[i])
4682                         goto err;
4683         }
4684         set_extent_buffer_uptodate(eb);
4685         btrfs_set_header_nritems(eb, 0);
4686         set_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
4687
4688         return eb;
4689 err:
4690         for (; i > 0; i--)
4691                 __free_page(eb->pages[i - 1]);
4692         __free_extent_buffer(eb);
4693         return NULL;
4694 }
4695
4696 static void check_buffer_tree_ref(struct extent_buffer *eb)
4697 {
4698         int refs;
4699         /* the ref bit is tricky.  We have to make sure it is set
4700          * if we have the buffer dirty.   Otherwise the
4701          * code to free a buffer can end up dropping a dirty
4702          * page
4703          *
4704          * Once the ref bit is set, it won't go away while the
4705          * buffer is dirty or in writeback, and it also won't
4706          * go away while we have the reference count on the
4707          * eb bumped.
4708          *
4709          * We can't just set the ref bit without bumping the
4710          * ref on the eb because free_extent_buffer might
4711          * see the ref bit and try to clear it.  If this happens
4712          * free_extent_buffer might end up dropping our original
4713          * ref by mistake and freeing the page before we are able
4714          * to add one more ref.
4715          *
4716          * So bump the ref count first, then set the bit.  If someone
4717          * beat us to it, drop the ref we added.
4718          */
4719         refs = atomic_read(&eb->refs);
4720         if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4721                 return;
4722
4723         spin_lock(&eb->refs_lock);
4724         if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4725                 atomic_inc(&eb->refs);
4726         spin_unlock(&eb->refs_lock);
4727 }
4728
4729 static void mark_extent_buffer_accessed(struct extent_buffer *eb,
4730                 struct page *accessed)
4731 {
4732         unsigned long num_pages, i;
4733
4734         check_buffer_tree_ref(eb);
4735
4736         num_pages = num_extent_pages(eb->start, eb->len);
4737         for (i = 0; i < num_pages; i++) {
4738                 struct page *p = eb->pages[i];
4739
4740                 if (p != accessed)
4741                         mark_page_accessed(p);
4742         }
4743 }
4744
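/*
 * Look up an extent buffer in fs_info->buffer_radix under RCU.  If one is
 * found and still has a non-zero ref count, take a reference, mark its
 * pages accessed and return it; otherwise return NULL.
 */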
4745 struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
4746                                          u64 start)
4747 {
4748         struct extent_buffer *eb;
4749
4750         rcu_read_lock();
4751         eb = radix_tree_lookup(&fs_info->buffer_radix,
4752                                start >> PAGE_CACHE_SHIFT);
4753         if (eb && atomic_inc_not_zero(&eb->refs)) {
4754                 rcu_read_unlock();
4755                 mark_extent_buffer_accessed(eb, NULL);
4756                 return eb;
4757         }
4758         rcu_read_unlock();
4759
4760         return NULL;
4761 }
4762
4763 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4764 struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
4765                                                u64 start, unsigned long len)
4766 {
4767         struct extent_buffer *eb, *exists = NULL;
4768         int ret;
4769
4770         eb = find_extent_buffer(fs_info, start);
4771         if (eb)
4772                 return eb;
4773         eb = alloc_dummy_extent_buffer(start, len);
4774         if (!eb)
4775                 return NULL;
4776         eb->fs_info = fs_info;
4777 again:
4778         ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
4779         if (ret)
4780                 goto free_eb;
4781         spin_lock(&fs_info->buffer_lock);
4782         ret = radix_tree_insert(&fs_info->buffer_radix,
4783                                 start >> PAGE_CACHE_SHIFT, eb);
4784         spin_unlock(&fs_info->buffer_lock);
4785         radix_tree_preload_end();
4786         if (ret == -EEXIST) {
4787                 exists = find_extent_buffer(fs_info, start);
4788                 if (exists)
4789                         goto free_eb;
4790                 else
4791                         goto again;
4792         }
4793         check_buffer_tree_ref(eb);
4794         set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
4795
4796         /*
4797          * We will free dummy extent buffers if they come into
4798          * free_extent_buffer with a ref count of 2, but if we are using this we
4799          * want the buffers to stay in memory until we're done with them, so
4800          * bump the ref count again.
4801          */
4802         atomic_inc(&eb->refs);
4803         return eb;
4804 free_eb:
4805         btrfs_release_extent_buffer(eb);
4806         return exists;
4807 }
4808 #endif
4809
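/*
 * Find or create the extent buffer covering [start, start + len).  Pages are
 * taken from the btree inode's mapping and attached to the buffer, which is
 * then inserted into the buffer radix tree.  If another buffer for the same
 * range races in first, that existing buffer is returned instead.
 */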
4810 struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
4811                                           u64 start, unsigned long len)
4812 {
4813         unsigned long num_pages = num_extent_pages(start, len);
4814         unsigned long i;
4815         unsigned long index = start >> PAGE_CACHE_SHIFT;
4816         struct extent_buffer *eb;
4817         struct extent_buffer *exists = NULL;
4818         struct page *p;
4819         struct address_space *mapping = fs_info->btree_inode->i_mapping;
4820         int uptodate = 1;
4821         int ret;
4822
4823         eb = find_extent_buffer(fs_info, start);
4824         if (eb)
4825                 return eb;
4826
4827         eb = __alloc_extent_buffer(fs_info, start, len, GFP_NOFS);
4828         if (!eb)
4829                 return NULL;
4830
4831         for (i = 0; i < num_pages; i++, index++) {
4832                 p = find_or_create_page(mapping, index, GFP_NOFS);
4833                 if (!p)
4834                         goto free_eb;
4835
4836                 spin_lock(&mapping->private_lock);
4837                 if (PagePrivate(p)) {
4838                         /*
4839                          * We could have already allocated an eb for this page
4840                          * and attached one, so let's see if we can get a ref on
4841                          * the existing eb, and if we can we know it's good and
4842                          * we can just return that one, else we know we can just
4843                          * overwrite page->private.
4844                          */
4845                         exists = (struct extent_buffer *)p->private;
4846                         if (atomic_inc_not_zero(&exists->refs)) {
4847                                 spin_unlock(&mapping->private_lock);
4848                                 unlock_page(p);
4849                                 page_cache_release(p);
4850                                 mark_extent_buffer_accessed(exists, p);
4851                                 goto free_eb;
4852                         }
4853
4854                         /*
4855                          * Do this so attach doesn't complain; we also need to
4856                          * drop the ref the old eb held.
4857                          */
4858                         ClearPagePrivate(p);
4859                         WARN_ON(PageDirty(p));
4860                         page_cache_release(p);
4861                 }
4862                 attach_extent_buffer_page(eb, p);
4863                 spin_unlock(&mapping->private_lock);
4864                 WARN_ON(PageDirty(p));
4865                 eb->pages[i] = p;
4866                 if (!PageUptodate(p))
4867                         uptodate = 0;
4868
4869                 /*
4870                  * see below about how we avoid a nasty race with release page
4871                  * and why we unlock later
4872                  */
4873         }
4874         if (uptodate)
4875                 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4876 again:
4877         ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
4878         if (ret)
4879                 goto free_eb;
4880
4881         spin_lock(&fs_info->buffer_lock);
4882         ret = radix_tree_insert(&fs_info->buffer_radix,
4883                                 start >> PAGE_CACHE_SHIFT, eb);
4884         spin_unlock(&fs_info->buffer_lock);
4885         radix_tree_preload_end();
4886         if (ret == -EEXIST) {
4887                 exists = find_extent_buffer(fs_info, start);
4888                 if (exists)
4889                         goto free_eb;
4890                 else
4891                         goto again;
4892         }
4893         /* add one reference for the tree */
4894         check_buffer_tree_ref(eb);
4895         set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
4896
4897         /*
4898          * there is a race where release page may have
4899          * tried to find this extent buffer in the radix
4900          * but failed.  It will tell the VM it is safe to
4901          * reclaim the page, and it will clear the page private bit.
4902          * We must make sure to set the page private bit properly
4903          * after the extent buffer is in the radix tree so
4904          * it doesn't get lost
4905          */
4906         SetPageChecked(eb->pages[0]);
4907         for (i = 1; i < num_pages; i++) {
4908                 p = eb->pages[i];
4909                 ClearPageChecked(p);
4910                 unlock_page(p);
4911         }
4912         unlock_page(eb->pages[0]);
4913         return eb;
4914
4915 free_eb:
4916         for (i = 0; i < num_pages; i++) {
4917                 if (eb->pages[i])
4918                         unlock_page(eb->pages[i]);
4919         }
4920
4921         WARN_ON(!atomic_dec_and_test(&eb->refs));
4922         btrfs_release_extent_buffer(eb);
4923         return exists;
4924 }
4925
4926 static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
4927 {
4928         struct extent_buffer *eb =
4929                         container_of(head, struct extent_buffer, rcu_head);
4930
4931         __free_extent_buffer(eb);
4932 }
4933
4934 /* Expects to have eb->eb_lock already held */
4935 static int release_extent_buffer(struct extent_buffer *eb)
4936 {
4937         WARN_ON(atomic_read(&eb->refs) == 0);
4938         if (atomic_dec_and_test(&eb->refs)) {
4939                 if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
4940                         struct btrfs_fs_info *fs_info = eb->fs_info;
4941
4942                         spin_unlock(&eb->refs_lock);
4943
4944                         spin_lock(&fs_info->buffer_lock);
4945                         radix_tree_delete(&fs_info->buffer_radix,
4946                                           eb->start >> PAGE_CACHE_SHIFT);
4947                         spin_unlock(&fs_info->buffer_lock);
4948                 } else {
4949                         spin_unlock(&eb->refs_lock);
4950                 }
4951
4952                 /* Should be safe to release our pages at this point */
4953                 btrfs_release_extent_buffer_page(eb);
4954                 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
4955                 return 1;
4956         }
4957         spin_unlock(&eb->refs_lock);
4958
4959         return 0;
4960 }
4961
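/*
 * Drop a reference on an extent buffer.  The tree reference is dropped here
 * for dummy or stale buffers that are down to their last user; the buffer
 * itself is freed by release_extent_buffer() once the final reference goes
 * away.
 */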
4962 void free_extent_buffer(struct extent_buffer *eb)
4963 {
4964         int refs;
4965         int old;
4966         if (!eb)
4967                 return;
4968
4969         while (1) {
4970                 refs = atomic_read(&eb->refs);
4971                 if (refs <= 3)
4972                         break;
4973                 old = atomic_cmpxchg(&eb->refs, refs, refs - 1);
4974                 if (old == refs)
4975                         return;
4976         }
4977
4978         spin_lock(&eb->refs_lock);
4979         if (atomic_read(&eb->refs) == 2 &&
4980             test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))
4981                 atomic_dec(&eb->refs);
4982
4983         if (atomic_read(&eb->refs) == 2 &&
4984             test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
4985             !extent_buffer_under_io(eb) &&
4986             test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4987                 atomic_dec(&eb->refs);
4988
4989         /*
4990          * I know this is terrible, but it's temporary until we stop tracking
4991          * the uptodate bits and such for the extent buffers.
4992          */
4993         release_extent_buffer(eb);
4994 }
4995
4996 void free_extent_buffer_stale(struct extent_buffer *eb)
4997 {
4998         if (!eb)
4999                 return;
5000
5001         spin_lock(&eb->refs_lock);
5002         set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
5003
5004         if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
5005             test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
5006                 atomic_dec(&eb->refs);
5007         release_extent_buffer(eb);
5008 }
5009
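/*
 * Clear the dirty state of every page backing the extent buffer, including
 * the PAGECACHE_TAG_DIRTY tag in the mapping's radix tree.
 */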
5010 void clear_extent_buffer_dirty(struct extent_buffer *eb)
5011 {
5012         unsigned long i;
5013         unsigned long num_pages;
5014         struct page *page;
5015
5016         num_pages = num_extent_pages(eb->start, eb->len);
5017
5018         for (i = 0; i < num_pages; i++) {
5019                 page = eb->pages[i];
5020                 if (!PageDirty(page))
5021                         continue;
5022
5023                 lock_page(page);
5024                 WARN_ON(!PagePrivate(page));
5025
5026                 clear_page_dirty_for_io(page);
5027                 spin_lock_irq(&page->mapping->tree_lock);
5028                 if (!PageDirty(page)) {
5029                         radix_tree_tag_clear(&page->mapping->page_tree,
5030                                                 page_index(page),
5031                                                 PAGECACHE_TAG_DIRTY);
5032                 }
5033                 spin_unlock_irq(&page->mapping->tree_lock);
5034                 ClearPageError(page);
5035                 unlock_page(page);
5036         }
5037         WARN_ON(atomic_read(&eb->refs) == 0);
5038 }
5039
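/*
 * Mark every page backing the extent buffer dirty.  Returns whether the
 * buffer was already dirty before the call.
 */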
5040 int set_extent_buffer_dirty(struct extent_buffer *eb)
5041 {
5042         unsigned long i;
5043         unsigned long num_pages;
5044         int was_dirty = 0;
5045
5046         check_buffer_tree_ref(eb);
5047
5048         was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
5049
5050         num_pages = num_extent_pages(eb->start, eb->len);
5051         WARN_ON(atomic_read(&eb->refs) == 0);
5052         WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
5053
5054         for (i = 0; i < num_pages; i++)
5055                 set_page_dirty(eb->pages[i]);
5056         return was_dirty;
5057 }
5058
5059 int clear_extent_buffer_uptodate(struct extent_buffer *eb)
5060 {
5061         unsigned long i;
5062         struct page *page;
5063         unsigned long num_pages;
5064
5065         clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
5066         num_pages = num_extent_pages(eb->start, eb->len);
5067         for (i = 0; i < num_pages; i++) {
5068                 page = eb->pages[i];
5069                 if (page)
5070                         ClearPageUptodate(page);
5071         }
5072         return 0;
5073 }
5074
5075 int set_extent_buffer_uptodate(struct extent_buffer *eb)
5076 {
5077         unsigned long i;
5078         struct page *page;
5079         unsigned long num_pages;
5080
5081         set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
5082         num_pages = num_extent_pages(eb->start, eb->len);
5083         for (i = 0; i < num_pages; i++) {
5084                 page = eb->pages[i];
5085                 SetPageUptodate(page);
5086         }
5087         return 0;
5088 }
5089
5090 int extent_buffer_uptodate(struct extent_buffer *eb)
5091 {
5092         return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
5093 }
5094
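/*
 * Read the extent buffer's pages from disk if they are not already
 * uptodate.  With WAIT_NONE the pages are only trylocked and we back off on
 * contention; with WAIT_COMPLETE we wait for the reads to finish and return
 * -EIO if any page failed.
 */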
5095 int read_extent_buffer_pages(struct extent_io_tree *tree,
5096                              struct extent_buffer *eb, u64 start, int wait,
5097                              get_extent_t *get_extent, int mirror_num)
5098 {
5099         unsigned long i;
5100         unsigned long start_i;
5101         struct page *page;
5102         int err;
5103         int ret = 0;
5104         int locked_pages = 0;
5105         int all_uptodate = 1;
5106         unsigned long num_pages;
5107         unsigned long num_reads = 0;
5108         struct bio *bio = NULL;
5109         unsigned long bio_flags = 0;
5110
5111         if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
5112                 return 0;
5113
5114         if (start) {
5115                 WARN_ON(start < eb->start);
5116                 start_i = (start >> PAGE_CACHE_SHIFT) -
5117                         (eb->start >> PAGE_CACHE_SHIFT);
5118         } else {
5119                 start_i = 0;
5120         }
5121
5122         num_pages = num_extent_pages(eb->start, eb->len);
5123         for (i = start_i; i < num_pages; i++) {
5124                 page = eb->pages[i];
5125                 if (wait == WAIT_NONE) {
5126                         if (!trylock_page(page))
5127                                 goto unlock_exit;
5128                 } else {
5129                         lock_page(page);
5130                 }
5131                 locked_pages++;
5132                 if (!PageUptodate(page)) {
5133                         num_reads++;
5134                         all_uptodate = 0;
5135                 }
5136         }
5137         if (all_uptodate) {
5138                 if (start_i == 0)
5139                         set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
5140                 goto unlock_exit;
5141         }
5142
5143         clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
5144         eb->read_mirror = 0;
5145         atomic_set(&eb->io_pages, num_reads);
5146         for (i = start_i; i < num_pages; i++) {
5147                 page = eb->pages[i];
5148                 if (!PageUptodate(page)) {
5149                         ClearPageError(page);
5150                         err = __extent_read_full_page(tree, page,
5151                                                       get_extent, &bio,
5152                                                       mirror_num, &bio_flags,
5153                                                       READ | REQ_META);
5154                         if (err)
5155                                 ret = err;
5156                 } else {
5157                         unlock_page(page);
5158                 }
5159         }
5160
5161         if (bio) {
5162                 err = submit_one_bio(READ | REQ_META, bio, mirror_num,
5163                                      bio_flags);
5164                 if (err)
5165                         return err;
5166         }
5167
5168         if (ret || wait != WAIT_COMPLETE)
5169                 return ret;
5170
5171         for (i = start_i; i < num_pages; i++) {
5172                 page = eb->pages[i];
5173                 wait_on_page_locked(page);
5174                 if (!PageUptodate(page))
5175                         ret = -EIO;
5176         }
5177
5178         return ret;
5179
5180 unlock_exit:
5181         i = start_i;
5182         while (locked_pages > 0) {
5183                 page = eb->pages[i];
5184                 i++;
5185                 unlock_page(page);
5186                 locked_pages--;
5187         }
5188         return ret;
5189 }
5190
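/*
 * Copy 'len' bytes starting at offset 'start' in the extent buffer into the
 * kernel buffer 'dstv', walking the backing pages one at a time.
 */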
5191 void read_extent_buffer(struct extent_buffer *eb, void *dstv,
5192                         unsigned long start,
5193                         unsigned long len)
5194 {
5195         size_t cur;
5196         size_t offset;
5197         struct page *page;
5198         char *kaddr;
5199         char *dst = (char *)dstv;
5200         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
5201         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
5202
5203         WARN_ON(start > eb->len);
5204         WARN_ON(start + len > eb->start + eb->len);
5205
5206         offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
5207
5208         while (len > 0) {
5209                 page = eb->pages[i];
5210
5211                 cur = min(len, (PAGE_CACHE_SIZE - offset));
5212                 kaddr = page_address(page);
5213                 memcpy(dst, kaddr + offset, cur);
5214
5215                 dst += cur;
5216                 len -= cur;
5217                 offset = 0;
5218                 i++;
5219         }
5220 }
5221
5222 int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dstv,
5223                         unsigned long start,
5224                         unsigned long len)
5225 {
5226         size_t cur;
5227         size_t offset;
5228         struct page *page;
5229         char *kaddr;
5230         char __user *dst = (char __user *)dstv;
5231         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
5232         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
5233         int ret = 0;
5234
5235         WARN_ON(start > eb->len);
5236         WARN_ON(start + len > eb->start + eb->len);
5237
5238         offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
5239
5240         while (len > 0) {
5241                 page = eb->pages[i];
5242
5243                 cur = min(len, (PAGE_CACHE_SIZE - offset));
5244                 kaddr = page_address(page);
5245                 if (copy_to_user(dst, kaddr + offset, cur)) {
5246                         ret = -EFAULT;
5247                         break;
5248                 }
5249
5250                 dst += cur;
5251                 len -= cur;
5252                 offset = 0;
5253                 i++;
5254         }
5255
5256         return ret;
5257 }
5258
5259 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
5260                                unsigned long min_len, char **map,
5261                                unsigned long *map_start,
5262                                unsigned long *map_len)
5263 {
5264         size_t offset = start & (PAGE_CACHE_SIZE - 1);
5265         char *kaddr;
5266         struct page *p;
5267         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
5268         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
5269         unsigned long end_i = (start_offset + start + min_len - 1) >>
5270                 PAGE_CACHE_SHIFT;
5271
5272         if (i != end_i)
5273                 return -EINVAL;
5274
5275         if (i == 0) {
5276                 offset = start_offset;
5277                 *map_start = 0;
5278         } else {
5279                 offset = 0;
5280                 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
5281         }
5282
5283         if (start + min_len > eb->len) {
5284                 WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
5285                        "wanted %lu %lu\n",
5286                        eb->start, eb->len, start, min_len);
5287                 return -EINVAL;
5288         }
5289
5290         p = eb->pages[i];
5291         kaddr = page_address(p);
5292         *map = kaddr + offset;
5293         *map_len = PAGE_CACHE_SIZE - offset;
5294         return 0;
5295 }
5296
5297 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
5298                           unsigned long start,
5299                           unsigned long len)
5300 {
5301         size_t cur;
5302         size_t offset;
5303         struct page *page;
5304         char *kaddr;
5305         char *ptr = (char *)ptrv;
5306         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
5307         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
5308         int ret = 0;
5309
5310         WARN_ON(start > eb->len);
5311         WARN_ON(start + len > eb->start + eb->len);
5312
5313         offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
5314
5315         while (len > 0) {
5316                 page = eb->pages[i];
5317
5318                 cur = min(len, (PAGE_CACHE_SIZE - offset));
5319
5320                 kaddr = page_address(page);
5321                 ret = memcmp(ptr, kaddr + offset, cur);
5322                 if (ret)
5323                         break;
5324
5325                 ptr += cur;
5326                 len -= cur;
5327                 offset = 0;
5328                 i++;
5329         }
5330         return ret;
5331 }
5332
5333 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
5334                          unsigned long start, unsigned long len)
5335 {
5336         size_t cur;
5337         size_t offset;
5338         struct page *page;
5339         char *kaddr;
5340         char *src = (char *)srcv;
5341         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
5342         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
5343
5344         WARN_ON(start > eb->len);
5345         WARN_ON(start + len > eb->start + eb->len);
5346
5347         offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
5348
5349         while (len > 0) {
5350                 page = eb->pages[i];
5351                 WARN_ON(!PageUptodate(page));
5352
5353                 cur = min(len, PAGE_CACHE_SIZE - offset);
5354                 kaddr = page_address(page);
5355                 memcpy(kaddr + offset, src, cur);
5356
5357                 src += cur;
5358                 len -= cur;
5359                 offset = 0;
5360                 i++;
5361         }
5362 }
5363
5364 void memset_extent_buffer(struct extent_buffer *eb, char c,
5365                           unsigned long start, unsigned long len)
5366 {
5367         size_t cur;
5368         size_t offset;
5369         struct page *page;
5370         char *kaddr;
5371         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
5372         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
5373
5374         WARN_ON(start > eb->len);
5375         WARN_ON(start + len > eb->start + eb->len);
5376
5377         offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
5378
5379         while (len > 0) {
5380                 page = eb->pages[i];
5381                 WARN_ON(!PageUptodate(page));
5382
5383                 cur = min(len, PAGE_CACHE_SIZE - offset);
5384                 kaddr = page_address(page);
5385                 memset(kaddr + offset, c, cur);
5386
5387                 len -= cur;
5388                 offset = 0;
5389                 i++;
5390         }
5391 }
5392
5393 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
5394                         unsigned long dst_offset, unsigned long src_offset,
5395                         unsigned long len)
5396 {
5397         u64 dst_len = dst->len;
5398         size_t cur;
5399         size_t offset;
5400         struct page *page;
5401         char *kaddr;
5402         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
5403         unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
5404
5405         WARN_ON(src->len != dst_len);
5406
5407         offset = (start_offset + dst_offset) &
5408                 (PAGE_CACHE_SIZE - 1);
5409
5410         while (len > 0) {
5411                 page = dst->pages[i];
5412                 WARN_ON(!PageUptodate(page));
5413
5414                 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
5415
5416                 kaddr = page_address(page);
5417                 read_extent_buffer(src, kaddr + offset, src_offset, cur);
5418
5419                 src_offset += cur;
5420                 len -= cur;
5421                 offset = 0;
5422                 i++;
5423         }
5424 }
5425
5426 static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
5427 {
5428         unsigned long distance = (src > dst) ? src - dst : dst - src;
5429         return distance < len;
5430 }
5431
5432 static void copy_pages(struct page *dst_page, struct page *src_page,
5433                        unsigned long dst_off, unsigned long src_off,
5434                        unsigned long len)
5435 {
5436         char *dst_kaddr = page_address(dst_page);
5437         char *src_kaddr;
5438         int must_memmove = 0;
5439
5440         if (dst_page != src_page) {
5441                 src_kaddr = page_address(src_page);
5442         } else {
5443                 src_kaddr = dst_kaddr;
5444                 if (areas_overlap(src_off, dst_off, len))
5445                         must_memmove = 1;
5446         }
5447
5448         if (must_memmove)
5449                 memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
5450         else
5451                 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
5452 }
5453
5454 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
5455                            unsigned long src_offset, unsigned long len)
5456 {
5457         size_t cur;
5458         size_t dst_off_in_page;
5459         size_t src_off_in_page;
5460         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
5461         unsigned long dst_i;
5462         unsigned long src_i;
5463
5464         if (src_offset + len > dst->len) {
5465                 printk(KERN_ERR "BTRFS: memmove bogus src_offset %lu move "
5466                        "len %lu dst len %lu\n", src_offset, len, dst->len);
5467                 BUG_ON(1);
5468         }
5469         if (dst_offset + len > dst->len) {
5470                 printk(KERN_ERR "BTRFS: memmove bogus dst_offset %lu move "
5471                        "len %lu dst len %lu\n", dst_offset, len, dst->len);
5472                 BUG_ON(1);
5473         }
5474
5475         while (len > 0) {
5476                 dst_off_in_page = (start_offset + dst_offset) &
5477                         (PAGE_CACHE_SIZE - 1);
5478                 src_off_in_page = (start_offset + src_offset) &
5479                         (PAGE_CACHE_SIZE - 1);
5480
5481                 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
5482                 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
5483
5484                 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
5485                                                src_off_in_page));
5486                 cur = min_t(unsigned long, cur,
5487                         (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
5488
5489                 copy_pages(dst->pages[dst_i], dst->pages[src_i],
5490                            dst_off_in_page, src_off_in_page, cur);
5491
5492                 src_offset += cur;
5493                 dst_offset += cur;
5494                 len -= cur;
5495         }
5496 }
5497
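/*
 * Move a range within an extent buffer.  When the destination is below the
 * source the copy is handed off to memcpy_extent_buffer(); otherwise the
 * data is copied back to front so overlapping ranges stay intact.
 */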
5498 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
5499                            unsigned long src_offset, unsigned long len)
5500 {
5501         size_t cur;
5502         size_t dst_off_in_page;
5503         size_t src_off_in_page;
5504         unsigned long dst_end = dst_offset + len - 1;
5505         unsigned long src_end = src_offset + len - 1;
5506         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
5507         unsigned long dst_i;
5508         unsigned long src_i;
5509
5510         if (src_offset + len > dst->len) {
5511                 printk(KERN_ERR "BTRFS: memmove bogus src_offset %lu move "
5512                        "len %lu len %lu\n", src_offset, len, dst->len);
5513                 BUG_ON(1);
5514         }
5515         if (dst_offset + len > dst->len) {
5516                 printk(KERN_ERR "BTRFS: memmove bogus dst_offset %lu move "
5517                        "len %lu len %lu\n", dst_offset, len, dst->len);
5518                 BUG_ON(1);
5519         }
5520         if (dst_offset < src_offset) {
5521                 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
5522                 return;
5523         }
5524         while (len > 0) {
5525                 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
5526                 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
5527
5528                 dst_off_in_page = (start_offset + dst_end) &
5529                         (PAGE_CACHE_SIZE - 1);
5530                 src_off_in_page = (start_offset + src_end) &
5531                         (PAGE_CACHE_SIZE - 1);
5532
5533                 cur = min_t(unsigned long, len, src_off_in_page + 1);
5534                 cur = min(cur, dst_off_in_page + 1);
5535                 copy_pages(dst->pages[dst_i], dst->pages[src_i],
5536                            dst_off_in_page - cur + 1,
5537                            src_off_in_page - cur + 1, cur);
5538
5539                 dst_end -= cur;
5540                 src_end -= cur;
5541                 len -= cur;
5542         }
5543 }
5544
5545 int try_release_extent_buffer(struct page *page)
5546 {
5547         struct extent_buffer *eb;
5548
5549         /*
5550          * We need to make sure nobody is attaching this page to an eb right
5551          * now.
5552          */
5553         spin_lock(&page->mapping->private_lock);
5554         if (!PagePrivate(page)) {
5555                 spin_unlock(&page->mapping->private_lock);
5556                 return 1;
5557         }
5558
5559         eb = (struct extent_buffer *)page->private;
5560         BUG_ON(!eb);
5561
5562         /*
5563          * This is a little awful but should be ok; we need to make sure that
5564          * the eb doesn't disappear out from under us while we're looking at
5565          * this page.
5566          */
5567         spin_lock(&eb->refs_lock);
5568         if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
5569                 spin_unlock(&eb->refs_lock);
5570                 spin_unlock(&page->mapping->private_lock);
5571                 return 0;
5572         }
5573         spin_unlock(&page->mapping->private_lock);
5574
5575         /*
5576          * If tree ref isn't set then we know the ref on this eb is a real ref,
5577          * so just return; this page will likely be freed soon anyway.
5578          */
5579         if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
5580                 spin_unlock(&eb->refs_lock);
5581                 return 0;
5582         }
5583
5584         return release_extent_buffer(eb);
5585 }