/*
 *  linux/fs/nfs/blocklayout/extents.c
 *
 *  Module for the NFSv4.1 pNFS block layout driver.
 *
 *  Copyright (c) 2006 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson <andros@citi.umich.edu>
 *  Fred Isaman <iisaman@umich.edu>
 *
 * Permission is granted to use, copy, create derivative works and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the University of Michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization.  If
 * the above copyright notice or any other identification of the
 * University of Michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * This software is provided as is, without representation from the
 * University of Michigan as to its fitness for any purpose, and without
 * warranty by the University of Michigan of any kind, either express
 * or implied, including without limitation the implied warranties of
 * merchantability and fitness for a particular purpose.  The Regents
 * of the University of Michigan shall not be liable for any damages,
 * including special, indirect, incidental, or consequential damages,
 * with respect to any claim arising out of or in connection with the use
 * of the software, even if it has been or is hereafter advised of the
 * possibility of such damages.
 */

#include "blocklayout.h"
#define NFSDBG_FACILITY         NFSDBG_PNFS_LD

/* Bit numbers */
#define EXTENT_INITIALIZED 0
#define EXTENT_WRITTEN     1
#define EXTENT_IN_COMMIT   2
#define INTERNAL_EXISTS    MY_MAX_TAGS
#define INTERNAL_MASK      ((1 << INTERNAL_EXISTS) - 1)

/* Returns largest t<=s s.t. t%base==0 */
static inline sector_t normalize(sector_t s, int base)
{
        sector_t tmp = s; /* Since sector_div modifies its argument */
        return s - sector_div(tmp, base);
}

static inline sector_t normalize_up(sector_t s, int base)
{
        return normalize(s + base - 1, base);
}
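
/* For example, with base 8: normalize(1003, 8) == 1000 and
 * normalize_up(1003, 8) == 1008; already-aligned values are unchanged.
 */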

/* Complete stub using a list while determining the API wanted */

/* Returns the tags at sector s, or a negative errno */
static int32_t _find_entry(struct my_tree *tree, u64 s)
{
        struct pnfs_inval_tracking *pos;

        dprintk("%s(%llu) enter\n", __func__, s);
        list_for_each_entry_reverse(pos, &tree->mtt_stub, it_link) {
                if (pos->it_sector > s)
                        continue;
                else if (pos->it_sector == s)
                        return pos->it_tags & INTERNAL_MASK;
                else
                        break;
        }
        return -ENOENT;
}

static inline
int _has_tag(struct my_tree *tree, u64 s, int32_t tag)
{
        int32_t tags;

        dprintk("%s(%llu, %i) enter\n", __func__, s, tag);
        s = normalize(s, tree->mtt_step_size);
        tags = _find_entry(tree, s);
        if ((tags < 0) || !(tags & (1 << tag)))
                return 0;
        else
                return 1;
}

/* Creates an entry with tag at sector s, or if the entry already exists,
 * unions tag into it.  A newly created entry uses the caller-supplied
 * storage, so callers passing NULL must guarantee the entry already
 * exists (see _preload_range).  Returns the number of entries added
 * (0 or 1).
 */
static int _add_entry(struct my_tree *tree, u64 s, int32_t tag,
                      struct pnfs_inval_tracking *storage)
{
        int found = 0;
        struct pnfs_inval_tracking *pos;

        dprintk("%s(%llu, %i, %p) enter\n", __func__, s, tag, storage);
        list_for_each_entry_reverse(pos, &tree->mtt_stub, it_link) {
                if (pos->it_sector > s)
                        continue;
                else if (pos->it_sector == s) {
                        found = 1;
                        break;
                } else
                        break;
        }
        if (found) {
                pos->it_tags |= (1 << tag);
                return 0;
        } else {
                struct pnfs_inval_tracking *new;
                new = storage;
                new->it_sector = s;
                new->it_tags = (1 << tag);
                list_add(&new->it_link, &pos->it_link);
                return 1;
        }
}

/* XXXX Really want option to not create */
/* Over range, unions tag with existing entries, else creates entry with tag */
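/* Note: _add_entry is called with NULL storage here, so the range must
 * already be populated (callers preload via _preload_range); a genuinely
 * new entry would dereference the NULL storage pointer.
 */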
static int _set_range(struct my_tree *tree, int32_t tag, u64 s, u64 length)
{
        u64 i;

        dprintk("%s(%i, %llu, %llu) enter\n", __func__, tag, s, length);
        for (i = normalize(s, tree->mtt_step_size); i < s + length;
             i += tree->mtt_step_size)
                if (_add_entry(tree, i, tag, NULL))
                        return -ENOMEM;
        return 0;
}

/* Ensure that future operations on given range of tree will not malloc */
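/* Allocates one tracking entry per step in the block-aligned range before
 * taking im_lock; _add_entry then consumes storage[used] only when it
 * actually inserts a new entry, and any unused entries are freed at
 * out_cleanup.
 */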
static int _preload_range(struct pnfs_inval_markings *marks,
                u64 offset, u64 length)
{
        u64 start, end, s;
        int count, i, used = 0, status = -ENOMEM;
        struct pnfs_inval_tracking **storage;
        struct my_tree  *tree = &marks->im_tree;

        dprintk("%s(%llu, %llu) enter\n", __func__, offset, length);
        start = normalize(offset, tree->mtt_step_size);
        end = normalize_up(offset + length, tree->mtt_step_size);
        count = (int)(end - start) / (int)tree->mtt_step_size;

        /* Pre-malloc what memory we might need */
        storage = kcalloc(count, sizeof(*storage), GFP_NOFS);
        if (!storage)
                return -ENOMEM;
        for (i = 0; i < count; i++) {
                storage[i] = kmalloc(sizeof(struct pnfs_inval_tracking),
                                     GFP_NOFS);
                if (!storage[i])
                        goto out_cleanup;
        }

        spin_lock_bh(&marks->im_lock);
        for (s = start; s < end; s += tree->mtt_step_size)
                used += _add_entry(tree, s, INTERNAL_EXISTS, storage[used]);
        spin_unlock_bh(&marks->im_lock);

        status = 0;

 out_cleanup:
        for (i = used; i < count; i++) {
                if (!storage[i])
                        break;
                kfree(storage[i]);
        }
        kfree(storage);
        return status;
}

/* We are relying on page lock to serialize this */
int bl_is_sector_init(struct pnfs_inval_markings *marks, sector_t isect)
{
        int rv;

        spin_lock_bh(&marks->im_lock);
        rv = _has_tag(&marks->im_tree, isect, EXTENT_INITIALIZED);
        spin_unlock_bh(&marks->im_lock);
        return rv;
}

/* Assume start, end already sector aligned */
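/* Walks the stub list in reverse (largest sector first).  The entry for
 * the last step in [start, end) must carry the tag, and from there each
 * preceding entry must be exactly one step lower and tagged, forming a
 * contiguous tagged chain down past start.
 */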
static int
_range_has_tag(struct my_tree *tree, u64 start, u64 end, int32_t tag)
{
        struct pnfs_inval_tracking *pos;
        u64 expect = 0;

        dprintk("%s(%llu, %llu, %i) enter\n", __func__, start, end, tag);
        list_for_each_entry_reverse(pos, &tree->mtt_stub, it_link) {
                if (pos->it_sector >= end)
                        continue;
                if (!expect) {
                        if ((pos->it_sector == end - tree->mtt_step_size) &&
                            (pos->it_tags & (1 << tag))) {
                                expect = pos->it_sector - tree->mtt_step_size;
                                if (pos->it_sector < tree->mtt_step_size || expect < start)
                                        return 1;
                                continue;
                        } else {
                                return 0;
                        }
                }
                if (pos->it_sector != expect || !(pos->it_tags & (1 << tag)))
                        return 0;
                expect -= tree->mtt_step_size;
                if (expect < start)
                        return 1;
        }
        return 0;
}

static int is_range_written(struct pnfs_inval_markings *marks,
                            sector_t start, sector_t end)
{
        int rv;

        spin_lock_bh(&marks->im_lock);
        rv = _range_has_tag(&marks->im_tree, start, end, EXTENT_WRITTEN);
        spin_unlock_bh(&marks->im_lock);
        return rv;
}

/* Marks sectors in [offset, offset + length) as having been initialized.
 * All lengths are step-aligned, where step is min(pagesize, blocksize).
 * Currently assumes offset is page-aligned.
 */
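/* The enclosing block-aligned range is preloaded so that _set_range cannot
 * fail to find entries, but only [offset, offset + length) is actually
 * tagged EXTENT_INITIALIZED.
 */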
int bl_mark_sectors_init(struct pnfs_inval_markings *marks,
                             sector_t offset, sector_t length)
{
        sector_t start, end;

        dprintk("%s(offset=%llu,len=%llu) enter\n",
                __func__, (u64)offset, (u64)length);

        start = normalize(offset, marks->im_block_size);
        end = normalize_up(offset + length, marks->im_block_size);
        if (_preload_range(marks, start, end - start))
                goto outerr;

        spin_lock_bh(&marks->im_lock);
        if (_set_range(&marks->im_tree, EXTENT_INITIALIZED, offset, length))
                goto out_unlock;
        spin_unlock_bh(&marks->im_lock);

        return 0;

out_unlock:
        spin_unlock_bh(&marks->im_lock);
outerr:
        return -ENOMEM;
}

/* Marks sectors in [offset, offset + length) as having been written to disk.
 * All lengths should be block aligned.
 */
static int mark_written_sectors(struct pnfs_inval_markings *marks,
                                sector_t offset, sector_t length)
{
        int status;

        dprintk("%s(offset=%llu,len=%llu) enter\n", __func__,
                (u64)offset, (u64)length);
        spin_lock_bh(&marks->im_lock);
        status = _set_range(&marks->im_tree, EXTENT_WRITTEN, offset, length);
        spin_unlock_bh(&marks->im_lock);
        return status;
}

static void print_short_extent(struct pnfs_block_short_extent *be)
{
        dprintk("PRINT SHORT EXTENT extent %p\n", be);
        if (be) {
                dprintk("        be_f_offset %llu\n", (u64)be->bse_f_offset);
                dprintk("        be_length   %llu\n", (u64)be->bse_length);
        }
}

static void print_clist(struct list_head *list, unsigned int count)
{
        struct pnfs_block_short_extent *be;
        unsigned int i = 0;

        ifdebug(FACILITY) {
                printk(KERN_DEBUG "****************\n");
                printk(KERN_DEBUG "Extent list looks like:\n");
                list_for_each_entry(be, list, bse_node) {
                        i++;
                        print_short_extent(be);
                }
                if (i != count)
                        printk(KERN_DEBUG "\n\nExpected %u entries\n\n\n", count);
                printk(KERN_DEBUG "****************\n");
        }
}

/* Note: In theory, we should do more checking that devid's match between
 * old and new, but if they don't, the lists are too corrupt to salvage anyway.
 */
/* Note this is very similar to bl_add_merge_extent */
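/* The list is kept sorted by bse_f_offset.  First scan for the insertion
 * point, absorbing any left neighbor that overlaps or abuts new; then scan
 * forward from new, absorbing overlapped extents on the same device.
 */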
static void add_to_commitlist(struct pnfs_block_layout *bl,
                              struct pnfs_block_short_extent *new)
{
        struct list_head *clist = &bl->bl_commit;
        struct pnfs_block_short_extent *old, *save;
        sector_t end = new->bse_f_offset + new->bse_length;

        dprintk("%s enter\n", __func__);
        print_short_extent(new);
        print_clist(clist, bl->bl_count);
        bl->bl_count++;
        /* Scan for proper place to insert, extending new to the left
         * as much as possible.
         */
        list_for_each_entry_safe(old, save, clist, bse_node) {
                if (new->bse_f_offset < old->bse_f_offset)
                        break;
                if (end <= old->bse_f_offset + old->bse_length) {
                        /* Range is already in list */
                        bl->bl_count--;
                        kfree(new);
                        return;
                } else if (new->bse_f_offset <=
                                old->bse_f_offset + old->bse_length) {
                        /* new overlaps or abuts existing be */
                        if (new->bse_mdev == old->bse_mdev) {
                                /* extend new to fully replace old */
                                new->bse_length += new->bse_f_offset -
                                                old->bse_f_offset;
                                new->bse_f_offset = old->bse_f_offset;
                                list_del(&old->bse_node);
                                bl->bl_count--;
                                kfree(old);
                        }
                }
        }
        /* Note that if we never hit the above break, old will not point to a
         * valid extent.  However, in that case &old->bse_node==list.
         */
        list_add_tail(&new->bse_node, &old->bse_node);
        /* Scan forward for overlaps.  If we find any, extend new and
         * remove the overlapped extent.
         */
        old = list_prepare_entry(new, clist, bse_node);
        list_for_each_entry_safe_continue(old, save, clist, bse_node) {
                if (end < old->bse_f_offset)
                        break;
                /* new overlaps or abuts old */
                if (new->bse_mdev == old->bse_mdev) {
                        if (end < old->bse_f_offset + old->bse_length) {
                                /* extend new to fully cover old */
                                end = old->bse_f_offset + old->bse_length;
                                new->bse_length = end - new->bse_f_offset;
                        }
                        list_del(&old->bse_node);
                        bl->bl_count--;
                        kfree(old);
                }
        }
        dprintk("%s: after merging\n", __func__);
        print_clist(clist, bl->bl_count);
}

/* Note the range described by offset, length is guaranteed to be contained
 * within be.
 * new will be freed, either by this function or by add_to_commitlist if
 * they decide not to use it, or after LAYOUTCOMMIT uses it in the commitlist.
 */
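/* Illustrative example (hypothetical numbers): with bl_blocksize 8,
 * offset 10 and length 20, the left edge normalizes to 8; it is kept only
 * if sectors [8, 10) are already WRITTEN, else it is pulled in to 16.
 * Likewise end 30 rounds up to 32 if [30, 32) is WRITTEN, else back to 24.
 */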
int bl_mark_for_commit(struct pnfs_block_extent *be,
                    sector_t offset, sector_t length,
                    struct pnfs_block_short_extent *new)
{
        sector_t new_end, end = offset + length;
        struct pnfs_block_layout *bl = container_of(be->be_inval,
                                                    struct pnfs_block_layout,
                                                    bl_inval);

        mark_written_sectors(be->be_inval, offset, length);
        /* We want to add the range to commit list, but it must be
         * block-normalized, and verified that the normalized range has
         * been entirely written to disk.
         */
        new->bse_f_offset = offset;
        offset = normalize(offset, bl->bl_blocksize);
        if (offset < new->bse_f_offset) {
                if (is_range_written(be->be_inval, offset, new->bse_f_offset))
                        new->bse_f_offset = offset;
                else
                        new->bse_f_offset = offset + bl->bl_blocksize;
        }
        new_end = normalize_up(end, bl->bl_blocksize);
        if (end < new_end) {
                if (is_range_written(be->be_inval, end, new_end))
                        end = new_end;
                else
                        end = new_end - bl->bl_blocksize;
        }
        if (end <= new->bse_f_offset) {
                kfree(new);
                return 0;
        }
        new->bse_length = end - new->bse_f_offset;
        new->bse_devid = be->be_devid;
        new->bse_mdev = be->be_mdev;

        spin_lock(&bl->bl_ext_lock);
        add_to_commitlist(bl, new);
        spin_unlock(&bl->bl_ext_lock);
        return 0;
}

static void print_bl_extent(struct pnfs_block_extent *be)
{
        dprintk("PRINT EXTENT extent %p\n", be);
        if (be) {
                dprintk("        be_f_offset %llu\n", (u64)be->be_f_offset);
                dprintk("        be_length   %llu\n", (u64)be->be_length);
                dprintk("        be_v_offset %llu\n", (u64)be->be_v_offset);
                dprintk("        be_state    %d\n", be->be_state);
        }
}

static void
destroy_extent(struct kref *kref)
{
        struct pnfs_block_extent *be;

        be = container_of(kref, struct pnfs_block_extent, be_refcnt);
        dprintk("%s be=%p\n", __func__, be);
        kfree(be);
}

void
bl_put_extent(struct pnfs_block_extent *be)
{
        if (be) {
                dprintk("%s enter %p (%i)\n", __func__, be,
                        atomic_read(&be->be_refcnt.refcount));
                kref_put(&be->be_refcnt, destroy_extent);
        }
}

struct pnfs_block_extent *bl_alloc_extent(void)
{
        struct pnfs_block_extent *be;

        be = kmalloc(sizeof(struct pnfs_block_extent), GFP_NOFS);
        if (!be)
                return NULL;
        INIT_LIST_HEAD(&be->be_node);
        kref_init(&be->be_refcnt);
        be->be_inval = NULL;
        return be;
}

static void print_elist(struct list_head *list)
{
        struct pnfs_block_extent *be;
        dprintk("****************\n");
        dprintk("Extent list looks like:\n");
        list_for_each_entry(be, list, be_node) {
                print_bl_extent(be);
        }
        dprintk("****************\n");
}

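/* Consistent here means mergeable: same state, and for states that carry
 * data, the same device with a colinear file-to-volume offset mapping.
 */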
static inline int
extents_consistent(struct pnfs_block_extent *old, struct pnfs_block_extent *new)
{
        /* Note this assumes new->be_f_offset >= old->be_f_offset */
        return (new->be_state == old->be_state) &&
                ((new->be_state == PNFS_BLOCK_NONE_DATA) ||
                 ((new->be_v_offset - old->be_v_offset ==
                   new->be_f_offset - old->be_f_offset) &&
                  new->be_mdev == old->be_mdev));
}

/* Adds new to appropriate list in bl, modifying new and removing existing
 * extents as appropriate to deal with overlaps.
 *
 * See bl_find_get_extent for list constraints.
 *
 * Refcount on new is already set.  If we end up not using it, or we error
 * out, we need to put the reference.
 *
 * bl->bl_ext_lock is held by caller.
 */
int
bl_add_merge_extent(struct pnfs_block_layout *bl,
                     struct pnfs_block_extent *new)
{
        struct pnfs_block_extent *be, *tmp;
        sector_t end = new->be_f_offset + new->be_length;
        struct list_head *list;

        dprintk("%s enter with be=%p\n", __func__, new);
        print_bl_extent(new);
        list = &bl->bl_extents[bl_choose_list(new->be_state)];
        print_elist(list);

        /* Scan for proper place to insert, extending new to the left
         * as much as possible.
         */
        list_for_each_entry_safe_reverse(be, tmp, list, be_node) {
                if (new->be_f_offset >= be->be_f_offset + be->be_length)
                        break;
                if (new->be_f_offset >= be->be_f_offset) {
                        if (end <= be->be_f_offset + be->be_length) {
                                /* new is a subset of existing be */
                                if (extents_consistent(be, new)) {
                                        dprintk("%s: new is subset, ignoring\n",
                                                __func__);
                                        bl_put_extent(new);
                                        return 0;
                                } else {
                                        goto out_err;
                                }
                        } else {
                                /* |<--   be   -->|
                                 *          |<--   new   -->| */
                                if (extents_consistent(be, new)) {
                                        /* extend new to fully replace be */
                                        new->be_length += new->be_f_offset -
                                                be->be_f_offset;
                                        new->be_f_offset = be->be_f_offset;
                                        new->be_v_offset = be->be_v_offset;
                                        dprintk("%s: removing %p\n", __func__, be);
                                        list_del(&be->be_node);
                                        bl_put_extent(be);
                                } else {
                                        goto out_err;
                                }
                        }
                } else if (end >= be->be_f_offset + be->be_length) {
                        /* new fully covers existing be */
                        if (extents_consistent(be, new)) {
                                /* extend new to fully replace be */
                                dprintk("%s: removing %p\n", __func__, be);
                                list_del(&be->be_node);
                                bl_put_extent(be);
                        } else {
                                goto out_err;
                        }
                } else if (end > be->be_f_offset) {
                        /*           |<--   be   -->|
                         *|<--   new   -->| */
                        if (extents_consistent(new, be)) {
                                /* extend new to fully replace be */
                                new->be_length += be->be_f_offset + be->be_length -
                                        new->be_f_offset - new->be_length;
                                dprintk("%s: removing %p\n", __func__, be);
                                list_del(&be->be_node);
                                bl_put_extent(be);
                        } else {
                                goto out_err;
                        }
                }
        }
        /* Note that if we never hit the above break, be will not point to a
         * valid extent.  However, in that case &be->be_node==list.
         */
        list_add(&new->be_node, &be->be_node);
        dprintk("%s: inserting new\n", __func__);
        print_elist(list);
        /* FIXME - The per-list consistency checks have all been done,
         * should now check cross-list consistency.
         */
        return 0;

 out_err:
        bl_put_extent(new);
        return -EIO;
}

/* Returns extent, or NULL.  If a second READ extent exists, it is returned
 * in cow_read, if given.
 *
 * The extents are kept in two separate ordered lists, one for READ and NONE,
 * one for READWRITE and INVALID.  Within each list, we assume:
 * 1. Extents are ordered by file offset.
 * 2. For any given isect, there is at most one extent that matches.
 */
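/* When the matching READWRITE-list extent is INVALID_DATA, the search
 * continues into the READ list so that a READ_DATA extent covering isect
 * can be handed back through cow_read for copy-on-write reads.
 */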
struct pnfs_block_extent *
bl_find_get_extent(struct pnfs_block_layout *bl, sector_t isect,
            struct pnfs_block_extent **cow_read)
{
        struct pnfs_block_extent *be, *cow, *ret;
        int i;

        dprintk("%s enter with isect %llu\n", __func__, (u64)isect);
        cow = ret = NULL;
        spin_lock(&bl->bl_ext_lock);
        for (i = 0; i < EXTENT_LISTS; i++) {
                list_for_each_entry_reverse(be, &bl->bl_extents[i], be_node) {
                        if (isect >= be->be_f_offset + be->be_length)
                                break;
                        if (isect >= be->be_f_offset) {
                                /* We have found an extent */
                                dprintk("%s Get %p (%i)\n", __func__, be,
                                        atomic_read(&be->be_refcnt.refcount));
                                kref_get(&be->be_refcnt);
                                if (!ret)
                                        ret = be;
                                else if (be->be_state != PNFS_BLOCK_READ_DATA)
                                        bl_put_extent(be);
                                else
                                        cow = be;
                                break;
                        }
                }
                if (ret &&
                    (!cow_read || ret->be_state != PNFS_BLOCK_INVALID_DATA))
                        break;
        }
        spin_unlock(&bl->bl_ext_lock);
        if (cow_read)
                *cow_read = cow;
        print_bl_extent(ret);
        return ret;
}

/* Similar to bl_find_get_extent, but called with lock held, and ignores cow */
static struct pnfs_block_extent *
bl_find_get_extent_locked(struct pnfs_block_layout *bl, sector_t isect)
{
        struct pnfs_block_extent *be, *ret = NULL;
        int i;

        dprintk("%s enter with isect %llu\n", __func__, (u64)isect);
        for (i = 0; i < EXTENT_LISTS; i++) {
                if (ret)
                        break;
                list_for_each_entry_reverse(be, &bl->bl_extents[i], be_node) {
                        if (isect >= be->be_f_offset + be->be_length)
                                break;
                        if (isect >= be->be_f_offset) {
                                /* We have found an extent */
                                dprintk("%s Get %p (%i)\n", __func__, be,
                                        atomic_read(&be->be_refcnt.refcount));
                                kref_get(&be->be_refcnt);
                                ret = be;
                                break;
                        }
                }
        }
        print_bl_extent(ret);
        return ret;
}

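/* Encodes the commit list as the layoutupdate4 for LAYOUTCOMMIT: an opaque
 * length and extent count, then for each extent its deviceid, file offset
 * and length in bytes (sectors shifted by SECTOR_SHIFT), a zero storage
 * offset, and the PNFS_BLOCK_READWRITE_DATA state.  Encoded extents move
 * from bl_commit to bl_committing until cleaned by
 * clean_pnfs_block_layoutupdate.
 */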
int
encode_pnfs_block_layoutupdate(struct pnfs_block_layout *bl,
                               struct xdr_stream *xdr,
                               const struct nfs4_layoutcommit_args *arg)
{
        struct pnfs_block_short_extent *lce, *save;
        unsigned int count = 0;
        __be32 *p, *xdr_start;

        dprintk("%s enter\n", __func__);
        /* BUG - creation of bl_commit is buggy - need to wait for
         * entire block to be marked WRITTEN before it can be added.
         */
        spin_lock(&bl->bl_ext_lock);
        /* Want to adjust for possible truncate */
        /* We now want to adjust argument range */

        /* XDR encode the ranges found */
        xdr_start = xdr_reserve_space(xdr, 8);
        if (!xdr_start)
                goto out;
        list_for_each_entry_safe(lce, save, &bl->bl_commit, bse_node) {
                p = xdr_reserve_space(xdr, 7 * 4 + sizeof(lce->bse_devid.data));
                if (!p)
                        break;
                p = xdr_encode_opaque_fixed(p, lce->bse_devid.data, NFS4_DEVICEID4_SIZE);
                p = xdr_encode_hyper(p, lce->bse_f_offset << SECTOR_SHIFT);
                p = xdr_encode_hyper(p, lce->bse_length << SECTOR_SHIFT);
                p = xdr_encode_hyper(p, 0LL);
                *p++ = cpu_to_be32(PNFS_BLOCK_READWRITE_DATA);
                list_move_tail(&lce->bse_node, &bl->bl_committing);
                bl->bl_count--;
                count++;
        }
        xdr_start[0] = cpu_to_be32((xdr->p - xdr_start - 1) * 4);
        xdr_start[1] = cpu_to_be32(count);
out:
        spin_unlock(&bl->bl_ext_lock);
        dprintk("%s found %i ranges\n", __func__, count);
        return 0;
}

/* Helper function for set_to_rw that initializes a new extent */
static void
_prep_new_extent(struct pnfs_block_extent *new,
                 struct pnfs_block_extent *orig,
                 sector_t offset, sector_t length, int state)
{
        kref_init(&new->be_refcnt);
        /* don't need to INIT_LIST_HEAD(&new->be_node) */
        memcpy(&new->be_devid, &orig->be_devid, sizeof(struct nfs4_deviceid));
        new->be_mdev = orig->be_mdev;
        new->be_f_offset = offset;
        new->be_length = length;
        new->be_v_offset = orig->be_v_offset - orig->be_f_offset + offset;
        new->be_state = state;
        new->be_inval = orig->be_inval;
}

/* Tries to merge be with extent in front of it in list.
 * Frees storage if not used.
 */
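/* A merge happens only when prev ends exactly where be begins and the two
 * are consistent; the merged extent is built in storage, which replaces
 * prev in the list, and both originals are released.
 */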
static struct pnfs_block_extent *
_front_merge(struct pnfs_block_extent *be, struct list_head *head,
             struct pnfs_block_extent *storage)
{
        struct pnfs_block_extent *prev;

        if (!storage)
                goto no_merge;
        if (&be->be_node == head || be->be_node.prev == head)
                goto no_merge;
        prev = list_entry(be->be_node.prev, struct pnfs_block_extent, be_node);
        if ((prev->be_f_offset + prev->be_length != be->be_f_offset) ||
            !extents_consistent(prev, be))
                goto no_merge;
        _prep_new_extent(storage, prev, prev->be_f_offset,
                         prev->be_length + be->be_length, prev->be_state);
        list_replace(&prev->be_node, &storage->be_node);
        bl_put_extent(prev);
        list_del(&be->be_node);
        bl_put_extent(be);
        return storage;

 no_merge:
        kfree(storage);
        return be;
}

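/* Converts the INVALID_DATA extent covering offset to READWRITE_DATA over
 * [offset, offset + length), splitting it into up to three pieces: an
 * invalid head, a read-write middle, and an invalid tail, front-merging
 * with neighbors where possible.  Returns the first sector past the
 * extent it handled, so callers can loop over a larger range.
 */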
static u64
set_to_rw(struct pnfs_block_layout *bl, u64 offset, u64 length)
{
        u64 rv = offset + length;
        struct pnfs_block_extent *be, *e1, *e2, *e3, *new, *old;
        struct pnfs_block_extent *children[3];
        struct pnfs_block_extent *merge1 = NULL, *merge2 = NULL;
        int i = 0, j;

        dprintk("%s(%llu, %llu)\n", __func__, offset, length);
        /* Create storage for up to three new extents e1, e2, e3 */
        e1 = kmalloc(sizeof(*e1), GFP_ATOMIC);
        e2 = kmalloc(sizeof(*e2), GFP_ATOMIC);
        e3 = kmalloc(sizeof(*e3), GFP_ATOMIC);
        /* BUG - we are ignoring any failure */
        if (!e1 || !e2 || !e3)
                goto out_nosplit;

        spin_lock(&bl->bl_ext_lock);
        be = bl_find_get_extent_locked(bl, offset);
        rv = be->be_f_offset + be->be_length;
        if (be->be_state != PNFS_BLOCK_INVALID_DATA) {
                spin_unlock(&bl->bl_ext_lock);
                goto out_nosplit;
        }
        /* Add e* to children, bumping e*'s krefs */
        if (be->be_f_offset != offset) {
                _prep_new_extent(e1, be, be->be_f_offset,
                                 offset - be->be_f_offset,
                                 PNFS_BLOCK_INVALID_DATA);
                children[i++] = e1;
                print_bl_extent(e1);
        } else
                merge1 = e1;
        _prep_new_extent(e2, be, offset,
                         min(length, be->be_f_offset + be->be_length - offset),
                         PNFS_BLOCK_READWRITE_DATA);
        children[i++] = e2;
        print_bl_extent(e2);
        if (offset + length < be->be_f_offset + be->be_length) {
                _prep_new_extent(e3, be, e2->be_f_offset + e2->be_length,
                                 be->be_f_offset + be->be_length -
                                 offset - length,
                                 PNFS_BLOCK_INVALID_DATA);
                children[i++] = e3;
                print_bl_extent(e3);
        } else
                merge2 = e3;

        /* Remove be from list, and insert the e* */
        /* We don't get refs on e*, since this list is the base reference
         * set when init'ed.
         */
        if (i < 3)
                children[i] = NULL;
        new = children[0];
        list_replace(&be->be_node, &new->be_node);
        bl_put_extent(be);
        new = _front_merge(new, &bl->bl_extents[RW_EXTENT], merge1);
        for (j = 1; j < i; j++) {
                old = new;
                new = children[j];
                list_add(&new->be_node, &old->be_node);
        }
        if (merge2) {
                /* This is a HACK, should just create a _back_merge function */
                new = list_entry(new->be_node.next,
                                 struct pnfs_block_extent, be_node);
                new = _front_merge(new, &bl->bl_extents[RW_EXTENT], merge2);
        }
        spin_unlock(&bl->bl_ext_lock);

        /* Since we removed the base reference above, be is now scheduled for
         * destruction.
         */
        bl_put_extent(be);
        dprintk("%s returns %llu after split\n", __func__, rv);
        return rv;

 out_nosplit:
        kfree(e1);
        kfree(e2);
        kfree(e3);
        dprintk("%s returns %llu without splitting\n", __func__, rv);
        return rv;
}

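/* Called once LAYOUTCOMMIT has completed.  On success each committed
 * extent is converted to read-write state and freed; on failure the
 * extents are returned to the commit list so a later LAYOUTCOMMIT can
 * retry them.
 */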
void
clean_pnfs_block_layoutupdate(struct pnfs_block_layout *bl,
                              const struct nfs4_layoutcommit_args *arg,
                              int status)
{
        struct pnfs_block_short_extent *lce, *save;

        dprintk("%s status %d\n", __func__, status);
        list_for_each_entry_safe(lce, save, &bl->bl_committing, bse_node) {
                if (likely(!status)) {
                        u64 offset = lce->bse_f_offset;
                        u64 end = offset + lce->bse_length;

                        do {
                                offset = set_to_rw(bl, offset, end - offset);
                        } while (offset < end);
                        list_del(&lce->bse_node);
                        kfree(lce);
                } else {
                        list_del(&lce->bse_node);
                        spin_lock(&bl->bl_ext_lock);
                        add_to_commitlist(bl, lce);
                        spin_unlock(&bl->bl_ext_lock);
                }
        }
}

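/* The functions below maintain a pool of preallocated short extents on
 * marks->im_extents: bl_push_one_short_extent stocks the pool and
 * bl_pop_one_short_extent hands entries out, so consumers can obtain a
 * short extent without allocating.
 */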
int bl_push_one_short_extent(struct pnfs_inval_markings *marks)
{
        struct pnfs_block_short_extent *new;

        new = kmalloc(sizeof(*new), GFP_NOFS);
        if (unlikely(!new))
                return -ENOMEM;

        spin_lock_bh(&marks->im_lock);
        list_add(&new->bse_node, &marks->im_extents);
        spin_unlock_bh(&marks->im_lock);

        return 0;
}

struct pnfs_block_short_extent *
bl_pop_one_short_extent(struct pnfs_inval_markings *marks)
{
        struct pnfs_block_short_extent *rv = NULL;

        spin_lock_bh(&marks->im_lock);
        if (!list_empty(&marks->im_extents)) {
                rv = list_entry((&marks->im_extents)->next,
                                struct pnfs_block_short_extent, bse_node);
                list_del_init(&rv->bse_node);
        }
        spin_unlock_bh(&marks->im_lock);

        return rv;
}

void bl_free_short_extents(struct pnfs_inval_markings *marks, int num_to_free)
{
        struct pnfs_block_short_extent *se = NULL, *tmp;

        if (num_to_free <= 0)
                return;

        spin_lock(&marks->im_lock);
        list_for_each_entry_safe(se, tmp, &marks->im_extents, bse_node) {
                list_del(&se->bse_node);
                kfree(se);
                if (--num_to_free == 0)
                        break;
        }
        spin_unlock(&marks->im_lock);

        BUG_ON(num_to_free > 0);
}