nilfs2: fiemap support
fs/nilfs2/inode.c
/*
 * inode.c - NILFS inode operations.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 *
 */

#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/uio.h>
#include "nilfs.h"
#include "btnode.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
#include "cpfile.h"
#include "ifile.h"

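/**
 * struct nilfs_iget_args - arguments used during lookup of a nilfs inode
 * @ino: inode number
 * @cno: checkpoint number attached to GC inodes
 * @root: NILFS root object of the mounted checkpoint (NULL for GC inodes)
 * @for_gc: set if the inode is used by the garbage collector
 */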
struct nilfs_iget_args {
        u64 ino;
        __u64 cno;
        struct nilfs_root *root;
        int for_gc;
};

/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode: inode struct of the target file
 * @blkoff: file block number
 * @bh_result: buffer head to be mapped on
 * @create: indicate whether to allocate the block when it has not been
 *      allocated yet
 *
 * This function does not issue actual read requests for the specified data
 * block; that is done by the VFS.
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
                    struct buffer_head *bh_result, int create)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        __u64 blknum = 0;
        int err = 0, ret;
        struct inode *dat = nilfs_dat_inode(NILFS_I_NILFS(inode));
        unsigned maxblocks = bh_result->b_size >> inode->i_blkbits;

        down_read(&NILFS_MDT(dat)->mi_sem);
        ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
        up_read(&NILFS_MDT(dat)->mi_sem);
        if (ret >= 0) { /* found */
                map_bh(bh_result, inode->i_sb, blknum);
                if (ret > 0)
                        bh_result->b_size = (ret << inode->i_blkbits);
                goto out;
        }
        /* data block was not found */
        if (ret == -ENOENT && create) {
                struct nilfs_transaction_info ti;

                bh_result->b_blocknr = 0;
                err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
                if (unlikely(err))
                        goto out;
                err = nilfs_bmap_insert(ii->i_bmap, (unsigned long)blkoff,
                                        (unsigned long)bh_result);
                if (unlikely(err != 0)) {
                        if (err == -EEXIST) {
                                /*
                                 * get_block() may be called concurrently
                                 * for the same inode; however, the page
                                 * containing this block must be locked
                                 * in that case.
                                 */
                                printk(KERN_WARNING
                                       "nilfs_get_block: a race condition "
                                       "while inserting a data block. "
                                       "(inode number=%lu, file block "
                                       "offset=%llu)\n",
                                       inode->i_ino,
                                       (unsigned long long)blkoff);
                                err = 0;
                        }
                        nilfs_transaction_abort(inode->i_sb);
                        goto out;
                }
                nilfs_mark_inode_dirty(inode);
                nilfs_transaction_commit(inode->i_sb); /* never fails */
                /* Error handling should be detailed */
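                /*
                 * The newly inserted block has no disk address yet; it is
                 * a delayed-allocation block whose address the segment
                 * constructor assigns when the log is written out.
                 */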
                set_buffer_new(bh_result);
                set_buffer_delay(bh_result);
                map_bh(bh_result, inode->i_sb, 0); /* disk block number must
                                                      be changed to the
                                                      proper value */
        } else if (ret == -ENOENT) {
                /* A hole is not an error; return without setting the
                   mapped state flag. */
                ;
        } else {
                err = ret;
        }

 out:
        return err;
}

/**
 * nilfs_readpage() - implement the readpage() method of the nilfs_aops
 * address_space_operations
 * @file: file struct of the file to be read
 * @page: the page to be read
 */
static int nilfs_readpage(struct file *file, struct page *page)
{
        return mpage_readpage(page, nilfs_get_block);
}

/**
 * nilfs_readpages() - implement the readpages() method of the nilfs_aops
 * address_space_operations
 * @file: file struct of the file to be read
 * @mapping: address_space struct used for reading multiple pages
 * @pages: the pages to be read
 * @nr_pages: number of pages to be read
 */
static int nilfs_readpages(struct file *file, struct address_space *mapping,
                           struct list_head *pages, unsigned nr_pages)
{
        return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block);
}

static int nilfs_writepages(struct address_space *mapping,
                            struct writeback_control *wbc)
{
        struct inode *inode = mapping->host;
        int err = 0;

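        /*
         * Only data-sync writeback is handled here; in the other modes,
         * dirty pages are written out by the segment constructor in its
         * own time, so nothing needs to be done.
         */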
        if (wbc->sync_mode == WB_SYNC_ALL)
                err = nilfs_construct_dsync_segment(inode->i_sb, inode,
                                                    wbc->range_start,
                                                    wbc->range_end);
        return err;
}

static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        int err;

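        /*
         * NILFS never writes a page from ->writepage directly; redirty
         * the page and leave the actual write to the log writer.
         */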
        redirty_page_for_writepage(wbc, page);
        unlock_page(page);

        if (wbc->sync_mode == WB_SYNC_ALL) {
                err = nilfs_construct_segment(inode->i_sb);
                if (unlikely(err))
                        return err;
        } else if (wbc->for_reclaim)
                nilfs_flush_segment(inode->i_sb, inode->i_ino);

        return 0;
}

static int nilfs_set_page_dirty(struct page *page)
{
        int ret = __set_page_dirty_buffers(page);

        if (ret) {
                struct inode *inode = page->mapping->host;
                struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
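                /* account one page worth of file blocks as newly dirtied */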
                unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);

                nilfs_set_file_dirty(sbi, inode, nr_dirty);
        }
        return ret;
}

static int nilfs_write_begin(struct file *file, struct address_space *mapping,
                             loff_t pos, unsigned len, unsigned flags,
                             struct page **pagep, void **fsdata)
{
        struct inode *inode = mapping->host;
        int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);

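        /*
         * On success, the transaction opened above stays open and is
         * committed by nilfs_write_end(); on failure it is aborted below.
         */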
        if (unlikely(err))
                return err;

        err = block_write_begin(mapping, pos, len, flags, pagep,
                                nilfs_get_block);
        if (unlikely(err)) {
                loff_t isize = mapping->host->i_size;
                if (pos + len > isize)
                        vmtruncate(mapping->host, isize);

                nilfs_transaction_abort(inode->i_sb);
        }
        return err;
}

static int nilfs_write_end(struct file *file, struct address_space *mapping,
                           loff_t pos, unsigned len, unsigned copied,
                           struct page *page, void *fsdata)
{
        struct inode *inode = mapping->host;
        unsigned start = pos & (PAGE_CACHE_SIZE - 1);
        unsigned nr_dirty;
        int err;

        nr_dirty = nilfs_page_count_clean_buffers(page, start,
                                                  start + copied);
        copied = generic_write_end(file, mapping, pos, len, copied, page,
                                   fsdata);
        nilfs_set_file_dirty(NILFS_SB(inode->i_sb), inode, nr_dirty);
        err = nilfs_transaction_commit(inode->i_sb);
        return err ? : copied;
}

static ssize_t
nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
                loff_t offset, unsigned long nr_segs)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
        ssize_t size;

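        /* returning zero makes the caller fall back to buffered writes */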
        if (rw == WRITE)
                return 0;

        /* Needs synchronization with the cleaner */
        size = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
                                  offset, nr_segs, nilfs_get_block, NULL);

        /*
         * In case of error, an extending write may have instantiated a few
         * blocks outside i_size.  Trim these off again.
         */
        if (unlikely((rw & WRITE) && size < 0)) {
                loff_t isize = i_size_read(inode);
                loff_t end = offset + iov_length(iov, nr_segs);

                if (end > isize)
                        vmtruncate(inode, isize);
        }

        return size;
}

const struct address_space_operations nilfs_aops = {
        .writepage              = nilfs_writepage,
        .readpage               = nilfs_readpage,
        .sync_page              = block_sync_page,
        .writepages             = nilfs_writepages,
        .set_page_dirty         = nilfs_set_page_dirty,
        .readpages              = nilfs_readpages,
        .write_begin            = nilfs_write_begin,
        .write_end              = nilfs_write_end,
        /* .releasepage         = nilfs_releasepage, */
        .invalidatepage         = block_invalidatepage,
        .direct_IO              = nilfs_direct_IO,
        .is_partially_uptodate  = block_is_partially_uptodate,
};

struct inode *nilfs_new_inode(struct inode *dir, int mode)
{
        struct super_block *sb = dir->i_sb;
        struct nilfs_sb_info *sbi = NILFS_SB(sb);
        struct inode *inode;
        struct nilfs_inode_info *ii;
        struct nilfs_root *root;
        int err = -ENOMEM;
        ino_t ino;

        inode = new_inode(sb);
        if (unlikely(!inode))
                goto failed;

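        /*
         * Mask out __GFP_FS so that page cache allocations for this
         * mapping cannot recurse back into the filesystem.
         */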
        mapping_set_gfp_mask(inode->i_mapping,
                             mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);

        root = NILFS_I(dir)->i_root;
        ii = NILFS_I(inode);
        ii->i_state = 1 << NILFS_I_NEW;
        ii->i_root = root;

        err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
        if (unlikely(err))
                goto failed_ifile_create_inode;
        /* the reference count of i_bh is inherited from
           nilfs_mdt_read_block() */

        atomic_inc(&root->inodes_count);
        inode_init_owner(inode, dir, mode);
        inode->i_ino = ino;
        inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

        if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
                err = nilfs_bmap_read(ii->i_bmap, NULL);
                if (err < 0)
                        goto failed_bmap;

                set_bit(NILFS_I_BMAP, &ii->i_state);
                /* No lock is needed; iget() ensures it. */
        }

        ii->i_flags = NILFS_I(dir)->i_flags;
        if (S_ISLNK(mode))
                ii->i_flags &= ~(NILFS_IMMUTABLE_FL | NILFS_APPEND_FL);
        if (!S_ISDIR(mode))
                ii->i_flags &= ~NILFS_DIRSYNC_FL;

        /* ii->i_file_acl = 0; */
        /* ii->i_dir_acl = 0; */
        ii->i_dir_start_lookup = 0;
        nilfs_set_inode_flags(inode);
        spin_lock(&sbi->s_next_gen_lock);
        inode->i_generation = sbi->s_next_generation++;
        spin_unlock(&sbi->s_next_gen_lock);
        insert_inode_hash(inode);

        err = nilfs_init_acl(inode, dir);
        if (unlikely(err))
                goto failed_acl; /* never occurs.  When nilfs_init_acl() is
                                    supported, proper cancellation of the
                                    jobs above should be considered. */

        return inode;

 failed_acl:
 failed_bmap:
        inode->i_nlink = 0;
        iput(inode);  /* raw_inode will be deleted through
                         generic_delete_inode() */
        goto failed;

 failed_ifile_create_inode:
        make_bad_inode(inode);
        iput(inode);  /* if i_nlink == 1, generic_forget_inode() will be
                         called */
 failed:
        return ERR_PTR(err);
}

void nilfs_set_inode_flags(struct inode *inode)
{
        unsigned int flags = NILFS_I(inode)->i_flags;

        inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME |
                            S_DIRSYNC);
        if (flags & NILFS_SYNC_FL)
                inode->i_flags |= S_SYNC;
        if (flags & NILFS_APPEND_FL)
                inode->i_flags |= S_APPEND;
        if (flags & NILFS_IMMUTABLE_FL)
                inode->i_flags |= S_IMMUTABLE;
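        /*
         * When atime updates are disabled at build time, S_NOATIME is
         * set unconditionally.
         */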
#ifndef NILFS_ATIME_DISABLE
        if (flags & NILFS_NOATIME_FL)
#endif
                inode->i_flags |= S_NOATIME;
        if (flags & NILFS_DIRSYNC_FL)
                inode->i_flags |= S_DIRSYNC;
        mapping_set_gfp_mask(inode->i_mapping,
                             mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
}

int nilfs_read_inode_common(struct inode *inode,
                            struct nilfs_inode *raw_inode)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        int err;

        inode->i_mode = le16_to_cpu(raw_inode->i_mode);
        inode->i_uid = (uid_t)le32_to_cpu(raw_inode->i_uid);
        inode->i_gid = (gid_t)le32_to_cpu(raw_inode->i_gid);
        inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
        inode->i_size = le64_to_cpu(raw_inode->i_size);
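        /* the on-disk inode has no atime field; mtime doubles as atime */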
        inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
        inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
        inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
        inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
        inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
        inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
        if (inode->i_nlink == 0 && inode->i_mode == 0)
                return -EINVAL; /* this inode is deleted */

        inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
        ii->i_flags = le32_to_cpu(raw_inode->i_flags);
#if 0
        ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
        ii->i_dir_acl = S_ISREG(inode->i_mode) ?
                0 : le32_to_cpu(raw_inode->i_dir_acl);
#endif
        ii->i_dir_start_lookup = 0;
        inode->i_generation = le32_to_cpu(raw_inode->i_generation);

        if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
            S_ISLNK(inode->i_mode)) {
                err = nilfs_bmap_read(ii->i_bmap, raw_inode);
                if (err < 0)
                        return err;
                set_bit(NILFS_I_BMAP, &ii->i_state);
                /* No lock is needed; iget() ensures it. */
        }
        return 0;
}

static int __nilfs_read_inode(struct super_block *sb,
                              struct nilfs_root *root, unsigned long ino,
                              struct inode *inode)
{
        struct nilfs_sb_info *sbi = NILFS_SB(sb);
        struct inode *dat = nilfs_dat_inode(sbi->s_nilfs);
        struct buffer_head *bh;
        struct nilfs_inode *raw_inode;
        int err;

        down_read(&NILFS_MDT(dat)->mi_sem);     /* XXX */
        err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
        if (unlikely(err))
                goto bad_inode;

        raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);

        err = nilfs_read_inode_common(inode, raw_inode);
        if (err)
                goto failed_unmap;

        if (S_ISREG(inode->i_mode)) {
                inode->i_op = &nilfs_file_inode_operations;
                inode->i_fop = &nilfs_file_operations;
                inode->i_mapping->a_ops = &nilfs_aops;
        } else if (S_ISDIR(inode->i_mode)) {
                inode->i_op = &nilfs_dir_inode_operations;
                inode->i_fop = &nilfs_dir_operations;
                inode->i_mapping->a_ops = &nilfs_aops;
        } else if (S_ISLNK(inode->i_mode)) {
                inode->i_op = &nilfs_symlink_inode_operations;
                inode->i_mapping->a_ops = &nilfs_aops;
        } else {
                inode->i_op = &nilfs_special_inode_operations;
                init_special_inode(
                        inode, inode->i_mode,
                        huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
        }
        nilfs_ifile_unmap_inode(root->ifile, ino, bh);
        brelse(bh);
        up_read(&NILFS_MDT(dat)->mi_sem);       /* XXX */
        nilfs_set_inode_flags(inode);
        return 0;

 failed_unmap:
        nilfs_ifile_unmap_inode(root->ifile, ino, bh);
        brelse(bh);

 bad_inode:
        up_read(&NILFS_MDT(dat)->mi_sem);       /* XXX */
        return err;
}

static int nilfs_iget_test(struct inode *inode, void *opaque)
{
        struct nilfs_iget_args *args = opaque;
        struct nilfs_inode_info *ii;

        if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
                return 0;

        ii = NILFS_I(inode);
        if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
                return !args->for_gc;

        return args->for_gc && args->cno == ii->i_cno;
}

static int nilfs_iget_set(struct inode *inode, void *opaque)
{
        struct nilfs_iget_args *args = opaque;

        inode->i_ino = args->ino;
        if (args->for_gc) {
                NILFS_I(inode)->i_state = 1 << NILFS_I_GCINODE;
                NILFS_I(inode)->i_cno = args->cno;
                NILFS_I(inode)->i_root = NULL;
        } else {
                if (args->root && args->ino == NILFS_ROOT_INO)
                        nilfs_get_root(args->root);
                NILFS_I(inode)->i_root = args->root;
        }
        return 0;
}

struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
                            unsigned long ino)
{
        struct nilfs_iget_args args = {
                .ino = ino, .root = root, .cno = 0, .for_gc = 0
        };

        return ilookup5(sb, ino, nilfs_iget_test, &args);
}

struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
                                unsigned long ino)
{
        struct nilfs_iget_args args = {
                .ino = ino, .root = root, .cno = 0, .for_gc = 0
        };

        return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
}

struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
                         unsigned long ino)
{
        struct inode *inode;
        int err;

        inode = nilfs_iget_locked(sb, root, ino);
        if (unlikely(!inode))
                return ERR_PTR(-ENOMEM);
        if (!(inode->i_state & I_NEW))
                return inode;

        err = __nilfs_read_inode(sb, root, ino, inode);
        if (unlikely(err)) {
                iget_failed(inode);
                return ERR_PTR(err);
        }
        unlock_new_inode(inode);
        return inode;
}

struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
                                __u64 cno)
{
        struct nilfs_iget_args args = {
                .ino = ino, .root = NULL, .cno = cno, .for_gc = 1
        };
        struct inode *inode;
        int err;

        inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
        if (unlikely(!inode))
                return ERR_PTR(-ENOMEM);
        if (!(inode->i_state & I_NEW))
                return inode;

        err = nilfs_init_gcinode(inode);
        if (unlikely(err)) {
                iget_failed(inode);
                return ERR_PTR(err);
        }
        unlock_new_inode(inode);
        return inode;
}

void nilfs_write_inode_common(struct inode *inode,
                              struct nilfs_inode *raw_inode, int has_bmap)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);

        raw_inode->i_mode = cpu_to_le16(inode->i_mode);
        raw_inode->i_uid = cpu_to_le32(inode->i_uid);
        raw_inode->i_gid = cpu_to_le32(inode->i_gid);
        raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
        raw_inode->i_size = cpu_to_le64(inode->i_size);
        raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
        raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
        raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
        raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
        raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

        raw_inode->i_flags = cpu_to_le32(ii->i_flags);
        raw_inode->i_generation = cpu_to_le32(inode->i_generation);

        if (has_bmap)
                nilfs_bmap_write(ii->i_bmap, raw_inode);
        else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
                raw_inode->i_device_code =
                        cpu_to_le64(huge_encode_dev(inode->i_rdev));
        /* When extending the inode format, nilfs->ns_inode_size should be
           checked before substituting appended fields. */
}

void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh)
{
        ino_t ino = inode->i_ino;
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct inode *ifile = ii->i_root->ifile;
        struct nilfs_inode *raw_inode;

        raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);

        if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
                memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
        set_bit(NILFS_I_INODE_DIRTY, &ii->i_state);

        nilfs_write_inode_common(inode, raw_inode, 0);
                /* XXX: calling with has_bmap = 0 is a workaround to avoid
                   a deadlock on the bmap; it delays the update of i_bmap
                   until just before writing. */
        nilfs_ifile_unmap_inode(ifile, ino, ibh);
}

#define NILFS_MAX_TRUNCATE_BLOCKS       16384  /* 64MB for 4KB block */

static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
                                unsigned long from)
{
        unsigned long b;
        int ret;

        if (!test_bit(NILFS_I_BMAP, &ii->i_state))
                return;
repeat:
        ret = nilfs_bmap_last_key(ii->i_bmap, &b);
        if (ret == -ENOENT)
                return;
        else if (ret < 0)
                goto failed;

        if (b < from)
                return;

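        /*
         * Truncate at most NILFS_MAX_TRUNCATE_BLOCKS per pass, relaxing
         * pressure between passes so that a huge truncation does not
         * stall the rest of the filesystem.
         */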
        b -= min_t(unsigned long, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
        ret = nilfs_bmap_truncate(ii->i_bmap, b);
        nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
        if (!ret || (ret == -ENOMEM &&
                     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
                goto repeat;

failed:
        nilfs_warning(ii->vfs_inode.i_sb, __func__,
                      "failed to truncate bmap (ino=%lu, err=%d)",
                      ii->vfs_inode.i_ino, ret);
}

void nilfs_truncate(struct inode *inode)
{
        unsigned long blkoff;
        unsigned int blocksize;
        struct nilfs_transaction_info ti;
        struct super_block *sb = inode->i_sb;
        struct nilfs_inode_info *ii = NILFS_I(inode);

        if (!test_bit(NILFS_I_BMAP, &ii->i_state))
                return;
        if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
                return;

        blocksize = sb->s_blocksize;
        blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
        nilfs_transaction_begin(sb, &ti, 0); /* never fails */

        block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

        nilfs_truncate_bmap(ii, blkoff);

        inode->i_mtime = inode->i_ctime = CURRENT_TIME;
        if (IS_SYNC(inode))
                nilfs_set_transaction_flag(NILFS_TI_SYNC);

        nilfs_mark_inode_dirty(inode);
        nilfs_set_file_dirty(NILFS_SB(sb), inode, 0);
        nilfs_transaction_commit(sb);
        /* This may construct a logical segment and may fail in sync mode,
           but truncate has no return value. */
}

static void nilfs_clear_inode(struct inode *inode)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

        /*
         * Free resources allocated in nilfs_read_inode(), here.
         */
        BUG_ON(!list_empty(&ii->i_dirty));
        brelse(ii->i_bh);
        ii->i_bh = NULL;

        if (mdi && mdi->mi_palloc_cache)
                nilfs_palloc_destroy_cache(inode);

        if (test_bit(NILFS_I_BMAP, &ii->i_state))
                nilfs_bmap_clear(ii->i_bmap);

        nilfs_btnode_cache_clear(&ii->i_btnode_cache);

        if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
                nilfs_put_root(ii->i_root);
}

void nilfs_evict_inode(struct inode *inode)
{
        struct nilfs_transaction_info ti;
        struct super_block *sb = inode->i_sb;
        struct nilfs_inode_info *ii = NILFS_I(inode);

        if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
                if (inode->i_data.nrpages)
                        truncate_inode_pages(&inode->i_data, 0);
                end_writeback(inode);
                nilfs_clear_inode(inode);
                return;
        }
        nilfs_transaction_begin(sb, &ti, 0); /* never fails */

        if (inode->i_data.nrpages)
                truncate_inode_pages(&inode->i_data, 0);

        /* TODO: some of the following operations may fail.  */
        nilfs_truncate_bmap(ii, 0);
        nilfs_mark_inode_dirty(inode);
        end_writeback(inode);

        nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
        atomic_dec(&ii->i_root->inodes_count);

        nilfs_clear_inode(inode);

        if (IS_SYNC(inode))
                nilfs_set_transaction_flag(NILFS_TI_SYNC);
        nilfs_transaction_commit(sb);
        /* This may construct a logical segment and may fail in sync mode,
           but delete_inode has no return value. */
}

int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
        struct nilfs_transaction_info ti;
        struct inode *inode = dentry->d_inode;
        struct super_block *sb = inode->i_sb;
        int err;

        err = inode_change_ok(inode, iattr);
        if (err)
                return err;

        err = nilfs_transaction_begin(sb, &ti, 0);
        if (unlikely(err))
                return err;

        if ((iattr->ia_valid & ATTR_SIZE) &&
            iattr->ia_size != i_size_read(inode)) {
                err = vmtruncate(inode, iattr->ia_size);
                if (unlikely(err))
                        goto out_err;
        }

        setattr_copy(inode, iattr);
        mark_inode_dirty(inode);

        if (iattr->ia_valid & ATTR_MODE) {
                err = nilfs_acl_chmod(inode);
                if (unlikely(err))
                        goto out_err;
        }

        return nilfs_transaction_commit(sb);

out_err:
        nilfs_transaction_abort(sb);
        return err;
}

int nilfs_permission(struct inode *inode, int mask, unsigned int flags)
{
        struct nilfs_root *root;

        if (flags & IPERM_FLAG_RCU)
                return -ECHILD;

        root = NILFS_I(inode)->i_root;
        if ((mask & MAY_WRITE) && root &&
            root->cno != NILFS_CPTREE_CURRENT_CNO)
                return -EROFS; /* snapshot is not writable */

        return generic_permission(inode, mask, flags, NULL);
}

int nilfs_load_inode_block(struct nilfs_sb_info *sbi, struct inode *inode,
                           struct buffer_head **pbh)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        int err;

        spin_lock(&sbi->s_inode_lock);
        if (ii->i_bh == NULL) {
                spin_unlock(&sbi->s_inode_lock);
                err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
                                                  inode->i_ino, pbh);
                if (unlikely(err))
                        return err;
                spin_lock(&sbi->s_inode_lock);
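                /* recheck: another task may have set i_bh while the lock
                   was dropped */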
                if (ii->i_bh == NULL)
                        ii->i_bh = *pbh;
                else {
                        brelse(*pbh);
                        *pbh = ii->i_bh;
                }
        } else
                *pbh = ii->i_bh;

        get_bh(*pbh);
        spin_unlock(&sbi->s_inode_lock);
        return 0;
}

int nilfs_inode_dirty(struct inode *inode)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
        int ret = 0;

        if (!list_empty(&ii->i_dirty)) {
                spin_lock(&sbi->s_inode_lock);
                ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
                        test_bit(NILFS_I_BUSY, &ii->i_state);
                spin_unlock(&sbi->s_inode_lock);
        }
        return ret;
}

int nilfs_set_file_dirty(struct nilfs_sb_info *sbi, struct inode *inode,
                         unsigned nr_dirty)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);

        atomic_add(nr_dirty, &sbi->s_nilfs->ns_ndirtyblks);

        if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
                return 0;

        spin_lock(&sbi->s_inode_lock);
        if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
            !test_bit(NILFS_I_BUSY, &ii->i_state)) {
                /* Because this routine may race with nilfs_dispose_list(),
                   we have to check NILFS_I_QUEUED here, too. */
                if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
                        /* This will happen when somebody is freeing
                           this inode. */
                        nilfs_warning(sbi->s_super, __func__,
                                      "cannot get inode (ino=%lu)\n",
                                      inode->i_ino);
                        spin_unlock(&sbi->s_inode_lock);
                        return -EINVAL; /* NILFS_I_DIRTY may remain for
                                           freeing inode */
                }
                list_del(&ii->i_dirty);
                list_add_tail(&ii->i_dirty, &sbi->s_dirty_files);
                set_bit(NILFS_I_QUEUED, &ii->i_state);
        }
        spin_unlock(&sbi->s_inode_lock);
        return 0;
}

int nilfs_mark_inode_dirty(struct inode *inode)
{
        struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
        struct buffer_head *ibh;
        int err;

        err = nilfs_load_inode_block(sbi, inode, &ibh);
        if (unlikely(err)) {
                nilfs_warning(inode->i_sb, __func__,
                              "failed to reget inode block.\n");
                return err;
        }
        nilfs_update_inode(inode, ibh);
        nilfs_mdt_mark_buffer_dirty(ibh);
        nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
        brelse(ibh);
        return 0;
}

/**
 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
 * @inode: inode of the file to be registered.
 *
 * nilfs_dirty_inode() loads an inode block containing the specified
 * @inode and copies data from a nilfs_inode to a corresponding inode
 * entry in the inode block. This operation is excluded from the segment
 * construction. This function can be called both as a single operation
 * and as a part of indivisible file operations.
 */
void nilfs_dirty_inode(struct inode *inode)
{
        struct nilfs_transaction_info ti;
        struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

        if (is_bad_inode(inode)) {
                nilfs_warning(inode->i_sb, __func__,
                              "tried to mark bad_inode dirty. ignored.\n");
                dump_stack();
                return;
        }
        if (mdi) {
                nilfs_mdt_mark_dirty(inode);
                return;
        }
        nilfs_transaction_begin(inode->i_sb, &ti, 0);
        nilfs_mark_inode_dirty(inode);
        nilfs_transaction_commit(inode->i_sb); /* never fails */
}

int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                 __u64 start, __u64 len)
{
        struct the_nilfs *nilfs = NILFS_I_NILFS(inode);
        __u64 logical = 0, phys = 0, size = 0;
        __u32 flags = 0;
        loff_t isize;
        sector_t blkoff, end_blkoff;
        sector_t delalloc_blkoff;
        unsigned long delalloc_blklen;
        unsigned int blkbits = inode->i_blkbits;
        int ret, n;

        ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
        if (ret)
                return ret;

        mutex_lock(&inode->i_mutex);

        isize = i_size_read(inode);

        blkoff = start >> blkbits;
        end_blkoff = (start + len - 1) >> blkbits;

        delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
                                                        &delalloc_blkoff);

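        /*
         * Scan the requested range, reporting delayed-allocation extents
         * found in the page cache and committed extents looked up in the
         * bmap, merging contiguous blocks into a single fiemap extent.
         */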
        do {
                __u64 blkphy;
                unsigned int maxblocks;

                if (delalloc_blklen && blkoff == delalloc_blkoff) {
                        if (size) {
                                /* End of the current extent */
                                ret = fiemap_fill_next_extent(
                                        fieinfo, logical, phys, size, flags);
                                if (ret)
                                        break;
                        }
                        if (blkoff > end_blkoff)
                                break;

                        flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
                        logical = blkoff << blkbits;
                        phys = 0;
                        size = delalloc_blklen << blkbits;

                        blkoff = delalloc_blkoff + delalloc_blklen;
                        delalloc_blklen = nilfs_find_uncommitted_extent(
                                inode, blkoff, &delalloc_blkoff);
                        continue;
                }

                /*
                 * Limit the number of blocks that we look up so as
                 * not to get into the next delayed allocation extent.
                 */
                maxblocks = INT_MAX;
                if (delalloc_blklen)
                        maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
                                          maxblocks);
                blkphy = 0;

                down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
                n = nilfs_bmap_lookup_contig(
                        NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
                up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);

                if (n < 0) {
                        int past_eof;

                        if (unlikely(n != -ENOENT))
                                break; /* error */

                        /* HOLE */
                        blkoff++;
                        past_eof = ((blkoff << blkbits) >= isize);

                        if (size) {
                                /* End of the current extent */

                                if (past_eof)
                                        flags |= FIEMAP_EXTENT_LAST;

                                ret = fiemap_fill_next_extent(
                                        fieinfo, logical, phys, size, flags);
                                if (ret)
                                        break;
                                size = 0;
                        }
                        if (blkoff > end_blkoff || past_eof)
                                break;
                } else {
                        if (size) {
                                if (phys && blkphy << blkbits == phys + size) {
                                        /* The current extent goes on */
                                        size += n << blkbits;
                                } else {
                                        /* Terminate the current extent */
                                        ret = fiemap_fill_next_extent(
                                                fieinfo, logical, phys, size,
                                                flags);
                                        if (ret || blkoff > end_blkoff)
                                                break;

                                        /* Start another extent */
                                        flags = FIEMAP_EXTENT_MERGED;
                                        logical = blkoff << blkbits;
                                        phys = blkphy << blkbits;
                                        size = n << blkbits;
                                }
                        } else {
                                /* Start a new extent */
                                flags = FIEMAP_EXTENT_MERGED;
                                logical = blkoff << blkbits;
                                phys = blkphy << blkbits;
                                size = n << blkbits;
                        }
                        blkoff += n;
                }
                cond_resched();
        } while (true);

        /* If ret is 1 then we just hit the end of the extent array */
        if (ret == 1)
                ret = 0;

        mutex_unlock(&inode->i_mutex);
        return ret;
}