/*
 *  linux/fs/hfsplus/inode.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Inode handling routines
 */

#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mpage.h>
#include <linux/sched.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"
#include "xattr.h"

static int hfsplus_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, hfsplus_get_block);
}

static int hfsplus_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, hfsplus_get_block, wbc);
}

static void hfsplus_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, to, inode->i_size);
		hfsplus_file_truncate(inode);
	}
}

static int hfsplus_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int ret;

	*pagep = NULL;
	ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				hfsplus_get_block,
				&HFSPLUS_I(mapping->host)->phys_size);
	if (unlikely(ret))
		hfsplus_write_failed(mapping, pos + len);

	return ret;
}

static sector_t hfsplus_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, hfsplus_get_block);
}

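/*
 * Called when the VM wants to drop a page belonging to one of the special
 * metadata inodes (extents, catalog or attributes B-tree).  The page's
 * buffers may only be freed once no cached b-node mapping onto it is still
 * referenced, so evict unreferenced nodes from the node hash first.
 */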
static int hfsplus_releasepage(struct page *page, gfp_t mask)
{
	struct inode *inode = page->mapping->host;
	struct super_block *sb = inode->i_sb;
	struct hfs_btree *tree;
	struct hfs_bnode *node;
	u32 nidx;
	int i, res = 1;

	switch (inode->i_ino) {
	case HFSPLUS_EXT_CNID:
		tree = HFSPLUS_SB(sb)->ext_tree;
		break;
	case HFSPLUS_CAT_CNID:
		tree = HFSPLUS_SB(sb)->cat_tree;
		break;
	case HFSPLUS_ATTR_CNID:
		tree = HFSPLUS_SB(sb)->attr_tree;
		break;
	default:
		BUG();
		return 0;
	}
	if (!tree)
		return 0;
	if (tree->node_size >= PAGE_CACHE_SIZE) {
		nidx = page->index >>
			(tree->node_size_shift - PAGE_CACHE_SHIFT);
		spin_lock(&tree->hash_lock);
		node = hfs_bnode_findhash(tree, nidx);
		if (!node)
			;
		else if (atomic_read(&node->refcnt))
			res = 0;
		if (res && node) {
			hfs_bnode_unhash(node);
			hfs_bnode_free(node);
		}
		spin_unlock(&tree->hash_lock);
	} else {
		nidx = page->index <<
			(PAGE_CACHE_SHIFT - tree->node_size_shift);
		i = 1 << (PAGE_CACHE_SHIFT - tree->node_size_shift);
		spin_lock(&tree->hash_lock);
		do {
			node = hfs_bnode_findhash(tree, nidx++);
			if (!node)
				continue;
			if (atomic_read(&node->refcnt)) {
				res = 0;
				break;
			}
			hfs_bnode_unhash(node);
			hfs_bnode_free(node);
		} while (--i && nidx < tree->node_count);
		spin_unlock(&tree->hash_lock);
	}
	return res ? try_to_free_buffers(page) : 0;
}

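/*
 * Direct I/O path: hand the request straight to the block layer and, on a
 * failed extending write, trim any blocks instantiated beyond i_size.
 */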
static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = file_inode(file)->i_mapping->host;
	ssize_t ret;

	ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
				 hfsplus_get_block);

	/*
	 * In case of error extending write may have instantiated a few
	 * blocks outside i_size. Trim these off again.
	 */
	if (unlikely((rw & WRITE) && ret < 0)) {
		loff_t isize = i_size_read(inode);
		loff_t end = offset + iov_length(iov, nr_segs);

		if (end > isize)
			hfsplus_write_failed(mapping, end);
	}

	return ret;
}

static int hfsplus_writepages(struct address_space *mapping,
			      struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, hfsplus_get_block);
}

const struct address_space_operations hfsplus_btree_aops = {
	.readpage	= hfsplus_readpage,
	.writepage	= hfsplus_writepage,
	.write_begin	= hfsplus_write_begin,
	.write_end	= generic_write_end,
	.bmap		= hfsplus_bmap,
	.releasepage	= hfsplus_releasepage,
};

const struct address_space_operations hfsplus_aops = {
	.readpage	= hfsplus_readpage,
	.writepage	= hfsplus_writepage,
	.write_begin	= hfsplus_write_begin,
	.write_end	= generic_write_end,
	.bmap		= hfsplus_bmap,
	.direct_IO	= hfsplus_direct_IO,
	.writepages	= hfsplus_writepages,
};

const struct dentry_operations hfsplus_dentry_operations = {
	.d_hash       = hfsplus_hash_dentry,
	.d_compare    = hfsplus_compare_dentry,
};

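/*
 * ->lookup is only provided on file inodes, to support the "<file>/rsrc"
 * pseudo-entry: build an in-core inode for the resource fork that shares
 * the file's catalog node ID and is kept fake-hashed out of the normal
 * inode hash.
 */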
static struct dentry *hfsplus_file_lookup(struct inode *dir,
		struct dentry *dentry, unsigned int flags)
{
	struct hfs_find_data fd;
	struct super_block *sb = dir->i_sb;
	struct inode *inode = NULL;
	struct hfsplus_inode_info *hip;
	int err;

	if (HFSPLUS_IS_RSRC(dir) || strcmp(dentry->d_name.name, "rsrc"))
		goto out;

	inode = HFSPLUS_I(dir)->rsrc_inode;
	if (inode)
		goto out;

	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	hip = HFSPLUS_I(inode);
	inode->i_ino = dir->i_ino;
	INIT_LIST_HEAD(&hip->open_dir_list);
	mutex_init(&hip->extents_lock);
	hip->extent_state = 0;
	hip->flags = 0;
	hip->userflags = 0;
	set_bit(HFSPLUS_I_RSRC, &hip->flags);

	err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
	if (!err) {
		err = hfsplus_find_cat(sb, dir->i_ino, &fd);
		if (!err)
			err = hfsplus_cat_read_inode(inode, &fd);
		hfs_find_exit(&fd);
	}
	if (err) {
		iput(inode);
		return ERR_PTR(err);
	}
	hip->rsrc_inode = dir;
	HFSPLUS_I(dir)->rsrc_inode = inode;
	igrab(dir);

	/*
	 * __mark_inode_dirty expects inodes to be hashed.  Since we don't
	 * want resource fork inodes in the regular inode space, we make them
	 * appear hashed, but do not put on any lists.  hlist_del()
	 * will work fine and require no locking.
	 */
	hlist_add_fake(&inode->i_hash);

	mark_inode_dirty(inode);
out:
	d_add(dentry, inode);
	return NULL;
}

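/*
 * Translate the on-disk permission record into in-core ownership, mode and
 * inode flags, falling back to the mount options for entries that carry no
 * Unix permissions.
 */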
static void hfsplus_get_perms(struct inode *inode,
		struct hfsplus_perm *perms, int dir)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
	u16 mode;

	mode = be16_to_cpu(perms->mode);

	i_uid_write(inode, be32_to_cpu(perms->owner));
	if (!i_uid_read(inode) && !mode)
		inode->i_uid = sbi->uid;

	i_gid_write(inode, be32_to_cpu(perms->group));
	if (!i_gid_read(inode) && !mode)
		inode->i_gid = sbi->gid;

	if (dir) {
		mode = mode ? (mode & S_IALLUGO) : (S_IRWXUGO & ~(sbi->umask));
		mode |= S_IFDIR;
	} else if (!mode)
		mode = S_IFREG | ((S_IRUGO|S_IWUGO) & ~(sbi->umask));
	inode->i_mode = mode;

	HFSPLUS_I(inode)->userflags = perms->userflags;
	if (perms->rootflags & HFSPLUS_FLG_IMMUTABLE)
		inode->i_flags |= S_IMMUTABLE;
	else
		inode->i_flags &= ~S_IMMUTABLE;
	if (perms->rootflags & HFSPLUS_FLG_APPEND)
		inode->i_flags |= S_APPEND;
	else
		inode->i_flags &= ~S_APPEND;
}

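/*
 * Opens of a resource fork are accounted against its data fork inode, so
 * the open count covers all users of the file regardless of fork.
 */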
static int hfsplus_file_open(struct inode *inode, struct file *file)
{
	if (HFSPLUS_IS_RSRC(inode))
		inode = HFSPLUS_I(inode)->rsrc_inode;
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EOVERFLOW;
	atomic_inc(&HFSPLUS_I(inode)->opencnt);
	return 0;
}

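/*
 * On last close, drop preallocated space and, if the file was unlinked
 * while still open (S_DEAD), remove it from the hidden directory and free
 * its blocks.
 */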
static int hfsplus_file_release(struct inode *inode, struct file *file)
{
	struct super_block *sb = inode->i_sb;

	if (HFSPLUS_IS_RSRC(inode))
		inode = HFSPLUS_I(inode)->rsrc_inode;
	if (atomic_dec_and_test(&HFSPLUS_I(inode)->opencnt)) {
		mutex_lock(&inode->i_mutex);
		hfsplus_file_truncate(inode);
		if (inode->i_flags & S_DEAD) {
			hfsplus_delete_cat(inode->i_ino,
					   HFSPLUS_SB(sb)->hidden_dir, NULL);
			hfsplus_delete_inode(inode);
		}
		mutex_unlock(&inode->i_mutex);
	}
	return 0;
}

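/*
 * Size changes wait for in-flight direct I/O, then truncate both the page
 * cache and the on-disk extents; other attributes are copied as-is.
 */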
static int hfsplus_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if ((attr->ia_valid & ATTR_SIZE) &&
	    attr->ia_size != i_size_read(inode)) {
		inode_dio_wait(inode);
		truncate_setsize(inode, attr->ia_size);
		hfsplus_file_truncate(inode);
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

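/*
 * fsync: flush the file's data and metadata, then write out whichever of
 * the special B-tree and allocation-file inodes this inode has dirtied,
 * finishing with a flush to the underlying device unless barriers are
 * disabled.
 */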
int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end,
		       int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
	int error = 0, error2;

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return error;
	mutex_lock(&inode->i_mutex);

	/*
	 * Sync inode metadata into the catalog and extent trees.
	 */
	sync_inode_metadata(inode, 1);

	/*
	 * And explicitly write out the btrees.
	 */
	if (test_and_clear_bit(HFSPLUS_I_CAT_DIRTY, &hip->flags))
		error = filemap_write_and_wait(sbi->cat_tree->inode->i_mapping);

	if (test_and_clear_bit(HFSPLUS_I_EXT_DIRTY, &hip->flags)) {
		error2 =
			filemap_write_and_wait(sbi->ext_tree->inode->i_mapping);
		if (!error)
			error = error2;
	}

	if (test_and_clear_bit(HFSPLUS_I_ATTR_DIRTY, &hip->flags)) {
		if (sbi->attr_tree) {
			error2 =
				filemap_write_and_wait(
					    sbi->attr_tree->inode->i_mapping);
			if (!error)
				error = error2;
		} else {
			printk(KERN_ERR "hfs: sync non-existent attributes tree\n");
		}
	}

	if (test_and_clear_bit(HFSPLUS_I_ALLOC_DIRTY, &hip->flags)) {
		error2 = filemap_write_and_wait(sbi->alloc_file->i_mapping);
		if (!error)
			error = error2;
	}

	if (!test_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags))
		blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);

	mutex_unlock(&inode->i_mutex);

	return error;
}

static const struct inode_operations hfsplus_file_inode_operations = {
	.lookup		= hfsplus_file_lookup,
	.setattr	= hfsplus_setattr,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= hfsplus_listxattr,
	.removexattr	= hfsplus_removexattr,
};

static const struct file_operations hfsplus_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= generic_file_aio_write,
	.mmap		= generic_file_mmap,
	.splice_read	= generic_file_splice_read,
	.fsync		= hfsplus_file_fsync,
	.open		= hfsplus_file_open,
	.release	= hfsplus_file_release,
	.unlocked_ioctl = hfsplus_ioctl,
};

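/*
 * Allocate a fresh in-core inode for a new catalog object, assign it the
 * next catalog node ID and set up the operation vectors by file type.
 */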
struct inode *hfsplus_new_inode(struct super_block *sb, umode_t mode)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct inode *inode = new_inode(sb);
	struct hfsplus_inode_info *hip;

	if (!inode)
		return NULL;

	inode->i_ino = sbi->next_cnid++;
	inode->i_mode = mode;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	set_nlink(inode, 1);
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;

	hip = HFSPLUS_I(inode);
	INIT_LIST_HEAD(&hip->open_dir_list);
	mutex_init(&hip->extents_lock);
	atomic_set(&hip->opencnt, 0);
	hip->extent_state = 0;
	hip->flags = 0;
	hip->userflags = 0;
	memset(hip->first_extents, 0, sizeof(hfsplus_extent_rec));
	memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
	hip->alloc_blocks = 0;
	hip->first_blocks = 0;
	hip->cached_start = 0;
	hip->cached_blocks = 0;
	hip->phys_size = 0;
	hip->fs_blocks = 0;
	hip->rsrc_inode = NULL;
	if (S_ISDIR(inode->i_mode)) {
		inode->i_size = 2;
		sbi->folder_count++;
		inode->i_op = &hfsplus_dir_inode_operations;
		inode->i_fop = &hfsplus_dir_operations;
	} else if (S_ISREG(inode->i_mode)) {
		sbi->file_count++;
		inode->i_op = &hfsplus_file_inode_operations;
		inode->i_fop = &hfsplus_file_operations;
		inode->i_mapping->a_ops = &hfsplus_aops;
		hip->clump_blocks = sbi->data_clump_blocks;
	} else if (S_ISLNK(inode->i_mode)) {
		sbi->file_count++;
		inode->i_op = &page_symlink_inode_operations;
		inode->i_mapping->a_ops = &hfsplus_aops;
		hip->clump_blocks = 1;
	} else
		sbi->file_count++;
	insert_inode_hash(inode);
	mark_inode_dirty(inode);
	hfsplus_mark_mdb_dirty(sb);

	return inode;
}

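/*
 * Undo the on-disk accounting for an inode that is being removed: drop the
 * folder/file counts and release any blocks still allocated to it.
 */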
void hfsplus_delete_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (S_ISDIR(inode->i_mode)) {
		HFSPLUS_SB(sb)->folder_count--;
		hfsplus_mark_mdb_dirty(sb);
		return;
	}
	HFSPLUS_SB(sb)->file_count--;
	if (S_ISREG(inode->i_mode)) {
		if (!inode->i_nlink) {
			inode->i_size = 0;
			hfsplus_file_truncate(inode);
		}
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_size = 0;
		hfsplus_file_truncate(inode);
	}
	hfsplus_mark_mdb_dirty(sb);
}

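/*
 * Initialise the in-core fork state (extents, block counts, sizes and
 * clump size) from an on-disk fork record.
 */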
void hfsplus_inode_read_fork(struct inode *inode, struct hfsplus_fork_raw *fork)
{
	struct super_block *sb = inode->i_sb;
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	u32 count;
	int i;

	memcpy(&hip->first_extents, &fork->extents, sizeof(hfsplus_extent_rec));
	for (count = 0, i = 0; i < 8; i++)
		count += be32_to_cpu(fork->extents[i].block_count);
	hip->first_blocks = count;
	memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
	hip->cached_start = 0;
	hip->cached_blocks = 0;

	hip->alloc_blocks = be32_to_cpu(fork->total_blocks);
	hip->phys_size = inode->i_size = be64_to_cpu(fork->total_size);
	hip->fs_blocks =
		(inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
	inode_set_bytes(inode, hip->fs_blocks << sb->s_blocksize_bits);
	hip->clump_blocks =
		be32_to_cpu(fork->clump_size) >> sbi->alloc_blksz_shift;
	if (!hip->clump_blocks) {
		hip->clump_blocks = HFSPLUS_IS_RSRC(inode) ?
			sbi->rsrc_clump_blocks :
			sbi->data_clump_blocks;
	}
}

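/* Write the in-core fork state back into an on-disk fork record. */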
void hfsplus_inode_write_fork(struct inode *inode,
		struct hfsplus_fork_raw *fork)
{
	memcpy(&fork->extents, &HFSPLUS_I(inode)->first_extents,
	       sizeof(hfsplus_extent_rec));
	fork->total_size = cpu_to_be64(inode->i_size);
	fork->total_blocks = cpu_to_be32(HFSPLUS_I(inode)->alloc_blocks);
}

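/*
 * Fill in an in-core inode from the catalog record found by the caller.
 * Folders and files (including symlinks and special files stored as file
 * records) get the appropriate operations; any other record type is
 * treated as corruption.
 */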
int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
{
	hfsplus_cat_entry entry;
	int res = 0;
	u16 type;

	type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);

	HFSPLUS_I(inode)->linkid = 0;
	if (type == HFSPLUS_FOLDER) {
		struct hfsplus_cat_folder *folder = &entry.folder;

		if (fd->entrylength < sizeof(struct hfsplus_cat_folder))
			/* panic? */;
		hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
					sizeof(struct hfsplus_cat_folder));
		hfsplus_get_perms(inode, &folder->permissions, 1);
		set_nlink(inode, 1);
		inode->i_size = 2 + be32_to_cpu(folder->valence);
		inode->i_atime = hfsp_mt2ut(folder->access_date);
		inode->i_mtime = hfsp_mt2ut(folder->content_mod_date);
		inode->i_ctime = hfsp_mt2ut(folder->attribute_mod_date);
		HFSPLUS_I(inode)->create_date = folder->create_date;
		HFSPLUS_I(inode)->fs_blocks = 0;
		inode->i_op = &hfsplus_dir_inode_operations;
		inode->i_fop = &hfsplus_dir_operations;
	} else if (type == HFSPLUS_FILE) {
		struct hfsplus_cat_file *file = &entry.file;

		if (fd->entrylength < sizeof(struct hfsplus_cat_file))
			/* panic? */;
		hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
					sizeof(struct hfsplus_cat_file));

		hfsplus_inode_read_fork(inode, HFSPLUS_IS_RSRC(inode) ?
					&file->rsrc_fork : &file->data_fork);
		hfsplus_get_perms(inode, &file->permissions, 0);
		set_nlink(inode, 1);
		if (S_ISREG(inode->i_mode)) {
			if (file->permissions.dev)
				set_nlink(inode,
					  be32_to_cpu(file->permissions.dev));
			inode->i_op = &hfsplus_file_inode_operations;
			inode->i_fop = &hfsplus_file_operations;
			inode->i_mapping->a_ops = &hfsplus_aops;
		} else if (S_ISLNK(inode->i_mode)) {
			inode->i_op = &page_symlink_inode_operations;
			inode->i_mapping->a_ops = &hfsplus_aops;
		} else {
			init_special_inode(inode, inode->i_mode,
					   be32_to_cpu(file->permissions.dev));
		}
		inode->i_atime = hfsp_mt2ut(file->access_date);
		inode->i_mtime = hfsp_mt2ut(file->content_mod_date);
		inode->i_ctime = hfsp_mt2ut(file->attribute_mod_date);
		HFSPLUS_I(inode)->create_date = file->create_date;
	} else {
		printk(KERN_ERR "hfs: bad catalog entry used to create inode\n");
		res = -EIO;
	}
	return res;
}

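/*
 * Write the inode's timestamps, permissions and fork data back into its
 * catalog record, updating the data or resource fork as appropriate.
 */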
int hfsplus_cat_write_inode(struct inode *inode)
{
	struct inode *main_inode = inode;
	struct hfs_find_data fd;
	hfsplus_cat_entry entry;

	if (HFSPLUS_IS_RSRC(inode))
		main_inode = HFSPLUS_I(inode)->rsrc_inode;

	if (!main_inode->i_nlink)
		return 0;

	if (hfs_find_init(HFSPLUS_SB(main_inode->i_sb)->cat_tree, &fd))
		/* panic? */
		return -EIO;

	if (hfsplus_find_cat(main_inode->i_sb, main_inode->i_ino, &fd))
		/* panic? */
		goto out;

	if (S_ISDIR(main_inode->i_mode)) {
		struct hfsplus_cat_folder *folder = &entry.folder;

		if (fd.entrylength < sizeof(struct hfsplus_cat_folder))
			/* panic? */;
		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
					sizeof(struct hfsplus_cat_folder));
		/* simple node checks? */
		hfsplus_cat_set_perms(inode, &folder->permissions);
		folder->access_date = hfsp_ut2mt(inode->i_atime);
		folder->content_mod_date = hfsp_ut2mt(inode->i_mtime);
		folder->attribute_mod_date = hfsp_ut2mt(inode->i_ctime);
		folder->valence = cpu_to_be32(inode->i_size - 2);
		hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
					 sizeof(struct hfsplus_cat_folder));
	} else if (HFSPLUS_IS_RSRC(inode)) {
		struct hfsplus_cat_file *file = &entry.file;
		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
			       sizeof(struct hfsplus_cat_file));
		hfsplus_inode_write_fork(inode, &file->rsrc_fork);
		hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
				sizeof(struct hfsplus_cat_file));
	} else {
		struct hfsplus_cat_file *file = &entry.file;

		if (fd.entrylength < sizeof(struct hfsplus_cat_file))
			/* panic? */;
		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
					sizeof(struct hfsplus_cat_file));
		hfsplus_inode_write_fork(inode, &file->data_fork);
		hfsplus_cat_set_perms(inode, &file->permissions);
		if (HFSPLUS_FLG_IMMUTABLE &
				(file->permissions.rootflags |
					file->permissions.userflags))
			file->flags |= cpu_to_be16(HFSPLUS_FILE_LOCKED);
		else
			file->flags &= cpu_to_be16(~HFSPLUS_FILE_LOCKED);
		file->access_date = hfsp_ut2mt(inode->i_atime);
		file->content_mod_date = hfsp_ut2mt(inode->i_mtime);
		file->attribute_mod_date = hfsp_ut2mt(inode->i_ctime);
		hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
					 sizeof(struct hfsplus_cat_file));
	}

	set_bit(HFSPLUS_I_CAT_DIRTY, &HFSPLUS_I(inode)->flags);
out:
	hfs_find_exit(&fd);
	return 0;
}