/*
 * mm/page-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains functions related to writing back dirty pages at the
 * address_space level.
 *
 * 10Apr2002    akpm@zip.com.au
 *              Initial version
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/init.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/blkdev.h>
#include <linux/mpage.h>
#include <linux/rmap.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>

/*
 * The maximum number of pages to writeout in a single bdflush/kupdate
 * operation.  We do this so we don't hold I_LOCK against an inode for
 * enormous amounts of time, which would block a userspace task which has
 * been forced to throttle against that inode.  Also, the code reevaluates
 * the dirty state each time it has written this many pages.
 */
#define MAX_WRITEBACK_PAGES     1024

/*
 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
 * will look to see if it needs to force writeback or throttling.
 */
static long ratelimit_pages = 32;

static int dirty_exceeded __cacheline_aligned_in_smp;   /* Dirty mem may be over limit */

/*
 * When balance_dirty_pages decides that the caller needs to perform some
 * non-background writeback, this is how many pages it will attempt to write.
 * It should be somewhat larger than ratelimit_pages to ensure that reasonably
 * large amounts of I/O are submitted.
 */
static inline long sync_writeback_pages(void)
{
        return ratelimit_pages + ratelimit_pages / 2;
}
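
/*
 * Illustrative arithmetic (assuming the defaults above): with
 * ratelimit_pages = 32, a throttled caller is asked to write
 * 32 + 32/2 = 48 pages per trip into balance_dirty_pages().  Once
 * writeback_set_ratelimit() has capped ratelimit_pages at 4MB worth of
 * pages (see below), the chunk grows to roughly 6MB.
 */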

/* The following parameters are exported via /proc/sys/vm */

/*
 * Start background writeback (via pdflush) at this percentage
 */
int dirty_background_ratio = 10;

/*
 * The generator of dirty data starts writeback at this percentage
 */
int vm_dirty_ratio = 40;

/*
 * The interval between `kupdate'-style writebacks, in jiffies
 */
int dirty_writeback_interval = 5 * HZ;

/*
 * The longest number of jiffies for which data is allowed to remain dirty
 */
int dirty_expire_interval = 30 * HZ;

/*
 * Flag that makes the machine dump writes/reads and block dirtyings.
 */
int block_dump;

/*
 * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
 * a full sync is triggered after this time elapses without any disk activity.
 */
int laptop_mode;

EXPORT_SYMBOL(laptop_mode);

/* End of sysctl-exported parameters */


static void background_writeout(unsigned long _min_pages);

/*
 * Work out the current dirty-memory clamping and background writeout
 * thresholds.
 *
 * The main aim here is to lower them aggressively if there is a lot of mapped
 * memory around, to avoid stressing page reclaim with lots of unreclaimable
 * pages.  It is better to clamp down on writers than to start swapping and
 * performing lots of scanning.
 *
 * We only allow 1/2 of the currently-unmapped memory to be dirtied.
 *
 * We don't permit the clamping level to fall below 5% - that is getting rather
 * excessive.
 *
 * We make sure that the background writeout level is below the adjusted
 * clamping level.
 */
static void
get_dirty_limits(long *pbackground, long *pdirty,
                                        struct address_space *mapping)
{
        int background_ratio;           /* Percentages */
        int dirty_ratio;
        int unmapped_ratio;
        long background;
        long dirty;
        unsigned long available_memory = vm_total_pages;
        struct task_struct *tsk;

#ifdef CONFIG_HIGHMEM
        /*
         * We always exclude high memory from our count.
         */
        available_memory -= totalhigh_pages;
#endif

        unmapped_ratio = 100 - ((global_page_state(NR_FILE_MAPPED) +
                                global_page_state(NR_ANON_PAGES)) * 100) /
                                        vm_total_pages;

        dirty_ratio = vm_dirty_ratio;
        if (dirty_ratio > unmapped_ratio / 2)
                dirty_ratio = unmapped_ratio / 2;

        if (dirty_ratio < 5)
                dirty_ratio = 5;

        background_ratio = dirty_background_ratio;
        if (background_ratio >= dirty_ratio)
                background_ratio = dirty_ratio / 2;

        background = (background_ratio * available_memory) / 100;
        dirty = (dirty_ratio * available_memory) / 100;
        tsk = current;
        if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
                background += background / 4;
                dirty += dirty / 4;
        }
        *pbackground = background;
        *pdirty = dirty;
}
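
/*
 * Worked example (illustrative, assuming 4KB pages): on a box with 1GB of
 * non-highmem memory (262144 pages), 25% of it mapped or anonymous,
 * vm_dirty_ratio = 40 and dirty_background_ratio = 10:
 *
 *      unmapped_ratio   = 100 - 25            = 75
 *      dirty_ratio      = min(40, 75 / 2)     = 37
 *      background_ratio = 10 (already below dirty_ratio)
 *      *pdirty          = 37% of 262144 pages = 96993 pages (~379MB)
 *      *pbackground     = 10% of 262144 pages = 26214 pages (~102MB)
 *
 * PF_LESS_THROTTLE and realtime tasks then get both limits raised by 25%.
 */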

/*
 * balance_dirty_pages() must be called by processes which are generating dirty
 * data.  It looks at the number of dirty pages in the machine and will force
 * the caller to perform writeback if the system is over `vm_dirty_ratio'.
 * If we're over `background_thresh' then pdflush is woken to perform some
 * writeout.
 */
static void balance_dirty_pages(struct address_space *mapping)
{
        long nr_reclaimable;
        long background_thresh;
        long dirty_thresh;
        unsigned long pages_written = 0;
        unsigned long write_chunk = sync_writeback_pages();

        struct backing_dev_info *bdi = mapping->backing_dev_info;

        for (;;) {
                struct writeback_control wbc = {
                        .bdi            = bdi,
                        .sync_mode      = WB_SYNC_NONE,
                        .older_than_this = NULL,
                        .nr_to_write    = write_chunk,
                        .range_cyclic   = 1,
                };

                get_dirty_limits(&background_thresh, &dirty_thresh, mapping);
                nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
                                        global_page_state(NR_UNSTABLE_NFS);
                if (nr_reclaimable + global_page_state(NR_WRITEBACK) <=
                        dirty_thresh)
                                break;

                if (!dirty_exceeded)
                        dirty_exceeded = 1;

                /* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
                 * Unstable writes are a feature of certain networked
                 * filesystems (e.g. NFS) in which data may have been
                 * written to the server's write cache, but has not yet
                 * been flushed to permanent storage.
                 */
                if (nr_reclaimable) {
                        writeback_inodes(&wbc);
                        get_dirty_limits(&background_thresh,
                                                &dirty_thresh, mapping);
                        nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
                                        global_page_state(NR_UNSTABLE_NFS);
                        if (nr_reclaimable +
                                global_page_state(NR_WRITEBACK)
                                        <= dirty_thresh)
                                                break;
                        pages_written += write_chunk - wbc.nr_to_write;
                        if (pages_written >= write_chunk)
                                break;          /* We've done our duty */
                }
                congestion_wait(WRITE, HZ/10);
        }

        if (nr_reclaimable + global_page_state(NR_WRITEBACK)
                <= dirty_thresh && dirty_exceeded)
                        dirty_exceeded = 0;

        if (writeback_in_progress(bdi))
                return;         /* pdflush is already working this queue */

        /*
         * In laptop mode, we wait until hitting the higher threshold before
         * starting background writeout, and then write out all the way down
         * to the lower threshold.  So slow writers cause minimal disk activity.
         *
         * In normal mode, we start background writeout at the lower
         * background_thresh, to keep the amount of dirty memory low.
         */
        if ((laptop_mode && pages_written) ||
             (!laptop_mode && (nr_reclaimable > background_thresh)))
                pdflush_operation(background_writeout, 0);
}

void set_page_dirty_balance(struct page *page)
{
        if (set_page_dirty(page)) {
                struct address_space *mapping = page_mapping(page);

                if (mapping)
                        balance_dirty_pages_ratelimited(mapping);
        }
}

/**
 * balance_dirty_pages_ratelimited_nr - balance dirty memory state
 * @mapping: address_space which was dirtied
 * @nr_pages_dirtied: number of pages which the caller has just dirtied
 *
 * Processes which are dirtying memory should call in here once for each page
 * which was newly dirtied.  The function will periodically check the system's
 * dirty state and will initiate writeback if needed.
 *
 * On really big machines, get_writeback_state is expensive, so try to avoid
 * calling it too often (ratelimiting).  But once we're over the dirty memory
 * limit we decrease the ratelimiting by a lot, to prevent individual processes
 * from overshooting the limit by (ratelimit_pages) each.
 */
void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
                                        unsigned long nr_pages_dirtied)
{
        static DEFINE_PER_CPU(unsigned long, ratelimits) = 0;
        unsigned long ratelimit;
        unsigned long *p;

        ratelimit = ratelimit_pages;
        if (dirty_exceeded)
                ratelimit = 8;

        /*
         * Check the rate limiting. Also, we do not want to throttle real-time
         * tasks in balance_dirty_pages(). Period.
         */
        preempt_disable();
        p = &__get_cpu_var(ratelimits);
        *p += nr_pages_dirtied;
        if (unlikely(*p >= ratelimit)) {
                *p = 0;
                preempt_enable();
                balance_dirty_pages(mapping);
                return;
        }
        preempt_enable();
}
EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
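
/*
 * Typical call pattern (sketch; compare set_page_dirty_balance() above): a
 * task that has just dirtied one page calls the single-page wrapper, which
 * is assumed to pass nr_pages_dirtied == 1:
 *
 *      set_page_dirty(page);
 *      balance_dirty_pages_ratelimited(mapping);
 *
 * Most calls only bump the per-cpu counter; once it reaches ratelimit_pages
 * the expensive balance_dirty_pages() path runs.
 */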

void throttle_vm_writeout(void)
{
        long background_thresh;
        long dirty_thresh;

        for ( ; ; ) {
                get_dirty_limits(&background_thresh, &dirty_thresh, NULL);

                /*
                 * Boost the allowable dirty threshold a bit for page
                 * allocators so they don't get DoS'ed by heavy writers
                 */
                dirty_thresh += dirty_thresh / 10;      /* wheeee... */

                if (global_page_state(NR_UNSTABLE_NFS) +
                        global_page_state(NR_WRITEBACK) <= dirty_thresh)
                                break;
                congestion_wait(WRITE, HZ/10);
        }
}
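
/*
 * Illustrative arithmetic: with a dirty_thresh of 96993 pages (the worked
 * example above), page allocators are only throttled here once
 * NR_UNSTABLE_NFS + NR_WRITEBACK exceeds 96993 + 96993/10 = 106692 pages,
 * i.e. about 10% of headroom over the normal dirty limit.
 */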

/*
 * writeback at least _min_pages, and keep writing until the amount of dirty
 * memory is less than the background threshold, or until we're all clean.
 */
static void background_writeout(unsigned long _min_pages)
{
        long min_pages = _min_pages;
        struct writeback_control wbc = {
                .bdi            = NULL,
                .sync_mode      = WB_SYNC_NONE,
                .older_than_this = NULL,
                .nr_to_write    = 0,
                .nonblocking    = 1,
                .range_cyclic   = 1,
        };

        for ( ; ; ) {
                long background_thresh;
                long dirty_thresh;

                get_dirty_limits(&background_thresh, &dirty_thresh, NULL);
                if (global_page_state(NR_FILE_DIRTY) +
                        global_page_state(NR_UNSTABLE_NFS) < background_thresh
                                && min_pages <= 0)
                        break;
                wbc.encountered_congestion = 0;
                wbc.nr_to_write = MAX_WRITEBACK_PAGES;
                wbc.pages_skipped = 0;
                writeback_inodes(&wbc);
                min_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
                if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
                        /* Wrote less than expected */
                        congestion_wait(WRITE, HZ/10);
                        if (!wbc.encountered_congestion)
                                break;
                }
        }
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.  Returns 0 if a pdflush thread was dispatched.  Returns
 * -1 if all pdflush threads were busy.
 */
int wakeup_pdflush(long nr_pages)
{
        if (nr_pages == 0)
                nr_pages = global_page_state(NR_FILE_DIRTY) +
                                global_page_state(NR_UNSTABLE_NFS);
        return pdflush_operation(background_writeout, nr_pages);
}

static void wb_timer_fn(unsigned long unused);
static void laptop_timer_fn(unsigned long unused);

static DEFINE_TIMER(wb_timer, wb_timer_fn, 0, 0);
static DEFINE_TIMER(laptop_mode_wb_timer, laptop_timer_fn, 0, 0);

/*
 * Periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than dirty_writeback_interval, then leave a one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static void wb_kupdate(unsigned long arg)
{
        unsigned long oldest_jif;
        unsigned long start_jif;
        unsigned long next_jif;
        long nr_to_write;
        struct writeback_control wbc = {
                .bdi            = NULL,
                .sync_mode      = WB_SYNC_NONE,
                .older_than_this = &oldest_jif,
                .nr_to_write    = 0,
                .nonblocking    = 1,
                .for_kupdate    = 1,
                .range_cyclic   = 1,
        };

        sync_supers();

        oldest_jif = jiffies - dirty_expire_interval;
        start_jif = jiffies;
        next_jif = start_jif + dirty_writeback_interval;
        nr_to_write = global_page_state(NR_FILE_DIRTY) +
                        global_page_state(NR_UNSTABLE_NFS) +
                        (inodes_stat.nr_inodes - inodes_stat.nr_unused);
        while (nr_to_write > 0) {
                wbc.encountered_congestion = 0;
                wbc.nr_to_write = MAX_WRITEBACK_PAGES;
                writeback_inodes(&wbc);
                if (wbc.nr_to_write > 0) {
                        if (wbc.encountered_congestion)
                                congestion_wait(WRITE, HZ/10);
                        else
                                break;  /* All the old data is written */
                }
                nr_to_write -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
        }
        if (time_before(next_jif, jiffies + HZ))
                next_jif = jiffies + HZ;
        if (dirty_writeback_interval)
                mod_timer(&wb_timer, next_jif);
}

/*
 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
 */
int dirty_writeback_centisecs_handler(ctl_table *table, int write,
                struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
        proc_dointvec_userhz_jiffies(table, write, file, buffer, length, ppos);
        if (dirty_writeback_interval) {
                mod_timer(&wb_timer,
                        jiffies + dirty_writeback_interval);
        } else {
                del_timer(&wb_timer);
        }
        return 0;
}
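
/*
 * Usage note: the value is exposed in centiseconds and converted to jiffies
 * by proc_dointvec_userhz_jiffies(), so for example
 *
 *      echo 500 > /proc/sys/vm/dirty_writeback_centisecs
 *
 * re-arms wb_timer for kupdate-style writeback every 5 seconds, while
 * writing 0 disables the periodic writeback entirely.
 */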

static void wb_timer_fn(unsigned long unused)
{
        if (pdflush_operation(wb_kupdate, 0) < 0)
                mod_timer(&wb_timer, jiffies + HZ); /* delay 1 second */
}

static void laptop_flush(unsigned long unused)
{
        sys_sync();
}

static void laptop_timer_fn(unsigned long unused)
{
        pdflush_operation(laptop_flush, 0);
}

/*
 * We've spun up the disk and we're in laptop mode: schedule writeback
 * of all dirty data a few seconds from now.  If the flush is already scheduled
 * then push it back - the user is still using the disk.
 */
void laptop_io_completion(void)
{
        mod_timer(&laptop_mode_wb_timer, jiffies + laptop_mode);
}

/*
 * We're in laptop mode and we've just synced. The sync's writes will have
 * caused another writeback to be scheduled by laptop_io_completion.
 * Nothing needs to be written back anymore, so we unschedule the writeback.
 */
void laptop_sync_completion(void)
{
        del_timer(&laptop_mode_wb_timer);
}

/*
 * If ratelimit_pages is too high then we can get into dirty-data overload
 * if a large number of processes all perform writes at the same time.
 * If it is too low then SMP machines will call the (expensive)
 * get_writeback_state too often.
 *
 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
 * thresholds before writeback cuts in.
 *
 * But the limit should not be set too high, because it also controls the
 * amount of memory which the balance_dirty_pages() caller has to write back.
 * If this is too large then the caller will block on the IO queue all the
 * time.  So limit it to four megabytes - the balance_dirty_pages() caller
 * will write six megabyte chunks, max.
 */

void writeback_set_ratelimit(void)
{
        ratelimit_pages = vm_total_pages / (num_online_cpus() * 32);
        if (ratelimit_pages < 16)
                ratelimit_pages = 16;
        if (ratelimit_pages * PAGE_CACHE_SIZE > 4096 * 1024)
                ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;
}
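
/*
 * Worked example (illustrative, assuming 4KB pages): a 4-CPU box with 1GB
 * of memory (262144 pages) gives
 *
 *      262144 / (4 * 32) = 2048 pages = 8MB,
 *
 * which exceeds the 4MB cap, so ratelimit_pages becomes 1024 pages and
 * sync_writeback_pages() hands balance_dirty_pages() callers chunks of
 * 1024 + 512 = 1536 pages - the "six megabyte" maximum noted above.
 */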

static int __cpuinit
ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
{
        writeback_set_ratelimit();
        return NOTIFY_DONE;
}

static struct notifier_block __cpuinitdata ratelimit_nb = {
        .notifier_call  = ratelimit_handler,
        .next           = NULL,
};

/*
 * Called early on to tune the page writeback dirty limits.
 *
 * We used to scale dirty pages according to how total memory
 * related to pages that could be allocated for buffers (by
 * comparing nr_free_buffer_pages() to vm_total_pages).
 *
 * However, that was when we used "dirty_ratio" to scale with
 * all memory, and we don't do that any more. "dirty_ratio"
 * is now applied to total non-HIGHPAGE memory (by subtracting
 * totalhigh_pages from vm_total_pages), and as such we can't
 * get into the old insane situation any more where we had
 * large amounts of dirty pages compared to a small amount of
 * non-HIGHMEM memory.
 *
 * But we might still want to scale the dirty_ratio by how
 * much memory the box has..
 */
void __init page_writeback_init(void)
{
        mod_timer(&wb_timer, jiffies + dirty_writeback_interval);
        writeback_set_ratelimit();
        register_cpu_notifier(&ratelimit_nb);
}

/**
 * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them.
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 *
 * This is a library function, which implements the writepages()
 * address_space_operation.
 *
 * If a page is already under I/O, generic_writepages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
 * and msync() need to guarantee that all the data which was dirty at the time
 * the call was made get new I/O started against them.  If wbc->sync_mode is
 * WB_SYNC_ALL then we were called for data integrity and we must wait for
 * existing IO to complete.
 *
 * Derived from mpage_writepages() - if you fix this you should check that
 * also!
 */
int generic_writepages(struct address_space *mapping,
                       struct writeback_control *wbc)
{
        struct backing_dev_info *bdi = mapping->backing_dev_info;
        int ret = 0;
        int done = 0;
        int (*writepage)(struct page *page, struct writeback_control *wbc);
        struct pagevec pvec;
        int nr_pages;
        pgoff_t index;
        pgoff_t end;            /* Inclusive */
        int scanned = 0;
        int range_whole = 0;

        if (wbc->nonblocking && bdi_write_congested(bdi)) {
                wbc->encountered_congestion = 1;
                return 0;
        }

        writepage = mapping->a_ops->writepage;

        /* deal with chardevs and other special file */
        if (!writepage)
                return 0;

        pagevec_init(&pvec, 0);
        if (wbc->range_cyclic) {
                index = mapping->writeback_index; /* Start from prev offset */
                end = -1;
        } else {
                index = wbc->range_start >> PAGE_CACHE_SHIFT;
                end = wbc->range_end >> PAGE_CACHE_SHIFT;
                if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
                        range_whole = 1;
                scanned = 1;
        }
retry:
        while (!done && (index <= end) &&
               (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
                                              PAGECACHE_TAG_DIRTY,
                                              min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
                unsigned i;

                scanned = 1;
                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];

                        /*
                         * At this point we hold neither mapping->tree_lock nor
                         * lock on the page itself: the page may be truncated or
                         * invalidated (changing page->mapping to NULL), or even
                         * swizzled back from swapper_space to tmpfs file
                         * mapping
                         */
                        lock_page(page);

                        if (unlikely(page->mapping != mapping)) {
                                unlock_page(page);
                                continue;
                        }

                        if (!wbc->range_cyclic && page->index > end) {
                                done = 1;
                                unlock_page(page);
                                continue;
                        }

                        if (wbc->sync_mode != WB_SYNC_NONE)
                                wait_on_page_writeback(page);

                        if (PageWriteback(page) ||
                            !clear_page_dirty_for_io(page)) {
                                unlock_page(page);
                                continue;
                        }

                        ret = (*writepage)(page, wbc);
                        if (ret) {
                                if (ret == -ENOSPC)
                                        set_bit(AS_ENOSPC, &mapping->flags);
                                else
                                        set_bit(AS_EIO, &mapping->flags);
                        }

                        if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE))
                                unlock_page(page);
                        if (ret || (--(wbc->nr_to_write) <= 0))
                                done = 1;
                        if (wbc->nonblocking && bdi_write_congested(bdi)) {
                                wbc->encountered_congestion = 1;
                                done = 1;
                        }
                }
                pagevec_release(&pvec);
                cond_resched();
        }
        if (!scanned && !done) {
                /*
                 * We hit the last page and there is more work to be done: wrap
                 * back to the start of the file
                 */
                scanned = 1;
                index = 0;
                goto retry;
        }
        if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
                mapping->writeback_index = index;
        return ret;
}

EXPORT_SYMBOL(generic_writepages);

int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
        int ret;

        if (wbc->nr_to_write <= 0)
                return 0;
        wbc->for_writepages = 1;
        if (mapping->a_ops->writepages)
                ret = mapping->a_ops->writepages(mapping, wbc);
        else
                ret = generic_writepages(mapping, wbc);
        wbc->for_writepages = 0;
        return ret;
}

/**
 * write_one_page - write out a single page and optionally wait on I/O
 * @page: the page to write
 * @wait: if true, wait on writeout
 *
 * The page must be locked by the caller and will be unlocked upon return.
 *
 * write_one_page() returns a negative error code if I/O failed.
 */
int write_one_page(struct page *page, int wait)
{
        struct address_space *mapping = page->mapping;
        int ret = 0;
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = 1,
        };

        BUG_ON(!PageLocked(page));

        if (wait)
                wait_on_page_writeback(page);

        if (clear_page_dirty_for_io(page)) {
                page_cache_get(page);
                ret = mapping->a_ops->writepage(page, &wbc);
                if (ret == 0 && wait) {
                        wait_on_page_writeback(page);
                        if (PageError(page))
                                ret = -EIO;
                }
                page_cache_release(page);
        } else {
                unlock_page(page);
        }
        return ret;
}
EXPORT_SYMBOL(write_one_page);

/*
 * For address_spaces which do not use buffers nor write back.
 */
int __set_page_dirty_no_writeback(struct page *page)
{
        if (!PageDirty(page))
                SetPageDirty(page);
        return 0;
}

/*
 * For address_spaces which do not use buffers.  Just tag the page as dirty in
 * its radix tree.
 *
 * This is also used when a single buffer is being dirtied: we want to set the
 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
 *
 * Most callers have locked the page, which pins the address_space in memory.
 * But zap_pte_range() does not lock the page; in that case the mapping is
 * pinned by the vma's ->vm_file reference.
 *
 * We take care to handle the case where the page was truncated from the
 * mapping by re-checking page_mapping() inside tree_lock.
 */
int __set_page_dirty_nobuffers(struct page *page)
{
        if (!TestSetPageDirty(page)) {
                struct address_space *mapping = page_mapping(page);
                struct address_space *mapping2;

                if (!mapping)
                        return 1;

                write_lock_irq(&mapping->tree_lock);
                mapping2 = page_mapping(page);
                if (mapping2) { /* Race with truncate? */
                        BUG_ON(mapping2 != mapping);
                        if (mapping_cap_account_dirty(mapping)) {
                                __inc_zone_page_state(page, NR_FILE_DIRTY);
                                task_io_account_write(PAGE_CACHE_SIZE);
                        }
                        radix_tree_tag_set(&mapping->page_tree,
                                page_index(page), PAGECACHE_TAG_DIRTY);
                }
                write_unlock_irq(&mapping->tree_lock);
                if (mapping->host) {
                        /* !PageAnon && !swapper_space */
                        __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
                }
                return 1;
        }
        return 0;
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);

/*
 * When a writepage implementation decides that it doesn't want to write this
 * page for some reason, it should redirty the locked page via
 * redirty_page_for_writepage() and it should then unlock the page and return 0
 */
int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
{
        wbc->pages_skipped++;
        return __set_page_dirty_nobuffers(page);
}
EXPORT_SYMBOL(redirty_page_for_writepage);

/*
 * If the mapping doesn't provide a set_page_dirty a_op, then
 * just fall through and assume that it wants buffer_heads.
 */
int fastcall set_page_dirty(struct page *page)
{
        struct address_space *mapping = page_mapping(page);

        if (likely(mapping)) {
                int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
#ifdef CONFIG_BLOCK
                if (!spd)
                        spd = __set_page_dirty_buffers;
#endif
                return (*spd)(page);
        }
        if (!PageDirty(page)) {
                if (!TestSetPageDirty(page))
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL(set_page_dirty);

/*
 * set_page_dirty() is racy if the caller has no reference against
 * page->mapping->host, and if the page is unlocked.  This is because another
 * CPU could truncate the page off the mapping and then free the mapping.
 *
 * Usually, the page _is_ locked, or the caller is a user-space process which
 * holds a reference on the inode by having an open file.
 *
 * In other cases, the page should be locked before running set_page_dirty().
 */
int set_page_dirty_lock(struct page *page)
{
        int ret;

        lock_page_nosync(page);
        ret = set_page_dirty(page);
        unlock_page(page);
        return ret;
}
EXPORT_SYMBOL(set_page_dirty_lock);

/*
 * Clear a page's dirty flag, while caring for dirty memory accounting.
 * Returns true if the page was previously dirty.
 *
 * This is for preparing to put the page under writeout.  We leave the page
 * tagged as dirty in the radix tree so that a concurrent write-for-sync
 * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
 * implementation will run either set_page_writeback() or set_page_dirty(),
 * at which stage we bring the page's dirty flag and radix-tree dirty tag
 * back into sync.
 *
 * This incoherency between the page's dirty flag and radix-tree tag is
 * unfortunate, but it only exists while the page is locked.
 */
int clear_page_dirty_for_io(struct page *page)
{
        struct address_space *mapping = page_mapping(page);

        if (mapping && mapping_cap_account_dirty(mapping)) {
                /*
                 * Yes, Virginia, this is indeed insane.
                 *
                 * We use this sequence to make sure that
                 *  (a) we account for dirty stats properly
                 *  (b) we tell the low-level filesystem to
                 *      mark the whole page dirty if it was
                 *      dirty in a pagetable. Only to then
                 *  (c) clean the page again and return 1 to
                 *      cause the writeback.
                 *
                 * This way we avoid all nasty races with the
                 * dirty bit in multiple places and clearing
                 * them concurrently from different threads.
                 *
                 * Note! Normally the "set_page_dirty(page)"
                 * has no effect on the actual dirty bit - since
                 * that will already usually be set. But we
                 * need the side effects, and it can help us
                 * avoid races.
                 *
                 * We basically use the page "master dirty bit"
                 * as a serialization point for all the different
                 * threads doing their things.
                 *
                 * FIXME! We still have a race here: if somebody
                 * adds the page back to the page tables in
                 * between the "page_mkclean()" and the "TestClearPageDirty()",
                 * we might have it mapped without the dirty bit set.
                 */
                if (page_mkclean(page))
                        set_page_dirty(page);
                if (TestClearPageDirty(page)) {
                        dec_zone_page_state(page, NR_FILE_DIRTY);
                        return 1;
                }
                return 0;
        }
        return TestClearPageDirty(page);
}
EXPORT_SYMBOL(clear_page_dirty_for_io);

int test_clear_page_writeback(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        int ret;

        if (mapping) {
                unsigned long flags;

                write_lock_irqsave(&mapping->tree_lock, flags);
                ret = TestClearPageWriteback(page);
                if (ret)
                        radix_tree_tag_clear(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_WRITEBACK);
                write_unlock_irqrestore(&mapping->tree_lock, flags);
        } else {
                ret = TestClearPageWriteback(page);
        }
        return ret;
}

int test_set_page_writeback(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        int ret;

        if (mapping) {
                unsigned long flags;

                write_lock_irqsave(&mapping->tree_lock, flags);
                ret = TestSetPageWriteback(page);
                if (!ret)
                        radix_tree_tag_set(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_WRITEBACK);
                if (!PageDirty(page))
                        radix_tree_tag_clear(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_DIRTY);
                write_unlock_irqrestore(&mapping->tree_lock, flags);
        } else {
                ret = TestSetPageWriteback(page);
        }
        return ret;
}
EXPORT_SYMBOL(test_set_page_writeback);

/*
 * Return true if any of the pages in the mapping are marked with the
 * passed tag.
 */
int mapping_tagged(struct address_space *mapping, int tag)
{
        unsigned long flags;
        int ret;

        read_lock_irqsave(&mapping->tree_lock, flags);
        ret = radix_tree_tagged(&mapping->page_tree, tag);
        read_unlock_irqrestore(&mapping->tree_lock, flags);
        return ret;
}
EXPORT_SYMBOL(mapping_tagged);