/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kthread.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/tracepoint.h>
#include <linux/device.h>
#include "internal.h"

/*
 * 4MB minimal write chunk size
 */
#define MIN_WRITEBACK_PAGES	(4096UL >> (PAGE_CACHE_SHIFT - 10))

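/*
 * The constant above is expressed in KiB: a page is
 * 1 << (PAGE_CACHE_SHIFT - 10) KiB, so with 4KB pages
 * (PAGE_CACHE_SHIFT == 12) this evaluates to 4096 >> 2 == 1024 pages,
 * i.e. 4MB per minimal write chunk.
 */
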
/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_work {
	long nr_pages;
	struct super_block *sb;
	unsigned long *older_than_this;
	enum writeback_sync_modes sync_mode;
	unsigned int tagged_writepages:1;
	unsigned int for_kupdate:1;
	unsigned int range_cyclic:1;
	unsigned int for_background:1;
	unsigned int for_sync:1;	/* sync(2) WB_SYNC_ALL writeback */
	enum wb_reason reason;		/* why was writeback initiated? */

	struct list_head list;		/* pending work list */
	struct completion *done;	/* set if the caller waits */
};

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return test_bit(BDI_writeback_running, &bdi->state);
}
EXPORT_SYMBOL(writeback_in_progress);

struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (sb_is_blkdev_sb(sb))
		return blk_get_backing_dev_info(I_BDEV(inode));
	return sb->s_bdi;
}
EXPORT_SYMBOL_GPL(inode_to_bdi);

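/*
 * Note: inodes on the "bdev" pseudo-superblock all share one superblock, but
 * their pages must be written through the BDI of the individual block device
 * they describe, hence the special case above; ordinary inodes simply use
 * their superblock's BDI.
 */
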
static inline struct inode *wb_inode(struct list_head *head)
{
	return list_entry(head, struct inode, i_wb_list);
}

/*
 * Include the creation of the trace points after defining the
 * wb_writeback_work structure and inline functions so that the definition
 * remains local to this file.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/writeback.h>

EXPORT_TRACEPOINT_SYMBOL_GPL(wbc_writepage);

static void bdi_wakeup_thread(struct backing_dev_info *bdi)
{
	spin_lock_bh(&bdi->wb_lock);
	if (test_bit(BDI_registered, &bdi->state))
		mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
	spin_unlock_bh(&bdi->wb_lock);
}

static void bdi_queue_work(struct backing_dev_info *bdi,
			   struct wb_writeback_work *work)
{
	trace_writeback_queue(bdi, work);

	spin_lock_bh(&bdi->wb_lock);
	if (!test_bit(BDI_registered, &bdi->state)) {
		if (work->done)
			complete(work->done);
		goto out_unlock;
	}
	list_add_tail(&work->list, &bdi->work_list);
	mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
out_unlock:
	spin_unlock_bh(&bdi->wb_lock);
}

static void
__bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
		      bool range_cyclic, enum wb_reason reason)
{
	struct wb_writeback_work *work;

	/*
	 * This is WB_SYNC_NONE writeback, so if allocation fails just
	 * wakeup the thread for old dirty data writeback
	 */
	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		trace_writeback_nowork(bdi);
		bdi_wakeup_thread(bdi);
		return;
	}

	work->sync_mode	= WB_SYNC_NONE;
	work->nr_pages	= nr_pages;
	work->range_cyclic = range_cyclic;
	work->reason	= reason;

	bdi_queue_work(bdi, work);
}

/**
 * bdi_start_writeback - start writeback
 * @bdi: the backing device to write from
 * @nr_pages: the number of pages to write
 * @reason: reason why some writeback work was initiated
 *
 * Description:
 *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
 *   started; by the time this function returns we make no guarantees
 *   on completion. The caller need not hold the sb s_umount semaphore.
 */
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
			enum wb_reason reason)
{
	__bdi_start_writeback(bdi, nr_pages, true, reason);
}

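/*
 * Example (illustrative sketch, not taken from an in-tree caller): code that
 * wants roughly 16MB of dirty data pushed out opportunistically, without
 * waiting, could do something like
 *
 *	bdi_start_writeback(bdi, 4096, WB_REASON_TRY_TO_FREE_PAGES);
 *
 * (4096 pages == 16MB with 4KB pages; WB_REASON_TRY_TO_FREE_PAGES is one of
 * the generic wb_reason values defined elsewhere.)  The work item is merely
 * queued and picked up by the flusher worker later; completion is neither
 * waited for nor guaranteed.
 */
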
/**
 * bdi_start_background_writeback - start background writeback
 * @bdi: the backing device to write from
 *
 * Description:
 *   This makes sure WB_SYNC_NONE background writeback happens. When
 *   this function returns, it is only guaranteed that for given BDI
 *   some IO is happening if we are over background dirty threshold.
 *   Caller need not hold sb s_umount semaphore.
 */
void bdi_start_background_writeback(struct backing_dev_info *bdi)
{
	/*
	 * We just wake up the flusher thread. It will perform background
	 * writeback as soon as there is no other work to do.
	 */
	trace_writeback_wake_background(bdi);
	bdi_wakeup_thread(bdi);
}

/*
 * Remove the inode from the writeback list it is on.
 */
void inode_wb_list_del(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	spin_lock(&bdi->wb.list_lock);
	list_del_init(&inode->i_wb_list);
	spin_unlock(&bdi->wb.list_lock);
}

/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);
	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		tail = wb_inode(wb->b_dirty.next);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_wb_list, &wb->b_dirty);
}

/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);
	list_move(&inode->i_wb_list, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	inode->i_state &= ~I_SYNC;
	/* If inode is clean and unused, put it into LRU now... */
	inode_add_lru(inode);
	/* Waiters must see I_SYNC cleared before being woken up */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

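/*
 * Note: the smp_mb() above orders the I_SYNC clearing before the wakeup, so
 * a task sleeping in __inode_wait_for_writeback() or
 * inode_sleep_on_writeback() that re-checks i_state after being woken will
 * observe I_SYNC already cleared.
 */
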
static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in distant past.
	 * This test is necessary to prevent such wrapped-around relative times
	 * from permanently stopping the whole bdi writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}

/*
 * Move expired (dirtied before work->older_than_this) dirty inodes from
 * @delaying_queue to @dispatch_queue.
 */
static int move_expired_inodes(struct list_head *delaying_queue,
			       struct list_head *dispatch_queue,
			       struct wb_writeback_work *work)
{
	LIST_HEAD(tmp);
	struct list_head *pos, *node;
	struct super_block *sb = NULL;
	struct inode *inode;
	int do_sb_sort = 0;
	int moved = 0;

	while (!list_empty(delaying_queue)) {
		inode = wb_inode(delaying_queue->prev);
		if (work->older_than_this &&
		    inode_dirtied_after(inode, *work->older_than_this))
			break;
		list_move(&inode->i_wb_list, &tmp);
		moved++;
		if (sb_is_blkdev_sb(inode->i_sb))
			continue;
		if (sb && sb != inode->i_sb)
			do_sb_sort = 1;
		sb = inode->i_sb;
	}

	/* just one sb in list, splice to dispatch_queue and we're done */
	if (!do_sb_sort) {
		list_splice(&tmp, dispatch_queue);
		goto out;
	}

	/* Move inodes from one superblock together */
	while (!list_empty(&tmp)) {
		sb = wb_inode(tmp.prev)->i_sb;
		list_for_each_prev_safe(pos, node, &tmp) {
			inode = wb_inode(pos);
			if (inode->i_sb == sb)
				list_move(&inode->i_wb_list, dispatch_queue);
		}
	}
out:
	return moved;
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 * Before
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    gf         edc     BA
 * After
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    g          fBAedc
 *                                      |
 *                                      +--> dequeue for IO
 */
static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
{
	int moved;

	assert_spin_locked(&wb->list_lock);
	list_splice_init(&wb->b_more_io, &wb->b_io);
	moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, work);
	trace_writeback_queue_io(wb, work, moved);
}

static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode)) {
		trace_writeback_write_inode_start(inode, wbc);
		ret = inode->i_sb->s_op->write_inode(inode, wbc);
		trace_writeback_write_inode(inode, wbc);
		return ret;
	}
	return 0;
}

/*
 * Wait for writeback on an inode to complete. Called with i_lock held.
 * Caller must make sure inode cannot go away when we drop i_lock.
 */
static void __inode_wait_for_writeback(struct inode *inode)
	__releases(inode->i_lock)
	__acquires(inode->i_lock)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	while (inode->i_state & I_SYNC) {
		spin_unlock(&inode->i_lock);
		__wait_on_bit(wqh, &wq, bit_wait,
			      TASK_UNINTERRUPTIBLE);
		spin_lock(&inode->i_lock);
	}
}

/*
 * Wait for writeback on an inode to complete. Caller must have inode pinned.
 */
void inode_wait_for_writeback(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	__inode_wait_for_writeback(inode);
	spin_unlock(&inode->i_lock);
}

/*
 * Sleep until I_SYNC is cleared. This function must be called with i_lock
 * held and drops it. It is aimed for callers not holding any inode reference
 * so once i_lock is dropped, inode can go away.
 */
static void inode_sleep_on_writeback(struct inode *inode)
	__releases(inode->i_lock)
{
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	int sleep;

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	sleep = inode->i_state & I_SYNC;
	spin_unlock(&inode->i_lock);
	if (sleep)
		schedule();
	finish_wait(wqh, &wait);
}

/*
 * Find proper writeback list for the inode depending on its current state and
 * possibly also change of its state while we were doing writeback.  Here we
 * handle things such as livelock prevention or fairness of writeback among
 * inodes. This function can be called only by the flusher thread - no one
 * else processes all inodes in writeback lists and requeueing inodes behind
 * the flusher thread's back can have unexpected consequences.
 */
static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
			  struct writeback_control *wbc)
{
	if (inode->i_state & I_FREEING)
		return;

	/*
	 * Sync livelock prevention. Each inode is tagged and synced in one
	 * shot. If still dirty, it will be redirty_tail()'ed below.  Update
	 * the dirty time to prevent enqueue and sync it again.
	 */
	if ((inode->i_state & I_DIRTY) &&
	    (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
		inode->dirtied_when = jiffies;

	if (wbc->pages_skipped) {
		/*
		 * writeback is not making progress due to locked
		 * buffers. Skip this inode for now.
		 */
		redirty_tail(inode, wb);
		return;
	}

	if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
		/*
		 * We didn't write back all the pages.  nfs_writepages()
		 * sometimes bails out without doing anything.
		 */
		if (wbc->nr_to_write <= 0) {
			/* Slice used up. Queue for next turn. */
			requeue_io(inode, wb);
		} else {
			/*
			 * Writeback blocked by something other than
			 * congestion. Delay the inode for some time to
			 * avoid spinning on the CPU (100% iowait)
			 * retrying writeback of the dirty page/inode
			 * that cannot be performed immediately.
			 */
			redirty_tail(inode, wb);
		}
	} else if (inode->i_state & I_DIRTY) {
		/*
		 * Filesystems can dirty the inode during writeback operations,
		 * such as delayed allocation during submission or metadata
		 * updates after data IO completion.
		 */
		redirty_tail(inode, wb);
	} else {
		/* The inode is clean. Remove from writeback lists. */
		list_del_init(&inode->i_wb_list);
	}
}

/*
 * Write out an inode and its dirty pages. Do not update the writeback list
 * linkage. That is left to the caller. The caller is also responsible for
 * setting I_SYNC flag and calling inode_sync_complete() to clear it.
 */
static int
__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	long nr_to_write = wbc->nr_to_write;
	unsigned dirty;
	int ret;

	WARN_ON(!(inode->i_state & I_SYNC));

	trace_writeback_single_inode_start(inode, wbc, nr_to_write);

	ret = do_writepages(mapping, wbc);

	/*
	 * Make sure to wait on the data before writing out the metadata.
	 * This is important for filesystems that modify metadata on data
	 * I/O completion. We don't do it for sync(2) writeback because it has a
	 * separate, external IO completion path and ->sync_fs for guaranteeing
	 * inode metadata is written back correctly.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	/*
	 * Some filesystems may redirty the inode during the writeback
	 * due to delalloc, clear dirty metadata flags right before
	 * write_inode()
	 */
	spin_lock(&inode->i_lock);

	dirty = inode->i_state & I_DIRTY;
	inode->i_state &= ~I_DIRTY;

	/*
	 * Paired with smp_mb() in __mark_inode_dirty().  This allows
	 * __mark_inode_dirty() to test i_state without grabbing i_lock -
	 * either they see the I_DIRTY bits cleared or we see the dirtied
	 * inode.
	 *
	 * I_DIRTY_PAGES is always cleared together above even if @mapping
	 * still has dirty pages.  The flag is reinstated after smp_mb() if
	 * necessary.  This guarantees that either __mark_inode_dirty()
	 * sees clear I_DIRTY_PAGES or we see PAGECACHE_TAG_DIRTY.
	 */
	smp_mb();

	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		inode->i_state |= I_DIRTY_PAGES;

	spin_unlock(&inode->i_lock);

	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wbc);
		if (ret == 0)
			ret = err;
	}
	trace_writeback_single_inode(inode, wbc, nr_to_write);
	return ret;
}

/*
 * Write out an inode's dirty pages. Either the caller has an active reference
 * on the inode or the inode has I_WILL_FREE set.
 *
 * This function is designed to be called for writing back one inode at a
 * time, which is what we do when e.g. called from a filesystem. The flusher
 * thread uses __writeback_single_inode() instead and does more profound
 * writeback list handling in writeback_sb_inodes().
 */
static int
writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
		       struct writeback_control *wbc)
{
	int ret = 0;

	spin_lock(&inode->i_lock);
	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		if (wbc->sync_mode != WB_SYNC_ALL)
			goto out;
		/*
		 * It's a data-integrity sync. We must wait. Since callers hold
		 * inode reference or inode has I_WILL_FREE set, it cannot go
		 * away under us.
		 */
		__inode_wait_for_writeback(inode);
	}
	WARN_ON(inode->i_state & I_SYNC);
	/*
	 * Skip inode if it is clean and we have no outstanding writeback in
	 * WB_SYNC_ALL mode. We don't want to mess with writeback lists in this
	 * function since flusher thread may be doing for example sync in
	 * parallel and if we move the inode, it could get skipped. So here we
	 * make sure inode is on some writeback list and leave it there unless
	 * we have completely cleaned the inode.
	 */
	if (!(inode->i_state & I_DIRTY) &&
	    (wbc->sync_mode != WB_SYNC_ALL ||
	     !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)))
		goto out;
	inode->i_state |= I_SYNC;
	spin_unlock(&inode->i_lock);

	ret = __writeback_single_inode(inode, wbc);

	spin_lock(&wb->list_lock);
	spin_lock(&inode->i_lock);
	/*
	 * If inode is clean, remove it from writeback lists. Otherwise don't
	 * touch it. See comment above for explanation.
	 */
	if (!(inode->i_state & I_DIRTY))
		list_del_init(&inode->i_wb_list);
	spin_unlock(&wb->list_lock);
	inode_sync_complete(inode);
out:
	spin_unlock(&inode->i_lock);
	return ret;
}

static long writeback_chunk_size(struct backing_dev_info *bdi,
				 struct wb_writeback_work *work)
{
	long pages;

	/*
	 * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
	 * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
	 * here avoids calling into writeback_inodes_wb() more than once.
	 *
	 * The intended call sequence for WB_SYNC_ALL writeback is:
	 *
	 *      wb_writeback()
	 *          writeback_sb_inodes()       <== called only once
	 *              write_cache_pages()     <== called once for each inode
	 *                   (quickly) tag currently dirty pages
	 *                   (maybe slowly) sync all tagged pages
	 */
	if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
		pages = LONG_MAX;
	else {
		pages = min(bdi->avg_write_bandwidth / 2,
			    global_dirty_limit / DIRTY_SCOPE);
		pages = min(pages, work->nr_pages);
		pages = round_down(pages + MIN_WRITEBACK_PAGES,
				   MIN_WRITEBACK_PAGES);
	}

	return pages;
}

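/*
 * Note on the WB_SYNC_NONE branch above: adding MIN_WRITEBACK_PAGES before
 * rounding down makes the chunk a multiple of MIN_WRITEBACK_PAGES and, in
 * the usual case, no smaller than it, so each inode normally gets at least
 * a 4MB slice (1024 pages with 4KB pages) before its turn is given up.
 */
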
/*
 * Write a portion of b_io inodes which belong to @sb.
 *
 * Return the number of pages and/or inodes written.
 */
static long writeback_sb_inodes(struct super_block *sb,
				struct bdi_writeback *wb,
				struct wb_writeback_work *work)
{
	struct writeback_control wbc = {
		.sync_mode		= work->sync_mode,
		.tagged_writepages	= work->tagged_writepages,
		.for_kupdate		= work->for_kupdate,
		.for_background		= work->for_background,
		.for_sync		= work->for_sync,
		.range_cyclic		= work->range_cyclic,
		.range_start		= 0,
		.range_end		= LLONG_MAX,
	};
	unsigned long start_time = jiffies;
	long write_chunk;
	long wrote = 0;  /* count both pages and inodes */

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);

		if (inode->i_sb != sb) {
			if (work->sb) {
				/*
				 * We only want to write back data for this
				 * superblock, move all inodes not belonging
				 * to it back onto the dirty list.
				 */
				redirty_tail(inode, wb);
				continue;
			}

			/*
			 * The inode belongs to a different superblock.
			 * Bounce back to the caller to unpin this and
			 * pin the next superblock.
			 */
			break;
		}

		/*
		 * Don't bother with new inodes or inodes being freed, first
		 * kind does not need periodic writeout yet, and for the latter
		 * kind writeout is handled by the freer.
		 */
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			redirty_tail(inode, wb);
			continue;
		}
		if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
			/*
			 * If this inode is locked for writeback and we are not
			 * doing writeback-for-data-integrity, move it to
			 * b_more_io so that writeback can proceed with the
			 * other inodes on s_io.
			 *
			 * We'll have another go at writing back this inode
			 * when we completed a full scan of b_io.
			 */
			spin_unlock(&inode->i_lock);
			requeue_io(inode, wb);
			trace_writeback_sb_inodes_requeue(inode);
			continue;
		}
		spin_unlock(&wb->list_lock);

		/*
		 * We already requeued the inode if it had I_SYNC set and we
		 * are doing WB_SYNC_NONE writeback. So this catches only the
		 * WB_SYNC_ALL case.
		 */
		if (inode->i_state & I_SYNC) {
			/* Wait for I_SYNC. This function drops i_lock... */
			inode_sleep_on_writeback(inode);
			/* Inode may be gone, start again */
			spin_lock(&wb->list_lock);
			continue;
		}
		inode->i_state |= I_SYNC;
		spin_unlock(&inode->i_lock);

		write_chunk = writeback_chunk_size(wb->bdi, work);
		wbc.nr_to_write = write_chunk;
		wbc.pages_skipped = 0;

		/*
		 * We use I_SYNC to pin the inode in memory. While it is set
		 * evict_inode() will wait so the inode cannot be freed.
		 */
		__writeback_single_inode(inode, &wbc);

		work->nr_pages -= write_chunk - wbc.nr_to_write;
		wrote += write_chunk - wbc.nr_to_write;
		spin_lock(&wb->list_lock);
		spin_lock(&inode->i_lock);
		if (!(inode->i_state & I_DIRTY))
			wrote++;
		requeue_inode(inode, wb, &wbc);
		inode_sync_complete(inode);
		spin_unlock(&inode->i_lock);
		cond_resched_lock(&wb->list_lock);
		/*
		 * bail out to wb_writeback() often enough to check
		 * background threshold and other termination conditions.
		 */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	return wrote;
}

static long __writeback_inodes_wb(struct bdi_writeback *wb,
				  struct wb_writeback_work *work)
{
	unsigned long start_time = jiffies;
	long wrote = 0;

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);
		struct super_block *sb = inode->i_sb;

		if (!grab_super_passive(sb)) {
			/*
			 * grab_super_passive() may fail consistently due to
			 * s_umount being grabbed by someone else. Don't use
			 * requeue_io() to avoid busy retrying the inode/sb.
			 */
			redirty_tail(inode, wb);
			continue;
		}
		wrote += writeback_sb_inodes(sb, wb, work);
		drop_super(sb);

		/* refer to the same tests at the end of writeback_sb_inodes */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	/* Leave any unwritten inodes on b_io */
	return wrote;
}

static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
				enum wb_reason reason)
{
	struct wb_writeback_work work = {
		.nr_pages	= nr_pages,
		.sync_mode	= WB_SYNC_NONE,
		.range_cyclic	= 1,
		.reason		= reason,
	};

	spin_lock(&wb->list_lock);
	if (list_empty(&wb->b_io))
		queue_io(wb, &work);
	__writeback_inodes_wb(wb, &work);
	spin_unlock(&wb->list_lock);

	return nr_pages - work.nr_pages;
}

static bool over_bground_thresh(struct backing_dev_info *bdi)
{
	unsigned long background_thresh, dirty_thresh;

	global_dirty_limits(&background_thresh, &dirty_thresh);

	if (global_page_state(NR_FILE_DIRTY) +
	    global_page_state(NR_UNSTABLE_NFS) > background_thresh)
		return true;

	if (bdi_stat(bdi, BDI_RECLAIMABLE) >
				bdi_dirty_limit(bdi, background_thresh))
		return true;

	return false;
}

/*
 * Called under wb->list_lock. If there are multiple wb per bdi,
 * only the flusher working on the first wb should do it.
 */
static void wb_update_bandwidth(struct bdi_writeback *wb,
				unsigned long start_time)
{
	__bdi_update_bandwidth(wb->bdi, 0, 0, 0, 0, 0, start_time);
}

/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
			 struct wb_writeback_work *work)
{
	unsigned long wb_start = jiffies;
	long nr_pages = work->nr_pages;
	unsigned long oldest_jif;
	struct inode *inode;
	long progress;

	oldest_jif = jiffies;
	work->older_than_this = &oldest_jif;

	spin_lock(&wb->list_lock);
	for (;;) {
		/*
		 * Stop writeback when nr_pages has been consumed
		 */
		if (work->nr_pages <= 0)
			break;

		/*
		 * Background writeout and kupdate-style writeback may
		 * run forever. Stop them if there is other work to do
		 * so that e.g. sync can proceed. They'll be restarted
		 * after the other works are all done.
		 */
		if ((work->for_background || work->for_kupdate) &&
		    !list_empty(&wb->bdi->work_list))
			break;

		/*
		 * For background writeout, stop when we are below the
		 * background dirty threshold
		 */
		if (work->for_background && !over_bground_thresh(wb->bdi))
			break;

		/*
		 * Kupdate and background works are special and we want to
		 * include all inodes that need writing. Livelock avoidance is
		 * handled by these works yielding to any other work so we are
		 * safe.
		 */
		if (work->for_kupdate) {
			oldest_jif = jiffies -
				msecs_to_jiffies(dirty_expire_interval * 10);
		} else if (work->for_background)
			oldest_jif = jiffies;

		trace_writeback_start(wb->bdi, work);
		if (list_empty(&wb->b_io))
			queue_io(wb, work);
		if (work->sb)
			progress = writeback_sb_inodes(work->sb, wb, work);
		else
			progress = __writeback_inodes_wb(wb, work);
		trace_writeback_written(wb->bdi, work);

		wb_update_bandwidth(wb, wb_start);

		/*
		 * Did we write something? Try for more
		 *
		 * Dirty inodes are moved to b_io for writeback in batches.
		 * The completion of the current batch does not necessarily
		 * mean the overall work is done. So we keep looping as long
		 * as we made some progress on cleaning pages or inodes.
		 */
		if (progress)
			continue;
		/*
		 * No more inodes for IO, bail
		 */
		if (list_empty(&wb->b_more_io))
			break;
		/*
		 * Nothing written. Wait for some inode to
		 * become available for writeback. Otherwise
		 * we'll just busyloop.
		 */
		if (!list_empty(&wb->b_more_io)) {
			trace_writeback_wait(wb->bdi, work);
			inode = wb_inode(wb->b_more_io.prev);
			spin_lock(&inode->i_lock);
			spin_unlock(&wb->list_lock);
			/* This function drops i_lock... */
			inode_sleep_on_writeback(inode);
			spin_lock(&wb->list_lock);
		}
	}
	spin_unlock(&wb->list_lock);

	return nr_pages - work->nr_pages;
}

/*
 * Return the next wb_writeback_work struct that hasn't been processed yet.
 */
static struct wb_writeback_work *
get_next_work_item(struct backing_dev_info *bdi)
{
	struct wb_writeback_work *work = NULL;

	spin_lock_bh(&bdi->wb_lock);
	if (!list_empty(&bdi->work_list)) {
		work = list_entry(bdi->work_list.next,
				  struct wb_writeback_work, list);
		list_del_init(&work->list);
	}
	spin_unlock_bh(&bdi->wb_lock);
	return work;
}

/*
 * Add in the number of potentially dirty inodes, because each inode
 * write can dirty pagecache in the underlying blockdev.
 */
static unsigned long get_nr_dirty_pages(void)
{
	return global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) +
		get_nr_dirty_inodes();
}

static long wb_check_background_flush(struct bdi_writeback *wb)
{
	if (over_bground_thresh(wb->bdi)) {

		struct wb_writeback_work work = {
			.nr_pages	= LONG_MAX,
			.sync_mode	= WB_SYNC_NONE,
			.for_background	= 1,
			.range_cyclic	= 1,
			.reason		= WB_REASON_BACKGROUND,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}

static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
	unsigned long expired;
	long nr_pages;

	/*
	 * When set to zero, disable periodic writeback
	 */
	if (!dirty_writeback_interval)
		return 0;

	expired = wb->last_old_flush +
			msecs_to_jiffies(dirty_writeback_interval * 10);
	if (time_before(jiffies, expired))
		return 0;

	wb->last_old_flush = jiffies;
	nr_pages = get_nr_dirty_pages();

	if (nr_pages) {
		struct wb_writeback_work work = {
			.nr_pages	= nr_pages,
			.sync_mode	= WB_SYNC_NONE,
			.for_kupdate	= 1,
			.range_cyclic	= 1,
			.reason		= WB_REASON_PERIODIC,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}

/*
 * Retrieve work items and do the writeback they describe
 */
static long wb_do_writeback(struct bdi_writeback *wb)
{
	struct backing_dev_info *bdi = wb->bdi;
	struct wb_writeback_work *work;
	long wrote = 0;

	set_bit(BDI_writeback_running, &wb->bdi->state);
	while ((work = get_next_work_item(bdi)) != NULL) {

		trace_writeback_exec(bdi, work);

		wrote += wb_writeback(wb, work);

		/*
		 * Notify the caller of completion if this is a synchronous
		 * work item, otherwise just free it.
		 */
		if (work->done)
			complete(work->done);
		else
			kfree(work);
	}

	/*
	 * Check for periodic writeback, kupdated() style
	 */
	wrote += wb_check_old_data_flush(wb);
	wrote += wb_check_background_flush(wb);
	clear_bit(BDI_writeback_running, &wb->bdi->state);

	return wrote;
}

/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * reschedules periodically and does kupdated style flushing.
 */
void bdi_writeback_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb = container_of(to_delayed_work(work),
						struct bdi_writeback, dwork);
	struct backing_dev_info *bdi = wb->bdi;
	long pages_written;

	set_worker_desc("flush-%s", dev_name(bdi->dev));
	current->flags |= PF_SWAPWRITE;

	if (likely(!current_is_workqueue_rescuer() ||
		   !test_bit(BDI_registered, &bdi->state))) {
		/*
		 * The normal path.  Keep writing back @bdi until its
		 * work_list is empty.  Note that this path is also taken
		 * if @bdi is shutting down even when we're running off the
		 * rescuer as work_list needs to be drained.
		 */
		do {
			pages_written = wb_do_writeback(wb);
			trace_writeback_pages_written(pages_written);
		} while (!list_empty(&bdi->work_list));
	} else {
		/*
		 * bdi_wq can't get enough workers and we're running off
		 * the emergency worker.  Don't hog it.  Hopefully, 1024 is
		 * enough for efficient IO.
		 */
		pages_written = writeback_inodes_wb(&bdi->wb, 1024,
						    WB_REASON_FORKER_THREAD);
		trace_writeback_pages_written(pages_written);
	}

	if (!list_empty(&bdi->work_list))
		mod_delayed_work(bdi_wq, &wb->dwork, 0);
	else if (wb_has_dirty_io(wb) && dirty_writeback_interval)
		bdi_wakeup_thread_delayed(bdi);

	current->flags &= ~PF_SWAPWRITE;
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
{
	struct backing_dev_info *bdi;

	if (!nr_pages)
		nr_pages = get_nr_dirty_pages();

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		if (!bdi_has_dirty_io(bdi))
			continue;
		__bdi_start_writeback(bdi, nr_pages, false, reason);
	}
	rcu_read_unlock();
}

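/*
 * Informational note: callers pass nr_pages == 0 to mean "everything
 * currently dirty"; the sync(2) path, for example, kicks all flusher
 * threads this way before doing its blocking per-superblock sync.
 */
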
static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}

/**
 *	__mark_inode_dirty -	internal function
 *	@inode: inode to mark
 *	@flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 *	Mark an inode as dirty. Callers should use mark_inode_dirty or
 *	mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;
	struct backing_dev_info *bdi = NULL;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		trace_writeback_dirty_inode_start(inode, flags);

		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode, flags);

		trace_writeback_dirty_inode(inode, flags);
	}

	/*
	 * Paired with smp_mb() in __writeback_single_inode() for the
	 * following lockless i_state test.  See there for details.
	 */
	smp_mb();

	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump))
		block_dump___mark_inode_dirty(inode);

	spin_lock(&inode->i_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out_unlock_inode;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (inode_unhashed(inode))
				goto out_unlock_inode;
		}
		if (inode->i_state & I_FREEING)
			goto out_unlock_inode;

		/*
		 * If the inode was already on b_dirty/b_io/b_more_io, don't
		 * reposition it (that would break b_dirty time-ordering).
		 */
		if (!was_dirty) {
			bool wakeup_bdi = false;
			bdi = inode_to_bdi(inode);

			spin_unlock(&inode->i_lock);
			spin_lock(&bdi->wb.list_lock);
			if (bdi_cap_writeback_dirty(bdi)) {
				WARN(!test_bit(BDI_registered, &bdi->state),
				     "bdi-%s not registered\n", bdi->name);

				/*
				 * If this is the first dirty inode for this
				 * bdi, we have to wake-up the corresponding
				 * bdi thread to make sure background
				 * write-back happens later.
				 */
				if (!wb_has_dirty_io(&bdi->wb))
					wakeup_bdi = true;
			}

			inode->dirtied_when = jiffies;
			list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
			spin_unlock(&bdi->wb.list_lock);

			if (wakeup_bdi)
				bdi_wakeup_thread_delayed(bdi);
			return;
		}
	}
out_unlock_inode:
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(__mark_inode_dirty);

static void wait_sb_inodes(struct super_block *sb)
{
	struct inode *inode, *old_inode = NULL;

	/*
	 * We need to be protected against the filesystem going from
	 * r/o to r/w or vice versa.
	 */
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&inode_sb_list_lock);

	/*
	 * Data integrity sync. Must wait for all pages under writeback,
	 * because there may have been pages dirtied before our sync
	 * call, but which had writeout started before we write it out.
	 * In which case, the inode may not be on the dirty list, but
	 * we still have to wait for that writeout.
	 */
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		struct address_space *mapping = inode->i_mapping;

		spin_lock(&inode->i_lock);
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    (mapping->nrpages == 0)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&inode_sb_list_lock);

		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the
		 * inode_sb_list_lock.  We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it under
		 * inode_sb_list_lock. So we keep the reference and iput it
		 * later.
		 */
		iput(old_inode);
		old_inode = inode;

		filemap_fdatawait(mapping);

		cond_resched();

		spin_lock(&inode_sb_list_lock);
	}
	spin_unlock(&inode_sb_list_lock);
	iput(old_inode);
}

/**
 * writeback_inodes_sb_nr -	writeback dirty inodes from given super_block
 * @sb: the superblock
 * @nr: the number of pages to write
 * @reason: reason why some writeback work was initiated
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb_nr(struct super_block *sb,
			    unsigned long nr,
			    enum wb_reason reason)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb			= sb,
		.sync_mode		= WB_SYNC_NONE,
		.tagged_writepages	= 1,
		.done			= &done,
		.nr_pages		= nr,
		.reason			= reason,
	};

	if (sb->s_bdi == &noop_backing_dev_info)
		return;
	WARN_ON(!rwsem_is_locked(&sb->s_umount));
	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);
}
EXPORT_SYMBOL(writeback_inodes_sb_nr);

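/*
 * Example (illustrative sketch, not taken from an in-tree caller): a
 * filesystem that wants roughly 32MB of its dirty data pushed out without
 * waiting for the IO could, with sb->s_umount held, do
 *
 *	writeback_inodes_sb_nr(sb, 8192, WB_REASON_FS_FREE_SPACE);
 *
 * (8192 pages == 32MB with 4KB pages; WB_REASON_FS_FREE_SPACE is one of the
 * generic wb_reason values defined elsewhere.)  Note the function waits for
 * the work item to be *processed* by the flusher (wait_for_completion()
 * above), not for the submitted IO to finish.
 */
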
/**
 * writeback_inodes_sb	-	writeback dirty inodes from given super_block
 * @sb: the superblock
 * @reason: reason why some writeback work was initiated
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
{
	return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
}
EXPORT_SYMBOL(writeback_inodes_sb);

/**
 * try_to_writeback_inodes_sb_nr - try to start writeback if none underway
 * @sb: the superblock
 * @nr: the number of pages to write
 * @reason: the reason of writeback
 *
 * Invoke writeback_inodes_sb_nr if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
int try_to_writeback_inodes_sb_nr(struct super_block *sb,
				  unsigned long nr,
				  enum wb_reason reason)
{
	if (writeback_in_progress(sb->s_bdi))
		return 1;

	if (!down_read_trylock(&sb->s_umount))
		return 0;

	writeback_inodes_sb_nr(sb, nr, reason);
	up_read(&sb->s_umount);
	return 1;
}
EXPORT_SYMBOL(try_to_writeback_inodes_sb_nr);

/**
 * try_to_writeback_inodes_sb - try to start writeback if none underway
 * @sb: the superblock
 * @reason: reason why some writeback work was initiated
 *
 * Implemented by calling try_to_writeback_inodes_sb_nr().
 * Returns 1 if writeback was started, 0 if not.
 */
int try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
{
	return try_to_writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
}
EXPORT_SYMBOL(try_to_writeback_inodes_sb);

/**
 * sync_inodes_sb	-	sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block.
 */
void sync_inodes_sb(struct super_block *sb)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_ALL,
		.nr_pages	= LONG_MAX,
		.range_cyclic	= 0,
		.done		= &done,
		.reason		= WB_REASON_SYNC,
		.for_sync	= 1,
	};

	/* Nothing to do? */
	if (sb->s_bdi == &noop_backing_dev_info)
		return;
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);

	wait_sb_inodes(sb);
}
EXPORT_SYMBOL(sync_inodes_sb);

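/*
 * Informational note: this is the data-integrity path; sync(2) and syncfs(2)
 * end up here for each superblock.  Unlike the WB_SYNC_NONE helpers above it
 * both queues WB_SYNC_ALL writeback and then waits for writeback on every
 * inode via wait_sb_inodes().
 */
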
/**
 * write_inode_now	-	write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	return writeback_single_inode(inode, wb, &wbc);
}
EXPORT_SYMBOL(write_inode_now);

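/*
 * Example (illustrative sketch): a caller holding a reference that needs the
 * inode and its pages flushed before proceeding (knfsd is the example the
 * comment above gives) would use
 *
 *	err = write_inode_now(inode, 1);	(1 == synchronous, WB_SYNC_ALL)
 *
 * With sync == 0 the writeback is merely submitted (WB_SYNC_NONE) and the
 * call does not wait for it to reach the disk.
 */
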
/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	return writeback_single_inode(inode, &inode_to_bdi(inode)->wb, wbc);
}
EXPORT_SYMBOL(sync_inode);

/**
 * sync_inode_metadata - write an inode to disk
 * @inode: the inode to sync
 * @wait: wait for I/O to complete.
 *
 * Write an inode to disk and adjust its dirty state after completion.
 *
 * Note: only writes the actual inode, no associated data or other metadata.
 */
int sync_inode_metadata(struct inode *inode, int wait)
{
	struct writeback_control wbc = {
		.sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
		.nr_to_write = 0, /* metadata-only */
	};

	return sync_inode(inode, &wbc);
}
EXPORT_SYMBOL(sync_inode_metadata);
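
/*
 * Example (illustrative sketch): an fsync implementation that has already
 * written and waited on the data pages and only needs the inode itself
 * persisted can finish with
 *
 *	err = sync_inode_metadata(inode, 1);
 *
 * which is roughly what generic_file_fsync() ends up doing for simple,
 * non-journalling filesystems.
 */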