/*
 * background writeback - scan btree for dirty data and write it to the backing
 * device
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "writeback.h"

#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <trace/events/bcache.h>

/* Rate limiting */

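/*
 * A simple PD (proportional-derivative) controller: the target amount of
 * dirty data is writeback_percent of the cache, scaled by this backing
 * device's share of the cache. Each update nudges writeback_rate.rate
 * (sectors per second) based on how far we are from the target
 * (proportional term) and on how fast the amount of dirty data is
 * changing (derivative term, smoothed with an EWMA).
 */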
static void __update_writeback_rate(struct cached_dev *dc)
{
	struct cache_set *c = dc->disk.c;
	uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size;
	uint64_t cache_dirty_target =
		div_u64(cache_sectors * dc->writeback_percent, 100);

	int64_t target = div64_u64(cache_dirty_target * bdev_sectors(dc->bdev),
				   c->cached_dev_sectors);

	/* PD controller */

	int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
	int64_t derivative = dirty - dc->disk.sectors_dirty_last;
	int64_t proportional = dirty - target;
	int64_t change;

	dc->disk.sectors_dirty_last = dirty;

	/* Scale to sectors per second */

	proportional *= dc->writeback_rate_update_seconds;
	proportional = div_s64(proportional, dc->writeback_rate_p_term_inverse);

	derivative = div_s64(derivative, dc->writeback_rate_update_seconds);

	derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative,
			      (dc->writeback_rate_d_term /
			       dc->writeback_rate_update_seconds) ?: 1, 0);

	derivative *= dc->writeback_rate_d_term;
	derivative = div_s64(derivative, dc->writeback_rate_p_term_inverse);

	change = proportional + derivative;

	/* Don't increase writeback rate if the device isn't keeping up */
	if (change > 0 &&
	    time_after64(local_clock(),
			 dc->writeback_rate.next + NSEC_PER_MSEC))
		change = 0;

	dc->writeback_rate.rate =
		clamp_t(int64_t, (int64_t) dc->writeback_rate.rate + change,
			1, NSEC_PER_MSEC);

	dc->writeback_rate_proportional = proportional;
	dc->writeback_rate_derivative = derivative;
	dc->writeback_rate_change = change;
	dc->writeback_rate_target = target;
}

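/*
 * Worker that periodically recomputes the writeback rate; it reschedules
 * itself every writeback_rate_update_seconds.
 */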
static void update_writeback_rate(struct work_struct *work)
{
	struct cached_dev *dc = container_of(to_delayed_work(work),
					     struct cached_dev,
					     writeback_rate_update);

	down_read(&dc->writeback_lock);

	if (atomic_read(&dc->has_dirty) &&
	    dc->writeback_percent)
		__update_writeback_rate(dc);

	up_read(&dc->writeback_lock);

	schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);
}

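/*
 * Returns how long to sleep (in jiffies) after writing back @sectors.
 * Returns 0 (no throttling) when the device is being detached or when
 * writeback_percent is 0, i.e. writeback should run flat out.
 */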
static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
{
	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    !dc->writeback_percent)
		return 0;

	return bch_next_delay(&dc->writeback_rate, sectors);
}

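/*
 * State for a single writeback I/O: the dirty data is first read from the
 * cache into the pages of the embedded bio, then the same bio is reused
 * to write it out to the backing device. The bio's bio_vecs are allocated
 * inline, immediately after the struct.
 */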
struct dirty_io {
	struct closure		cl;
	struct cached_dev	*dc;
	struct bio		bio;
};

static void dirty_init(struct keybuf_key *w)
{
	struct dirty_io *io = w->private;
	struct bio *bio = &io->bio;

	bio_init(bio);
	if (!io->dc->writeback_percent)
		bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	bio->bi_size		= KEY_SIZE(&w->key) << 9;
	bio->bi_max_vecs	= DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS);
	bio->bi_private		= w;
	bio->bi_io_vec		= bio->bi_inline_vecs;
	bch_bio_map(bio, NULL);
}

static void dirty_io_destructor(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	kfree(io);
}

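/*
 * Runs after the write to the backing device has completed: free the
 * bounce pages and, if the write succeeded (dirty_endio() clears
 * KEY_DIRTY on error), clear the dirty bit on the key in the btree. The
 * buckets the key points into are pinned first so they can't be reused
 * while the btree update is in flight.
 */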
static void write_dirty_finish(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;
	struct cached_dev *dc = io->dc;
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, &io->bio, i)
		__free_page(bv->bv_page);

	/* This is kind of a dumb way of signalling errors. */
	if (KEY_DIRTY(&w->key)) {
		int ret;
		unsigned i;
		struct keylist keys;

		bch_keylist_init(&keys);

		bkey_copy(keys.top, &w->key);
		SET_KEY_DIRTY(keys.top, false);
		bch_keylist_push(&keys);

		for (i = 0; i < KEY_PTRS(&w->key); i++)
			atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);

		ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key);

		if (ret)
			trace_bcache_writeback_collision(&w->key);

		atomic_long_inc(ret
				? &dc->disk.c->writeback_keys_failed
				: &dc->disk.c->writeback_keys_done);
	}

	bch_keybuf_del(&dc->writeback_keys, w);
	up(&dc->in_flight);

	closure_return_with_destructor(cl, dirty_io_destructor);
}

static void dirty_endio(struct bio *bio, int error)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	if (error)
		SET_KEY_DIRTY(&w->key, false);

	closure_put(&io->cl);
}

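/*
 * Reinitialize the bio (it was just used for the read from the cache) and
 * point it at the corresponding offset on the backing device; once the
 * write completes, continue on to write_dirty_finish().
 */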
static void write_dirty(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;

	dirty_init(w);
	io->bio.bi_rw		= WRITE;
	io->bio.bi_sector	= KEY_START(&w->key);
	io->bio.bi_bdev		= io->dc->bdev;
	io->bio.bi_end_io	= dirty_endio;

	closure_bio_submit(&io->bio, cl, &io->dc->disk);

	continue_at(cl, write_dirty_finish, system_wq);
}

static void read_dirty_endio(struct bio *bio, int error)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
			    error, "reading dirty data from cache");

	dirty_endio(bio, error);
}

static void read_dirty_submit(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);

	closure_bio_submit(&io->bio, cl, &io->dc->disk);

	continue_at(cl, write_dirty, system_wq);
}

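/*
 * Main writeback loop: pop dirty keys off the writeback keybuf, read the
 * data from the cache, and kick off the read -> write -> btree-update
 * closure chain (read_dirty_submit -> write_dirty -> write_dirty_finish).
 * The in_flight semaphore bounds the number of I/Os in flight, and
 * writeback_delay() throttles between keys; the delay is skipped while
 * keys are contiguous so sequential writeback stays sequential.
 */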
static void read_dirty(struct cached_dev *dc)
{
	unsigned delay = 0;
	struct keybuf_key *w;
	struct dirty_io *io;
	struct closure cl;

	closure_init_stack(&cl);

	/*
	 * XXX: if we error, background writeback just spins. Should use some
	 * mempools.
	 */

	while (!kthread_should_stop()) {
		try_to_freeze();

		w = bch_keybuf_next(&dc->writeback_keys);
		if (!w)
			break;

		BUG_ON(ptr_stale(dc->disk.c, &w->key, 0));

		if (KEY_START(&w->key) != dc->last_read ||
		    jiffies_to_msecs(delay) > 50)
			while (!kthread_should_stop() && delay)
				delay = schedule_timeout_uninterruptible(delay);

		dc->last_read	= KEY_OFFSET(&w->key);

		io = kzalloc(sizeof(struct dirty_io) + sizeof(struct bio_vec)
			     * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
			     GFP_KERNEL);
		if (!io)
			goto err;

		w->private	= io;
		io->dc		= dc;

		dirty_init(w);
		io->bio.bi_sector	= PTR_OFFSET(&w->key, 0);
		io->bio.bi_bdev		= PTR_CACHE(dc->disk.c,
						    &w->key, 0)->bdev;
		io->bio.bi_rw		= READ;
		io->bio.bi_end_io	= read_dirty_endio;

		if (bio_alloc_pages(&io->bio, GFP_KERNEL))
			goto err_free;

		trace_bcache_writeback(&w->key);

		down(&dc->in_flight);
		closure_call(&io->cl, read_dirty_submit, NULL, &cl);

		delay = writeback_delay(dc, KEY_SIZE(&w->key));
	}

	if (0) {
err_free:
		kfree(w->private);
err:
		bch_keybuf_del(&dc->writeback_keys, w);
	}

	/*
	 * Wait for outstanding writeback IOs to finish (and keybuf slots to be
	 * freed) before refilling again
	 */
	closure_sync(&cl);
}

/* Scan for dirty data */

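/*
 * Update the per-stripe dirty sector counts for a device; @nr_sectors may
 * be negative. A stripe whose count reaches stripe_size is marked in the
 * full_dirty_stripes bitmap so completely dirty stripes can be written
 * back preferentially.
 */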
void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
				  uint64_t offset, int nr_sectors)
{
	struct bcache_device *d = c->devices[inode];
	unsigned stripe_offset, stripe, sectors_dirty;

	if (!d)
		return;

	stripe = offset_to_stripe(d, offset);
	stripe_offset = offset & (d->stripe_size - 1);

	while (nr_sectors) {
		int s = min_t(unsigned, abs(nr_sectors),
			      d->stripe_size - stripe_offset);

		if (nr_sectors < 0)
			s = -s;

		if (stripe >= d->nr_stripes)
			return;

		sectors_dirty = atomic_add_return(s,
					d->stripe_sectors_dirty + stripe);
		if (sectors_dirty == d->stripe_size)
			set_bit(stripe, d->full_dirty_stripes);
		else
			clear_bit(stripe, d->full_dirty_stripes);

		nr_sectors -= s;
		stripe_offset = 0;
		stripe++;
	}
}

static bool dirty_pred(struct keybuf *buf, struct bkey *k)
{
	return KEY_DIRTY(k);
}

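/*
 * For backing devices where partial stripe writes are expensive (e.g.
 * RAID 5/6 arrays), refill the keybuf only from stripes that are
 * completely dirty. Scans the full_dirty_stripes bitmap starting from
 * last_scanned, wrapping around once.
 */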
static void refill_full_stripes(struct cached_dev *dc)
{
	struct keybuf *buf = &dc->writeback_keys;
	unsigned start_stripe, stripe, next_stripe;
	bool wrapped = false;

	stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));

	if (stripe >= dc->disk.nr_stripes)
		stripe = 0;

	start_stripe = stripe;

	while (1) {
		stripe = find_next_bit(dc->disk.full_dirty_stripes,
				       dc->disk.nr_stripes, stripe);

		if (stripe == dc->disk.nr_stripes)
			goto next;

		next_stripe = find_next_zero_bit(dc->disk.full_dirty_stripes,
						 dc->disk.nr_stripes, stripe);

		buf->last_scanned = KEY(dc->disk.id,
					stripe * dc->disk.stripe_size, 0);

		bch_refill_keybuf(dc->disk.c, buf,
				  &KEY(dc->disk.id,
				       next_stripe * dc->disk.stripe_size, 0),
				  dirty_pred);

		if (array_freelist_empty(&buf->freelist))
			return;

		stripe = next_stripe;
next:
		if (wrapped && stripe > start_stripe)
			return;

		if (stripe == dc->disk.nr_stripes) {
			stripe = 0;
			wrapped = true;
		}
	}
}

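/*
 * Refill the writeback keybuf with dirty keys. Returns true only when the
 * scan both started from the beginning of the device's keyspace and
 * reached the end, i.e. the whole index was searched.
 */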
static bool refill_dirty(struct cached_dev *dc)
{
	struct keybuf *buf = &dc->writeback_keys;
	struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
	bool searched_from_start = false;

	if (dc->partial_stripes_expensive) {
		refill_full_stripes(dc);
		if (array_freelist_empty(&buf->freelist))
			return false;
	}

	if (bkey_cmp(&buf->last_scanned, &end) >= 0) {
		buf->last_scanned = KEY(dc->disk.id, 0, 0);
		searched_from_start = true;
	}

	bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);

	return bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start;
}

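/*
 * Writeback thread: sleeps while there's nothing to do, otherwise
 * alternates between refilling the keybuf and writing the dirty data
 * back. When a full index scan finds no dirty keys left, the backing
 * device is marked clean in its superblock.
 */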
static int bch_writeback_thread(void *arg)
{
	struct cached_dev *dc = arg;
	bool searched_full_index;

	while (!kthread_should_stop()) {
		down_write(&dc->writeback_lock);
		if (!atomic_read(&dc->has_dirty) ||
		    (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
		     !dc->writeback_running)) {
			up_write(&dc->writeback_lock);
			set_current_state(TASK_INTERRUPTIBLE);

			if (kthread_should_stop())
				return 0;

			try_to_freeze();
			schedule();
			continue;
		}

		searched_full_index = refill_dirty(dc);

		if (searched_full_index &&
		    RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
			atomic_set(&dc->has_dirty, 0);
			cached_dev_put(dc);
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
			bch_write_bdev_super(dc, NULL);
		}

		up_write(&dc->writeback_lock);

		bch_ratelimit_reset(&dc->writeback_rate);
		read_dirty(dc);

		if (searched_full_index) {
			unsigned delay = dc->writeback_delay * HZ;

			while (delay &&
			       !kthread_should_stop() &&
			       !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
				delay = schedule_timeout_uninterruptible(delay);
		}
	}

	return 0;
}

/* Init */

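/*
 * On startup, walk the btree and count the dirty sectors belonging to
 * this backing device, so the rate controller starts from an accurate
 * picture of how much dirty data there is.
 */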
struct sectors_dirty_init {
	struct btree_op	op;
	unsigned	inode;
};

static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
				 struct bkey *k)
{
	struct sectors_dirty_init *op = container_of(_op,
						struct sectors_dirty_init, op);
	if (KEY_INODE(k) > op->inode)
		return MAP_DONE;

	if (KEY_DIRTY(k))
		bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
					     KEY_START(k), KEY_SIZE(k));

	return MAP_CONTINUE;
}

void bch_sectors_dirty_init(struct cached_dev *dc)
{
	struct sectors_dirty_init op;

	bch_btree_op_init(&op.op, -1);
	op.inode = dc->disk.id;

	bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0),
			   sectors_dirty_init_fn, 0);

	dc->disk.sectors_dirty_last = bcache_dev_sectors_dirty(&dc->disk);
}

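/*
 * Set up default writeback parameters, create the writeback thread
 * (kthread_create() leaves it stopped until it is woken later), and
 * schedule the periodic rate update worker.
 */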
int bch_cached_dev_writeback_init(struct cached_dev *dc)
{
	sema_init(&dc->in_flight, 64);
	init_rwsem(&dc->writeback_lock);
	bch_keybuf_init(&dc->writeback_keys);

	dc->writeback_metadata		= true;
	dc->writeback_running		= true;
	dc->writeback_percent		= 10;
	dc->writeback_delay		= 30;
	dc->writeback_rate.rate		= 1024;

	dc->writeback_rate_update_seconds = 5;
	dc->writeback_rate_d_term	= 30;
	dc->writeback_rate_p_term_inverse = 6000;

	dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
					      "bcache_writeback");
	if (IS_ERR(dc->writeback_thread))
		return PTR_ERR(dc->writeback_thread);

	INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
	schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);

	return 0;
}