/*
 * drivers/md/bcache/movinggc.c
 *
 * Moving/copying garbage collector
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"

#include <trace/events/bcache.h>

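/*
 * State for one key's worth of data being moved: the keybuf entry being
 * copied, the insert op that will write the data back out, and the bio
 * (with inline biovecs) used for both the read and the write.
 */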
struct moving_io {
	struct closure		cl;
	struct keybuf_key	*w;
	struct data_insert_op	op;
	struct bbio		bio;
};

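/*
 * Predicate for the moving GC keybuf: a key is worth moving if any of
 * its pointers lands in a bucket that GC marked for compaction.
 */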
static bool moving_pred(struct keybuf *buf, struct bkey *k)
{
	struct cache_set *c = container_of(buf, struct cache_set,
					   moving_gc_keys);
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++) {
		struct bucket *g = PTR_BUCKET(c, k, i);

		if (GC_MOVE(g))
			return true;
	}

	return false;
}

/* Moving GC - IO loop */

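/* Closure destructor: frees the moving_io once all of its IO is done. */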
static void moving_io_destructor(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);
	kfree(io);
}

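/*
 * Runs once the write (or a failed read) has completed: frees the data
 * pages, notes any replace collision, drops the key from the keybuf,
 * and releases the in-flight slot taken in read_moving().
 */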
static void write_moving_finish(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);
	struct bio *bio = &io->bio.bio;
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i)
		__free_page(bv->bv_page);

	if (io->op.replace_collision)
		trace_bcache_gc_copy_collision(&io->w->key);

	bch_keybuf_del(&io->op.c->moving_gc_keys, io->w);

	up(&io->op.c->moving_in_flight);

	closure_return_with_destructor(cl, moving_io_destructor);
}

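/*
 * Read completion: record any error, and cancel the move with -EINTR if
 * a clean key's pointer went stale while the read was in flight (the
 * data has since been rewritten elsewhere, so copying it would be
 * pointless).
 */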
static void read_moving_endio(struct bio *bio, int error)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct moving_io *io = container_of(bio->bi_private,
					    struct moving_io, cl);

	if (error)
		io->op.error = error;
	else if (!KEY_DIRTY(&b->key) &&
		 ptr_stale(io->op.c, &b->key, 0)) {
		io->op.error = -EINTR;
	}

	bch_bbio_endio(io->op.c, bio, error, "reading data to move");
}

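/*
 * (Re)initialize io->bio for the key being moved: sized to the key's
 * sectors, backed by the inline biovecs, at idle IO priority. Called
 * once before the read and again before the write.
 */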
static void moving_init(struct moving_io *io)
{
	struct bio *bio = &io->bio.bio;

	bio_init(bio);
	bio_get(bio);
	bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	bio->bi_size		= KEY_SIZE(&io->w->key) << 9;
	bio->bi_max_vecs	= DIV_ROUND_UP(KEY_SIZE(&io->w->key),
					       PAGE_SECTORS);
	bio->bi_private		= &io->cl;
	bio->bi_io_vec		= bio->bi_inline_vecs;
	bch_bio_map(bio, NULL);
}

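/*
 * If the read succeeded, write the data back out as a replace of the
 * original key, so a racing foreground write wins (and shows up as a
 * replace_collision). Always continues to write_moving_finish().
 */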
static void write_moving(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);
	struct data_insert_op *op = &io->op;

	if (!op->error) {
		moving_init(io);

		io->bio.bio.bi_sector	= KEY_START(&io->w->key);
		op->write_prio		= 1;
		op->bio			= &io->bio.bio;

		op->writeback		= KEY_DIRTY(&io->w->key);
		op->csum		= KEY_CSUM(&io->w->key);

		bkey_copy(&op->replace_key, &io->w->key);
		op->replace		= true;

		closure_call(&op->cl, bch_data_insert, NULL, cl);
	}

	continue_at(cl, write_moving_finish, system_wq);
}

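/*
 * Submit the read for this key's data; continues to write_moving() once
 * the read completes and drops the closure's last ref.
 */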
static void read_moving_submit(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);
	struct bio *bio = &io->bio.bio;

	bch_submit_bbio(bio, io->op.c, &io->w->key, 0);

	continue_at(cl, write_moving, system_wq);
}

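/*
 * Scans the moving GC keybuf and, for each key that still points into a
 * bucket marked GC_MOVE, allocates a moving_io, reads the data, and
 * kicks off the copy. The moving_in_flight semaphore bounds how many
 * moves are outstanding at once.
 */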
static void read_moving(struct cache_set *c)
{
	struct keybuf_key *w;
	struct moving_io *io;
	struct bio *bio;
	struct closure cl;

	closure_init_stack(&cl);

	/* XXX: if we error, background writeback could stall indefinitely */

	while (!test_bit(CACHE_SET_STOPPING, &c->flags)) {
		w = bch_keybuf_next_rescan(c, &c->moving_gc_keys,
					   &MAX_KEY, moving_pred);
		if (!w)
			break;

		if (ptr_stale(c, &w->key, 0)) {
			bch_keybuf_del(&c->moving_gc_keys, w);
			continue;
		}

		io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec)
			     * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
			     GFP_KERNEL);
		if (!io)
			goto err;

		w->private	= io;
		io->w		= w;
		io->op.inode	= KEY_INODE(&w->key);
		io->op.c	= c;

		moving_init(io);
		bio = &io->bio.bio;

		bio->bi_rw	= READ;
		bio->bi_end_io	= read_moving_endio;

		if (bio_alloc_pages(bio, GFP_KERNEL))
			goto err;

		trace_bcache_gc_copy(&w->key);

		down(&c->moving_in_flight);
		closure_call(&io->cl, read_moving_submit, NULL, &cl);
	}

	if (0) {
		/* Error path: only reachable via the gotos above */
err:		if (!IS_ERR_OR_NULL(w->private))
			kfree(w->private);

		bch_keybuf_del(&c->moving_gc_keys, w);
	}

	closure_sync(&cl);
}

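/*
 * With bcache's heap macros this ordering builds a max-heap on
 * GC_SECTORS_USED, so heap_peek() returns the fullest bucket currently
 * held, and heap_pop() evicts candidates from fullest to emptiest.
 */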
static bool bucket_cmp(struct bucket *l, struct bucket *r)
{
	return GC_SECTORS_USED(l) < GC_SECTORS_USED(r);
}

static unsigned bucket_heap_top(struct cache *ca)
{
	struct bucket *b;
	return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0;
}

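/*
 * Picks which buckets to compact: collects the least-utilized non-empty
 * buckets in a heap, evicts the fullest candidates until the live data
 * that remains fits within the reserve of free buckets, and marks the
 * survivors GC_MOVE. read_moving() then copies their live data out.
 */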
void bch_moving_gc(struct cache_set *c)
{
	struct cache *ca;
	struct bucket *b;
	unsigned i;

	if (!c->copy_gc_enabled)
		return;

	mutex_lock(&c->bucket_lock);

	for_each_cache(ca, c, i) {
		unsigned sectors_to_move = 0;
		/* Only move what can be rewritten into the free buckets */
		unsigned reserve_sectors = ca->sb.bucket_size *
			min(fifo_used(&ca->free), ca->free.size / 2);

		ca->heap.used = 0;

		/* Keep the emptiest buckets; the heap top is the fullest */
		for_each_bucket(b, ca) {
			if (!GC_SECTORS_USED(b))
				continue;

			if (!heap_full(&ca->heap)) {
				sectors_to_move += GC_SECTORS_USED(b);
				heap_add(&ca->heap, b, bucket_cmp);
			} else if (bucket_cmp(b, heap_peek(&ca->heap))) {
				sectors_to_move -= bucket_heap_top(ca);
				sectors_to_move += GC_SECTORS_USED(b);

				ca->heap.data[0] = b;
				heap_sift(&ca->heap, 0, bucket_cmp);
			}
		}

		/* Drop the fullest candidates until the rest fit in reserve */
		while (sectors_to_move > reserve_sectors) {
			heap_pop(&ca->heap, b, bucket_cmp);
			sectors_to_move -= GC_SECTORS_USED(b);
		}

		while (heap_pop(&ca->heap, b, bucket_cmp))
			SET_GC_MOVE(b, 1);
	}

	mutex_unlock(&c->bucket_lock);

	c->moving_gc_keys.last_scanned = ZERO_KEY;

	read_moving(c);
}

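/*
 * One-time setup: initializes the moving GC keybuf and allows up to 64
 * moving GC IOs in flight at once.
 */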
void bch_moving_init_cache_set(struct cache_set *c)
{
	bch_keybuf_init(&c->moving_gc_keys);
	sema_init(&c->moving_in_flight, 64);
}