1 /*
2  * Copyright (C) 2015 Red Hat. All rights reserved.
3  *
4  * This file is released under the GPL.
5  */
6
7 #include "dm-cache-policy.h"
8 #include "dm-cache-policy-internal.h"
9 #include "dm.h"
10
11 #include <linux/hash.h>
12 #include <linux/jiffies.h>
13 #include <linux/module.h>
14 #include <linux/mutex.h>
15 #include <linux/vmalloc.h>
16 #include <linux/math64.h>
17
18 #define DM_MSG_PREFIX "cache-policy-smq"
19
20 /*----------------------------------------------------------------*/
21
22 /*
23  * Safe division functions that return zero on divide by zero.
24  */
25 static unsigned safe_div(unsigned n, unsigned d)
26 {
27         return d ? n / d : 0u;
28 }
29
30 static unsigned safe_mod(unsigned n, unsigned d)
31 {
32         return d ? n % d : 0u;
33 }
34
35 /*----------------------------------------------------------------*/
36
37 struct entry {
38         unsigned hash_next:28;
39         unsigned prev:28;
40         unsigned next:28;
41         unsigned level:7;
42         bool dirty:1;
43         bool allocated:1;
44         bool sentinel:1;
45
46         dm_oblock_t oblock;
47 };
48
49 /*----------------------------------------------------------------*/
50
51 #define INDEXER_NULL ((1u << 28u) - 1u)
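/*
 * Entries are linked by 28 bit indexes into the entry_space array rather
 * than by pointer, which keeps the links small enough to pack into the
 * bitfields above.  INDEXER_NULL (the maximum 28 bit value) plays the
 * role of NULL.
 */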
52
53 /*
54  * An entry_space manages a set of entries that we use for the queues.
55  * The clean and dirty queues share entries, so this object is separate
56  * from the queue itself.
57  */
58 struct entry_space {
59         struct entry *begin;
60         struct entry *end;
61 };
62
63 static int space_init(struct entry_space *es, unsigned nr_entries)
64 {
65         if (!nr_entries) {
66                 es->begin = es->end = NULL;
67                 return 0;
68         }
69
70         es->begin = vzalloc(sizeof(struct entry) * nr_entries);
71         if (!es->begin)
72                 return -ENOMEM;
73
74         es->end = es->begin + nr_entries;
75         return 0;
76 }
77
78 static void space_exit(struct entry_space *es)
79 {
80         vfree(es->begin);
81 }
82
83 static struct entry *__get_entry(struct entry_space *es, unsigned block)
84 {
85         struct entry *e;
86
87         e = es->begin + block;
88         BUG_ON(e >= es->end);
89
90         return e;
91 }
92
93 static unsigned to_index(struct entry_space *es, struct entry *e)
94 {
95         BUG_ON(e < es->begin || e >= es->end);
96         return e - es->begin;
97 }
98
99 static struct entry *to_entry(struct entry_space *es, unsigned block)
100 {
101         if (block == INDEXER_NULL)
102                 return NULL;
103
104         return __get_entry(es, block);
105 }
106
107 /*----------------------------------------------------------------*/
108
109 struct ilist {
110         unsigned nr_elts;       /* excluding sentinel entries */
111         unsigned head, tail;
112 };
113
114 static void l_init(struct ilist *l)
115 {
116         l->nr_elts = 0;
117         l->head = l->tail = INDEXER_NULL;
118 }
119
120 static struct entry *l_head(struct entry_space *es, struct ilist *l)
121 {
122         return to_entry(es, l->head);
123 }
124
125 static struct entry *l_tail(struct entry_space *es, struct ilist *l)
126 {
127         return to_entry(es, l->tail);
128 }
129
130 static struct entry *l_next(struct entry_space *es, struct entry *e)
131 {
132         return to_entry(es, e->next);
133 }
134
135 static struct entry *l_prev(struct entry_space *es, struct entry *e)
136 {
137         return to_entry(es, e->prev);
138 }
139
140 static bool l_empty(struct ilist *l)
141 {
142         return l->head == INDEXER_NULL;
143 }
144
145 static void l_add_head(struct entry_space *es, struct ilist *l, struct entry *e)
146 {
147         struct entry *head = l_head(es, l);
148
149         e->next = l->head;
150         e->prev = INDEXER_NULL;
151
152         if (head)
153                 head->prev = l->head = to_index(es, e);
154         else
155                 l->head = l->tail = to_index(es, e);
156
157         if (!e->sentinel)
158                 l->nr_elts++;
159 }
160
161 static void l_add_tail(struct entry_space *es, struct ilist *l, struct entry *e)
162 {
163         struct entry *tail = l_tail(es, l);
164
165         e->next = INDEXER_NULL;
166         e->prev = l->tail;
167
168         if (tail)
169                 tail->next = l->tail = to_index(es, e);
170         else
171                 l->head = l->tail = to_index(es, e);
172
173         if (!e->sentinel)
174                 l->nr_elts++;
175 }
176
177 static void l_add_before(struct entry_space *es, struct ilist *l,
178                          struct entry *old, struct entry *e)
179 {
180         struct entry *prev = l_prev(es, old);
181
182         if (!prev)
183                 l_add_head(es, l, e);
184
185         else {
186                 e->prev = old->prev;
187                 e->next = to_index(es, old);
188                 prev->next = old->prev = to_index(es, e);
189
190                 if (!e->sentinel)
191                         l->nr_elts++;
192         }
193 }
194
195 static void l_del(struct entry_space *es, struct ilist *l, struct entry *e)
196 {
197         struct entry *prev = l_prev(es, e);
198         struct entry *next = l_next(es, e);
199
200         if (prev)
201                 prev->next = e->next;
202         else
203                 l->head = e->next;
204
205         if (next)
206                 next->prev = e->prev;
207         else
208                 l->tail = e->prev;
209
210         if (!e->sentinel)
211                 l->nr_elts--;
212 }
213
214 static struct entry *l_pop_tail(struct entry_space *es, struct ilist *l)
215 {
216         struct entry *e;
217
218         for (e = l_tail(es, l); e; e = l_prev(es, e))
219                 if (!e->sentinel) {
220                         l_del(es, l, e);
221                         return e;
222                 }
223
224         return NULL;
225 }
226
227 /*----------------------------------------------------------------*/
228
229 /*
230  * The stochastic-multi-queue is a set of lru lists stacked into levels.
231  * Entries are moved up levels when they are used, which loosely orders the
232  * most accessed entries in the top levels and least in the bottom.  This
233  * structure is *much* better than a single lru list.
234  */
235 #define MAX_LEVELS 64u
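/*
 * Entries are pushed onto the tail of their level.  A hit requeues an
 * entry a few levels higher (see q_requeue()), q_redistribute() keeps the
 * per level populations close to their targets, and eviction candidates
 * are found by scanning upwards from level 0 (see q_peek()).
 */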
236
237 struct queue {
238         struct entry_space *es;
239
240         unsigned nr_elts;
241         unsigned nr_levels;
242         struct ilist qs[MAX_LEVELS];
243
244         /*
245          * We maintain a count of the number of entries we would like in each
246          * level.
247          */
248         unsigned last_target_nr_elts;
249         unsigned nr_top_levels;
250         unsigned nr_in_top_levels;
251         unsigned target_count[MAX_LEVELS];
252 };
253
254 static void q_init(struct queue *q, struct entry_space *es, unsigned nr_levels)
255 {
256         unsigned i;
257
258         q->es = es;
259         q->nr_elts = 0;
260         q->nr_levels = nr_levels;
261
262         for (i = 0; i < q->nr_levels; i++) {
263                 l_init(q->qs + i);
264                 q->target_count[i] = 0u;
265         }
266
267         q->last_target_nr_elts = 0u;
268         q->nr_top_levels = 0u;
269         q->nr_in_top_levels = 0u;
270 }
271
272 static unsigned q_size(struct queue *q)
273 {
274         return q->nr_elts;
275 }
276
277 /*
278  * Insert an entry at the back of the given level.
279  */
280 static void q_push(struct queue *q, struct entry *e)
281 {
282         if (!e->sentinel)
283                 q->nr_elts++;
284
285         l_add_tail(q->es, q->qs + e->level, e);
286 }
287
288 static void q_push_before(struct queue *q, struct entry *old, struct entry *e)
289 {
290         if (!e->sentinel)
291                 q->nr_elts++;
292
293         l_add_before(q->es, q->qs + e->level, old, e);
294 }
295
296 static void q_del(struct queue *q, struct entry *e)
297 {
298         l_del(q->es, q->qs + e->level, e);
299         if (!e->sentinel)
300                 q->nr_elts--;
301 }
302
303 /*
304  * Return the oldest entry of the lowest populated level.
305  */
306 static struct entry *q_peek(struct queue *q, unsigned max_level, bool can_cross_sentinel)
307 {
308         unsigned level;
309         struct entry *e;
310
311         max_level = min(max_level, q->nr_levels);
312
313         for (level = 0; level < max_level; level++)
314                 for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e)) {
315                         if (e->sentinel) {
316                                 if (can_cross_sentinel)
317                                         continue;
318                                 else
319                                         break;
320                         }
321
322                         return e;
323                 }
324
325         return NULL;
326 }
327
328 static struct entry *q_pop(struct queue *q)
329 {
330         struct entry *e = q_peek(q, q->nr_levels, true);
331
332         if (e)
333                 q_del(q, e);
334
335         return e;
336 }
337
338 /*
339  * Pops the oldest entry below max_level, without crossing a sentinel.
340  */
341 static struct entry *q_pop_old(struct queue *q, unsigned max_level)
342 {
343         struct entry *e = q_peek(q, max_level, false);
344
345         if (e)
346                 q_del(q, e);
347
348         return e;
349 }
350
351 /*
352  * This function assumes there is a non-sentinel entry to pop.  It's only
353  * used by redistribute, so we know this is true.  It also doesn't adjust
354  * the q->nr_elts count.
355  */
356 static struct entry *__redist_pop_from(struct queue *q, unsigned level)
357 {
358         struct entry *e;
359
360         for (; level < q->nr_levels; level++)
361                 for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e))
362                         if (!e->sentinel) {
363                                 l_del(q->es, q->qs + e->level, e);
364                                 return e;
365                         }
366
367         return NULL;
368 }
369
370 static void q_set_targets_subrange_(struct queue *q, unsigned nr_elts, unsigned lbegin, unsigned lend)
371 {
372         unsigned level, nr_levels, entries_per_level, remainder;
373
374         BUG_ON(lbegin > lend);
375         BUG_ON(lend > q->nr_levels);
376         nr_levels = lend - lbegin;
377         entries_per_level = safe_div(nr_elts, nr_levels);
378         remainder = safe_mod(nr_elts, nr_levels);
379
380         for (level = lbegin; level < lend; level++)
381                 q->target_count[level] =
382                         (level < (lbegin + remainder)) ? entries_per_level + 1u : entries_per_level;
383 }
384
385 /*
386  * Typically we have fewer elements in the top few levels which allows us
387  * to adjust the promote threshold nicely.
388  */
389 static void q_set_targets(struct queue *q)
390 {
391         if (q->last_target_nr_elts == q->nr_elts)
392                 return;
393
394         q->last_target_nr_elts = q->nr_elts;
395
396         if (q->nr_top_levels > q->nr_levels)
397                 q_set_targets_subrange_(q, q->nr_elts, 0, q->nr_levels);
398
399         else {
400                 q_set_targets_subrange_(q, q->nr_in_top_levels,
401                                         q->nr_levels - q->nr_top_levels, q->nr_levels);
402
403                 if (q->nr_in_top_levels < q->nr_elts)
404                         q_set_targets_subrange_(q, q->nr_elts - q->nr_in_top_levels,
405                                                 0, q->nr_levels - q->nr_top_levels);
406                 else
407                         q_set_targets_subrange_(q, 0, 0, q->nr_levels - q->nr_top_levels);
408         }
409 }
410
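/*
 * Rebalance the levels so that each holds roughly target_count[] entries:
 * levels below target pull entries down from the levels above them, and
 * levels over target push their excess up into the next level.
 */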
411 static void q_redistribute(struct queue *q)
412 {
413         unsigned target, level;
414         struct ilist *l, *l_above;
415         struct entry *e;
416
417         q_set_targets(q);
418
419         for (level = 0u; level < q->nr_levels - 1u; level++) {
420                 l = q->qs + level;
421                 target = q->target_count[level];
422
423                 /*
424                  * Pull down some entries from the level above.
425                  */
426                 while (l->nr_elts < target) {
427                         e = __redist_pop_from(q, level + 1u);
428                         if (!e) {
429                                 /* bug in nr_elts */
430                                 break;
431                         }
432
433                         e->level = level;
434                         l_add_tail(q->es, l, e);
435                 }
436
437                 /*
438                  * Push some entries up.
439                  */
440                 l_above = q->qs + level + 1u;
441                 while (l->nr_elts > target) {
442                         e = l_pop_tail(q->es, l);
443
444                         if (!e)
445                                 /* bug in nr_elts */
446                                 break;
447
448                         e->level = level + 1u;
449                         l_add_head(q->es, l_above, e);
450                 }
451         }
452 }
453
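/*
 * Promote an entry by up to extra_levels.  To keep the level populations
 * stable, one entry from the destination level is demoted into the level
 * the promoted entry came from (inserted just before 'dest' when given).
 */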
454 static void q_requeue_before(struct queue *q, struct entry *dest, struct entry *e, unsigned extra_levels)
455 {
456         struct entry *de;
457         unsigned new_level;
458
459         q_del(q, e);
460
461         if (extra_levels && (e->level < q->nr_levels - 1u)) {
462                 new_level = min(q->nr_levels - 1u, e->level + extra_levels);
463                 for (de = l_head(q->es, q->qs + new_level); de; de = l_next(q->es, de)) {
464                         if (de->sentinel)
465                                 continue;
466
467                         q_del(q, de);
468                         de->level = e->level;
469
470                         if (dest)
471                                 q_push_before(q, dest, de);
472                         else
473                                 q_push(q, de);
474                         break;
475                 }
476
477                 e->level = new_level;
478         }
479
480         q_push(q, e);
481 }
482
483 static void q_requeue(struct queue *q, struct entry *e, unsigned extra_levels)
484 {
485         q_requeue_before(q, NULL, e, extra_levels);
486 }
487
488 /*----------------------------------------------------------------*/
489
490 #define FP_SHIFT 8
491 #define SIXTEENTH (1u << (FP_SHIFT - 4u))
492 #define EIGHTH (1u << (FP_SHIFT - 3u))
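/*
 * FP_SHIFT gives an 8 bit fixed point fraction: 1.0 is represented by 256,
 * so SIXTEENTH (16) and EIGHTH (32) correspond to 1/16 and 1/8.
 */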
493
494 struct stats {
495         unsigned hit_threshold;
496         unsigned hits;
497         unsigned misses;
498 };
499
500 enum performance {
501         Q_POOR,
502         Q_FAIR,
503         Q_WELL
504 };
505
506 static void stats_init(struct stats *s, unsigned nr_levels)
507 {
508         s->hit_threshold = (nr_levels * 3u) / 4u;
509         s->hits = 0u;
510         s->misses = 0u;
511 }
512
513 static void stats_reset(struct stats *s)
514 {
515         s->hits = s->misses = 0u;
516 }
517
518 static void stats_level_accessed(struct stats *s, unsigned level)
519 {
520         if (level >= s->hit_threshold)
521                 s->hits++;
522         else
523                 s->misses++;
524 }
525
526 static void stats_miss(struct stats *s)
527 {
528         s->misses++;
529 }
530
531 /*
532  * There are times when we don't have any confidence in the hotspot queue,
533  * such as when a fresh cache is created and the blocks have been spread
534  * out across the levels, or when the I/O load changes.  We detect this by
535  * seeing how often a lookup lands in the top levels of the hotspot queue.
536  */
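/*
 * E.g. 20 hits against 300 misses gives a confidence of
 * (20 << 8) / 320 = 16, which is right on the SIXTEENTH boundary and so
 * is rated Q_FAIR rather than Q_POOR.
 */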
537 static enum performance stats_assess(struct stats *s)
538 {
539         unsigned confidence = safe_div(s->hits << FP_SHIFT, s->hits + s->misses);
540
541         if (confidence < SIXTEENTH)
542                 return Q_POOR;
543
544         else if (confidence < EIGHTH)
545                 return Q_FAIR;
546
547         else
548                 return Q_WELL;
549 }
550
551 /*----------------------------------------------------------------*/
552
553 struct hash_table {
554         struct entry_space *es;
555         unsigned long long hash_bits;
556         unsigned *buckets;
557 };
558
559 /*
560  * All cache entries are stored in a chained hash table.  To save space we
561  * use indexing again, and only store indexes to the next entry.
562  */
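/*
 * E.g. a cache of 1,048,576 entries gets 1,048,576 / 4 = 262,144 buckets
 * (2^18), so hash_bits ends up as 18.
 */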
563 static int h_init(struct hash_table *ht, struct entry_space *es, unsigned nr_entries)
564 {
565         unsigned i, nr_buckets;
566
567         ht->es = es;
568         nr_buckets = roundup_pow_of_two(max(nr_entries / 4u, 16u));
569         ht->hash_bits = ffs(nr_buckets) - 1;
570
571         ht->buckets = vmalloc(sizeof(*ht->buckets) * nr_buckets);
572         if (!ht->buckets)
573                 return -ENOMEM;
574
575         for (i = 0; i < nr_buckets; i++)
576                 ht->buckets[i] = INDEXER_NULL;
577
578         return 0;
579 }
580
581 static void h_exit(struct hash_table *ht)
582 {
583         vfree(ht->buckets);
584 }
585
586 static struct entry *h_head(struct hash_table *ht, unsigned bucket)
587 {
588         return to_entry(ht->es, ht->buckets[bucket]);
589 }
590
591 static struct entry *h_next(struct hash_table *ht, struct entry *e)
592 {
593         return to_entry(ht->es, e->hash_next);
594 }
595
596 static void __h_insert(struct hash_table *ht, unsigned bucket, struct entry *e)
597 {
598         e->hash_next = ht->buckets[bucket];
599         ht->buckets[bucket] = to_index(ht->es, e);
600 }
601
602 static void h_insert(struct hash_table *ht, struct entry *e)
603 {
604         unsigned h = hash_64(from_oblock(e->oblock), ht->hash_bits);
605         __h_insert(ht, h, e);
606 }
607
608 static struct entry *__h_lookup(struct hash_table *ht, unsigned h, dm_oblock_t oblock,
609                                 struct entry **prev)
610 {
611         struct entry *e;
612
613         *prev = NULL;
614         for (e = h_head(ht, h); e; e = h_next(ht, e)) {
615                 if (e->oblock == oblock)
616                         return e;
617
618                 *prev = e;
619         }
620
621         return NULL;
622 }
623
624 static void __h_unlink(struct hash_table *ht, unsigned h,
625                        struct entry *e, struct entry *prev)
626 {
627         if (prev)
628                 prev->hash_next = e->hash_next;
629         else
630                 ht->buckets[h] = e->hash_next;
631 }
632
633 /*
634  * Also moves the found entry to the front of its bucket.
635  */
636 static struct entry *h_lookup(struct hash_table *ht, dm_oblock_t oblock)
637 {
638         struct entry *e, *prev;
639         unsigned h = hash_64(from_oblock(oblock), ht->hash_bits);
640
641         e = __h_lookup(ht, h, oblock, &prev);
642         if (e && prev) {
643                 /*
644                  * Move to the front because this entry is likely
645                  * to be hit again.
646                  */
647                 __h_unlink(ht, h, e, prev);
648                 __h_insert(ht, h, e);
649         }
650
651         return e;
652 }
653
654 static void h_remove(struct hash_table *ht, struct entry *e)
655 {
656         unsigned h = hash_64(from_oblock(e->oblock), ht->hash_bits);
657         struct entry *prev;
658
659         /*
660          * The downside of using a singly linked list is that we have to
661          * iterate the bucket to remove an item.
662          */
663         e = __h_lookup(ht, h, e->oblock, &prev);
664         if (e)
665                 __h_unlink(ht, h, e, prev);
666 }
667
668 /*----------------------------------------------------------------*/
669
670 struct entry_alloc {
671         struct entry_space *es;
672         unsigned begin;
673
674         unsigned nr_allocated;
675         struct ilist free;
676 };
677
678 static void init_allocator(struct entry_alloc *ea, struct entry_space *es,
679                            unsigned begin, unsigned end)
680 {
681         unsigned i;
682
683         ea->es = es;
684         ea->nr_allocated = 0u;
685         ea->begin = begin;
686
687         l_init(&ea->free);
688         for (i = begin; i != end; i++)
689                 l_add_tail(ea->es, &ea->free, __get_entry(ea->es, i));
690 }
691
692 static void init_entry(struct entry *e)
693 {
694         /*
695          * We can't memset because that would clear the sentinel bit,
696          * which must remain constant.
697          */
698         e->hash_next = INDEXER_NULL;
699         e->next = INDEXER_NULL;
700         e->prev = INDEXER_NULL;
701         e->level = 0u;
702         e->allocated = true;
703 }
704
705 static struct entry *alloc_entry(struct entry_alloc *ea)
706 {
707         struct entry *e;
708
709         if (l_empty(&ea->free))
710                 return NULL;
711
712         e = l_pop_tail(ea->es, &ea->free);
713         init_entry(e);
714         ea->nr_allocated++;
715
716         return e;
717 }
718
719 /*
720  * This assumes the cblock hasn't already been allocated.
721  */
722 static struct entry *alloc_particular_entry(struct entry_alloc *ea, unsigned i)
723 {
724         struct entry *e = __get_entry(ea->es, ea->begin + i);
725
726         BUG_ON(e->allocated);
727
728         l_del(ea->es, &ea->free, e);
729         init_entry(e);
730         ea->nr_allocated++;
731
732         return e;
733 }
734
735 static void free_entry(struct entry_alloc *ea, struct entry *e)
736 {
737         BUG_ON(!ea->nr_allocated);
738         BUG_ON(!e->allocated);
739
740         ea->nr_allocated--;
741         e->allocated = false;
742         l_add_tail(ea->es, &ea->free, e);
743 }
744
745 static bool allocator_empty(struct entry_alloc *ea)
746 {
747         return l_empty(&ea->free);
748 }
749
750 static unsigned get_index(struct entry_alloc *ea, struct entry *e)
751 {
752         return to_index(ea->es, e) - ea->begin;
753 }
754
755 static struct entry *get_entry(struct entry_alloc *ea, unsigned index)
756 {
757         return __get_entry(ea->es, ea->begin + index);
758 }
759
760 /*----------------------------------------------------------------*/
761
762 #define NR_HOTSPOT_LEVELS 64u
763 #define NR_CACHE_LEVELS 64u
764
765 #define WRITEBACK_PERIOD (10 * HZ)
766 #define DEMOTE_PERIOD (60 * HZ)
767
768 #define HOTSPOT_UPDATE_PERIOD (HZ)
769 #define CACHE_UPDATE_PERIOD (10u * HZ)
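/*
 * All periods are in jiffies (HZ per second): the writeback sentinels
 * advance every 10s, the demote sentinels every 60s, the hotspot stats
 * are reassessed every second and the cache stats every 10s.
 */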
770
771 struct smq_policy {
772         struct dm_cache_policy policy;
773
774         /* protects everything */
775         struct mutex lock;
776         dm_cblock_t cache_size;
777         sector_t cache_block_size;
778
779         sector_t hotspot_block_size;
780         unsigned nr_hotspot_blocks;
781         unsigned cache_blocks_per_hotspot_block;
782         unsigned hotspot_level_jump;
783
784         struct entry_space es;
785         struct entry_alloc writeback_sentinel_alloc;
786         struct entry_alloc demote_sentinel_alloc;
787         struct entry_alloc hotspot_alloc;
788         struct entry_alloc cache_alloc;
789
790         unsigned long *hotspot_hit_bits;
791         unsigned long *cache_hit_bits;
792
793         /*
794          * We maintain three queues of entries.  The cache proper consists
795          * of a clean and a dirty queue containing the currently active
796          * mappings.  The hotspot queue uses a larger block size to track
797          * blocks that are hit frequently and are potential candidates for
798          * promotion to the cache.
799          */
800         struct queue hotspot;
801         struct queue clean;
802         struct queue dirty;
803
804         struct stats hotspot_stats;
805         struct stats cache_stats;
806
807         /*
808          * Keeps track of time, incremented by the core.  We use this to
809          * avoid attributing multiple hits within the same tick.
810          *
811          * Access to tick_protected should be done with the spin lock held.
812          * It's copied to tick at the start of the map function (within the
813          * mutex).
814          */
815         spinlock_t tick_lock;
816         unsigned tick_protected;
817         unsigned tick;
818
819         /*
820          * The hash tables allow us to quickly find an entry by origin
821          * block.
822          */
823         struct hash_table table;
824         struct hash_table hotspot_table;
825
826         bool current_writeback_sentinels;
827         unsigned long next_writeback_period;
828
829         bool current_demote_sentinels;
830         unsigned long next_demote_period;
831
832         unsigned write_promote_level;
833         unsigned read_promote_level;
834
835         unsigned long next_hotspot_period;
836         unsigned long next_cache_period;
837 };
838
839 /*----------------------------------------------------------------*/
840
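/*
 * The writeback and demote allocators each hold two generations of
 * NR_CACHE_LEVELS sentinels (one per level); 'which' selects the
 * generation, and update_sentinels() flips the current generation each
 * period.
 */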
841 static struct entry *get_sentinel(struct entry_alloc *ea, unsigned level, bool which)
842 {
843         return get_entry(ea, which ? level : NR_CACHE_LEVELS + level);
844 }
845
846 static struct entry *writeback_sentinel(struct smq_policy *mq, unsigned level)
847 {
848         return get_sentinel(&mq->writeback_sentinel_alloc, level, mq->current_writeback_sentinels);
849 }
850
851 static struct entry *demote_sentinel(struct smq_policy *mq, unsigned level)
852 {
853         return get_sentinel(&mq->demote_sentinel_alloc, level, mq->current_demote_sentinels);
854 }
855
856 static void __update_writeback_sentinels(struct smq_policy *mq)
857 {
858         unsigned level;
859         struct queue *q = &mq->dirty;
860         struct entry *sentinel;
861
862         for (level = 0; level < q->nr_levels; level++) {
863                 sentinel = writeback_sentinel(mq, level);
864                 q_del(q, sentinel);
865                 q_push(q, sentinel);
866         }
867 }
868
869 static void __update_demote_sentinels(struct smq_policy *mq)
870 {
871         unsigned level;
872         struct queue *q = &mq->clean;
873         struct entry *sentinel;
874
875         for (level = 0; level < q->nr_levels; level++) {
876                 sentinel = demote_sentinel(mq, level);
877                 q_del(q, sentinel);
878                 q_push(q, sentinel);
879         }
880 }
881
882 static void update_sentinels(struct smq_policy *mq)
883 {
884         if (time_after(jiffies, mq->next_writeback_period)) {
885                 __update_writeback_sentinels(mq);
886                 mq->next_writeback_period = jiffies + WRITEBACK_PERIOD;
887                 mq->current_writeback_sentinels = !mq->current_writeback_sentinels;
888         }
889
890         if (time_after(jiffies, mq->next_demote_period)) {
891                 __update_demote_sentinels(mq);
892                 mq->next_demote_period = jiffies + DEMOTE_PERIOD;
893                 mq->current_demote_sentinels = !mq->current_demote_sentinels;
894         }
895 }
896
897 static void __sentinels_init(struct smq_policy *mq)
898 {
899         unsigned level;
900         struct entry *sentinel;
901
902         for (level = 0; level < NR_CACHE_LEVELS; level++) {
903                 sentinel = writeback_sentinel(mq, level);
904                 sentinel->level = level;
905                 q_push(&mq->dirty, sentinel);
906
907                 sentinel = demote_sentinel(mq, level);
908                 sentinel->level = level;
909                 q_push(&mq->clean, sentinel);
910         }
911 }
912
913 static void sentinels_init(struct smq_policy *mq)
914 {
915         mq->next_writeback_period = jiffies + WRITEBACK_PERIOD;
916         mq->next_demote_period = jiffies + DEMOTE_PERIOD;
917
918         mq->current_writeback_sentinels = false;
919         mq->current_demote_sentinels = false;
920         __sentinels_init(mq);
921
922         mq->current_writeback_sentinels = !mq->current_writeback_sentinels;
923         mq->current_demote_sentinels = !mq->current_demote_sentinels;
924         __sentinels_init(mq);
925 }
926
927 /*----------------------------------------------------------------*/
928
929 /*
930  * These methods tie together the dirty queue, clean queue and hash table.
931  */
932 static void push_new(struct smq_policy *mq, struct entry *e)
933 {
934         struct queue *q = e->dirty ? &mq->dirty : &mq->clean;
935         h_insert(&mq->table, e);
936         q_push(q, e);
937 }
938
939 static void push(struct smq_policy *mq, struct entry *e)
940 {
941         struct entry *sentinel;
942
943         h_insert(&mq->table, e);
944
945         /*
946          * Punch this into the queue just in front of the sentinel, to
947          * ensure it's cleaned straight away.
948          */
949         if (e->dirty) {
950                 sentinel = writeback_sentinel(mq, e->level);
951                 q_push_before(&mq->dirty, sentinel, e);
952         } else {
953                 sentinel = demote_sentinel(mq, e->level);
954                 q_push_before(&mq->clean, sentinel, e);
955         }
956 }
957
958 /*
959  * Removes an entry from its queue and from the hash table.
960  */
961 static void __del(struct smq_policy *mq, struct queue *q, struct entry *e)
962 {
963         q_del(q, e);
964         h_remove(&mq->table, e);
965 }
966
967 static void del(struct smq_policy *mq, struct entry *e)
968 {
969         __del(mq, e->dirty ? &mq->dirty : &mq->clean, e);
970 }
971
972 static struct entry *pop_old(struct smq_policy *mq, struct queue *q, unsigned max_level)
973 {
974         struct entry *e = q_pop_old(q, max_level);
975         if (e)
976                 h_remove(&mq->table, e);
977         return e;
978 }
979
980 static dm_cblock_t infer_cblock(struct smq_policy *mq, struct entry *e)
981 {
982         return to_cblock(get_index(&mq->cache_alloc, e));
983 }
984
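/*
 * Only the first hit on a cblock within each cache period bumps the entry
 * up a level; later hits are ignored until end_cache_period() clears
 * cache_hit_bits.
 */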
985 static void requeue(struct smq_policy *mq, struct entry *e)
986 {
987         struct entry *sentinel;
988
989         if (!test_and_set_bit(from_cblock(infer_cblock(mq, e)), mq->cache_hit_bits)) {
990                 if (e->dirty) {
991                         sentinel = writeback_sentinel(mq, e->level);
992                         q_requeue_before(&mq->dirty, sentinel, e, 1u);
993                 } else {
994                         sentinel = demote_sentinel(mq, e->level);
995                         q_requeue_before(&mq->clean, sentinel, e, 1u);
996                 }
997         }
998 }
999
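/*
 * The 17 entry table below is indexed by (hits << 4) / (hits + misses),
 * i.e. the hit ratio in sixteenths (0..16).  It peaks near the middle and
 * tails off at both ends, per the comment inside the function.
 */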
1000 static unsigned default_promote_level(struct smq_policy *mq)
1001 {
1002         /*
1003          * The promote level depends on the current performance of the
1004          * cache.
1005          *
1006          * If the cache is performing badly, then we can't afford
1007          * to promote much without causing performance to drop below that
1008          * of the origin device.
1009          *
1010          * If the cache is performing well, then we don't need to promote
1011          * much.  If it isn't broken, don't fix it.
1012          *
1013          * If the cache is middling then we promote more.
1014          *
1015          * This scheme reminds me of a graph of entropy vs probability of a
1016          * binary variable.
1017          */
1018         static unsigned table[] = {1, 1, 1, 2, 4, 6, 7, 8, 7, 6, 4, 4, 3, 3, 2, 2, 1};
1019
1020         unsigned hits = mq->cache_stats.hits;
1021         unsigned misses = mq->cache_stats.misses;
1022         unsigned index = safe_div(hits << 4u, hits + misses);
1023         return table[index];
1024 }
1025
1026 static void update_promote_levels(struct smq_policy *mq)
1027 {
1028         /*
1029          * If there are unused cache entries then we want to be really
1030          * eager to promote.
1031          */
1032         unsigned threshold_level = allocator_empty(&mq->cache_alloc) ?
1033                 default_promote_level(mq) : (NR_HOTSPOT_LEVELS / 2u);
1034
1035         /*
1036          * If the hotspot queue is performing badly then we have little
1037          * confidence that we know which blocks to promote.  So we cut down
1038          * the amount of promotions.
1039          */
1040         switch (stats_assess(&mq->hotspot_stats)) {
1041         case Q_POOR:
1042                 threshold_level /= 4u;
1043                 break;
1044
1045         case Q_FAIR:
1046                 threshold_level /= 2u;
1047                 break;
1048
1049         case Q_WELL:
1050                 break;
1051         }
1052
1053         mq->read_promote_level = NR_HOTSPOT_LEVELS - threshold_level;
1054         mq->write_promote_level = (NR_HOTSPOT_LEVELS - threshold_level) + 2u;
1055 }
1056
1057 /*
1058  * If the hotspot queue is performing badly, then we try and move entries
1059  * around more quickly.
1060  */
1061 static void update_level_jump(struct smq_policy *mq)
1062 {
1063         switch (stats_assess(&mq->hotspot_stats)) {
1064         case Q_POOR:
1065                 mq->hotspot_level_jump = 4u;
1066                 break;
1067
1068         case Q_FAIR:
1069                 mq->hotspot_level_jump = 2u;
1070                 break;
1071
1072         case Q_WELL:
1073                 mq->hotspot_level_jump = 1u;
1074                 break;
1075         }
1076 }
1077
1078 static void end_hotspot_period(struct smq_policy *mq)
1079 {
1080         clear_bitset(mq->hotspot_hit_bits, mq->nr_hotspot_blocks);
1081         update_promote_levels(mq);
1082
1083         if (time_after(jiffies, mq->next_hotspot_period)) {
1084                 update_level_jump(mq);
1085                 q_redistribute(&mq->hotspot);
1086                 stats_reset(&mq->hotspot_stats);
1087                 mq->next_hotspot_period = jiffies + HOTSPOT_UPDATE_PERIOD;
1088         }
1089 }
1090
1091 static void end_cache_period(struct smq_policy *mq)
1092 {
1093         if (time_after(jiffies, mq->next_cache_period)) {
1094                 clear_bitset(mq->cache_hit_bits, from_cblock(mq->cache_size));
1095
1096                 q_redistribute(&mq->dirty);
1097                 q_redistribute(&mq->clean);
1098                 stats_reset(&mq->cache_stats);
1099
1100                 mq->next_cache_period = jiffies + CACHE_UPDATE_PERIOD;
1101         }
1102 }
1103
1104 static int demote_cblock(struct smq_policy *mq,
1105                          struct policy_locker *locker,
1106                          dm_oblock_t *oblock)
1107 {
1108         struct entry *demoted = q_peek(&mq->clean, mq->clean.nr_levels, false);
1109         if (!demoted)
1110                 /*
1111                  * We could get a block from mq->dirty, but that
1112                  * would add extra latency to the triggering bio as it
1113                  * waits for the writeback.  Better to not promote this
1114                  * time and hope there's a clean block next time this block
1115                  * is hit.
1116                  */
1117                 return -ENOSPC;
1118
1119         if (locker->fn(locker, demoted->oblock))
1120                 /*
1121                  * We couldn't lock this block.
1122                  */
1123                 return -EBUSY;
1124
1125         del(mq, demoted);
1126         *oblock = demoted->oblock;
1127         free_entry(&mq->cache_alloc, demoted);
1128
1129         return 0;
1130 }
1131
1132 enum promote_result {
1133         PROMOTE_NOT,
1134         PROMOTE_TEMPORARY,
1135         PROMOTE_PERMANENT
1136 };
1137
1138 /*
1139  * Converts a boolean into a promote result.
1140  */
1141 static enum promote_result maybe_promote(bool promote)
1142 {
1143         return promote ? PROMOTE_PERMANENT : PROMOTE_NOT;
1144 }
1145
1146 static enum promote_result should_promote(struct smq_policy *mq, struct entry *hs_e, struct bio *bio,
1147                                           bool fast_promote)
1148 {
1149         if (bio_data_dir(bio) == WRITE) {
1150                 if (!allocator_empty(&mq->cache_alloc) && fast_promote)
1151                         return PROMOTE_TEMPORARY;
1152
1153                 else
1154                         return maybe_promote(hs_e->level >= mq->write_promote_level);
1155         } else
1156                 return maybe_promote(hs_e->level >= mq->read_promote_level);
1157 }
1158
1159 static void insert_in_cache(struct smq_policy *mq, dm_oblock_t oblock,
1160                             struct policy_locker *locker,
1161                             struct policy_result *result, enum promote_result pr)
1162 {
1163         int r;
1164         struct entry *e;
1165
1166         if (allocator_empty(&mq->cache_alloc)) {
1167                 result->op = POLICY_REPLACE;
1168                 r = demote_cblock(mq, locker, &result->old_oblock);
1169                 if (r) {
1170                         result->op = POLICY_MISS;
1171                         return;
1172                 }
1173
1174         } else
1175                 result->op = POLICY_NEW;
1176
1177         e = alloc_entry(&mq->cache_alloc);
1178         BUG_ON(!e);
1179         e->oblock = oblock;
1180
1181         if (pr == PROMOTE_TEMPORARY)
1182                 push(mq, e);
1183         else
1184                 push_new(mq, e);
1185
1186         result->cblock = infer_cblock(mq, e);
1187 }
1188
1189 static dm_oblock_t to_hblock(struct smq_policy *mq, dm_oblock_t b)
1190 {
1191         sector_t r = from_oblock(b);
1192         (void) sector_div(r, mq->cache_blocks_per_hotspot_block);
1193         return to_oblock(r);
1194 }
1195
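/*
 * On a hotspot hit, the first hit within a period jumps the entry up by
 * hotspot_level_jump levels; repeat hits requeue it at its current level.
 * On a miss a new hotspot entry is allocated, or the coldest existing one
 * is recycled.
 */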
1196 static struct entry *update_hotspot_queue(struct smq_policy *mq, dm_oblock_t b, struct bio *bio)
1197 {
1198         unsigned hi;
1199         dm_oblock_t hb = to_hblock(mq, b);
1200         struct entry *e = h_lookup(&mq->hotspot_table, hb);
1201
1202         if (e) {
1203                 stats_level_accessed(&mq->hotspot_stats, e->level);
1204
1205                 hi = get_index(&mq->hotspot_alloc, e);
1206                 q_requeue(&mq->hotspot, e,
1207                           test_and_set_bit(hi, mq->hotspot_hit_bits) ?
1208                           0u : mq->hotspot_level_jump);
1209
1210         } else {
1211                 stats_miss(&mq->hotspot_stats);
1212
1213                 e = alloc_entry(&mq->hotspot_alloc);
1214                 if (!e) {
1215                         e = q_pop(&mq->hotspot);
1216                         if (e) {
1217                                 h_remove(&mq->hotspot_table, e);
1218                                 hi = get_index(&mq->hotspot_alloc, e);
1219                                 clear_bit(hi, mq->hotspot_hit_bits);
1220                         }
1221
1222                 }
1223
1224                 if (e) {
1225                         e->oblock = hb;
1226                         q_push(&mq->hotspot, e);
1227                         h_insert(&mq->hotspot_table, e);
1228                 }
1229         }
1230
1231         return e;
1232 }
1233
1234 /*
1235  * Looks the oblock up in the hash table, then decides whether it's a hit,
1236  * a miss, or whether the block should be promoted into the cache.
1237  */
1238 static int map(struct smq_policy *mq, struct bio *bio, dm_oblock_t oblock,
1239                bool can_migrate, bool fast_promote,
1240                struct policy_locker *locker, struct policy_result *result)
1241 {
1242         struct entry *e, *hs_e;
1243         enum promote_result pr;
1244
1245         hs_e = update_hotspot_queue(mq, oblock, bio);
1246
1247         e = h_lookup(&mq->table, oblock);
1248         if (e) {
1249                 stats_level_accessed(&mq->cache_stats, e->level);
1250
1251                 requeue(mq, e);
1252                 result->op = POLICY_HIT;
1253                 result->cblock = infer_cblock(mq, e);
1254
1255         } else {
1256                 stats_miss(&mq->cache_stats);
1257
1258                 pr = should_promote(mq, hs_e, bio, fast_promote);
1259                 if (pr == PROMOTE_NOT)
1260                         result->op = POLICY_MISS;
1261
1262                 else {
1263                         if (!can_migrate) {
1264                                 result->op = POLICY_MISS;
1265                                 return -EWOULDBLOCK;
1266                         }
1267
1268                         insert_in_cache(mq, oblock, locker, result, pr);
1269                 }
1270         }
1271
1272         return 0;
1273 }
1274
1275 /*----------------------------------------------------------------*/
1276
1277 /*
1278  * Public interface, via the policy struct.  See dm-cache-policy.h for a
1279  * description of these.
1280  */
1281
1282 static struct smq_policy *to_smq_policy(struct dm_cache_policy *p)
1283 {
1284         return container_of(p, struct smq_policy, policy);
1285 }
1286
1287 static void smq_destroy(struct dm_cache_policy *p)
1288 {
1289         struct smq_policy *mq = to_smq_policy(p);
1290
1291         h_exit(&mq->hotspot_table);
1292         h_exit(&mq->table);
1293         free_bitset(mq->hotspot_hit_bits);
1294         free_bitset(mq->cache_hit_bits);
1295         space_exit(&mq->es);
1296         kfree(mq);
1297 }
1298
1299 static void copy_tick(struct smq_policy *mq)
1300 {
1301         unsigned long flags, tick;
1302
1303         spin_lock_irqsave(&mq->tick_lock, flags);
1304         tick = mq->tick_protected;
1305         if (tick != mq->tick) {
1306                 update_sentinels(mq);
1307                 end_hotspot_period(mq);
1308                 end_cache_period(mq);
1309                 mq->tick = tick;
1310         }
1311         spin_unlock_irqrestore(&mq->tick_lock, flags);
1312 }
1313
1314 static bool maybe_lock(struct smq_policy *mq, bool can_block)
1315 {
1316         if (can_block) {
1317                 mutex_lock(&mq->lock);
1318                 return true;
1319         } else
1320                 return mutex_trylock(&mq->lock);
1321 }
1322
1323 static int smq_map(struct dm_cache_policy *p, dm_oblock_t oblock,
1324                    bool can_block, bool can_migrate, bool fast_promote,
1325                    struct bio *bio, struct policy_locker *locker,
1326                    struct policy_result *result)
1327 {
1328         int r;
1329         struct smq_policy *mq = to_smq_policy(p);
1330
1331         result->op = POLICY_MISS;
1332
1333         if (!maybe_lock(mq, can_block))
1334                 return -EWOULDBLOCK;
1335
1336         copy_tick(mq);
1337         r = map(mq, bio, oblock, can_migrate, fast_promote, locker, result);
1338         mutex_unlock(&mq->lock);
1339
1340         return r;
1341 }
1342
1343 static int smq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock)
1344 {
1345         int r;
1346         struct smq_policy *mq = to_smq_policy(p);
1347         struct entry *e;
1348
1349         if (!mutex_trylock(&mq->lock))
1350                 return -EWOULDBLOCK;
1351
1352         e = h_lookup(&mq->table, oblock);
1353         if (e) {
1354                 *cblock = infer_cblock(mq, e);
1355                 r = 0;
1356         } else
1357                 r = -ENOENT;
1358
1359         mutex_unlock(&mq->lock);
1360
1361         return r;
1362 }
1363
1364 static void __smq_set_clear_dirty(struct smq_policy *mq, dm_oblock_t oblock, bool set)
1365 {
1366         struct entry *e;
1367
1368         e = h_lookup(&mq->table, oblock);
1369         BUG_ON(!e);
1370
1371         del(mq, e);
1372         e->dirty = set;
1373         push(mq, e);
1374 }
1375
1376 static void smq_set_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
1377 {
1378         struct smq_policy *mq = to_smq_policy(p);
1379
1380         mutex_lock(&mq->lock);
1381         __smq_set_clear_dirty(mq, oblock, true);
1382         mutex_unlock(&mq->lock);
1383 }
1384
1385 static void smq_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
1386 {
1387         struct smq_policy *mq = to_smq_policy(p);
1388
1389         mutex_lock(&mq->lock);
1390         __smq_set_clear_dirty(mq, oblock, false);
1391         mutex_unlock(&mq->lock);
1392 }
1393
1394 static int smq_load_mapping(struct dm_cache_policy *p,
1395                             dm_oblock_t oblock, dm_cblock_t cblock,
1396                             uint32_t hint, bool hint_valid)
1397 {
1398         struct smq_policy *mq = to_smq_policy(p);
1399         struct entry *e;
1400
1401         e = alloc_particular_entry(&mq->cache_alloc, from_cblock(cblock));
1402         e->oblock = oblock;
1403         e->dirty = false;       /* this gets corrected in a minute */
1404         e->level = hint_valid ? min(hint, NR_CACHE_LEVELS - 1) : 1;
1405         push(mq, e);
1406
1407         return 0;
1408 }
1409
1410 static int smq_save_hints(struct smq_policy *mq, struct queue *q,
1411                           policy_walk_fn fn, void *context)
1412 {
1413         int r;
1414         unsigned level;
1415         struct entry *e;
1416
1417         for (level = 0; level < q->nr_levels; level++)
1418                 for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e)) {
1419                         if (!e->sentinel) {
1420                                 r = fn(context, infer_cblock(mq, e),
1421                                        e->oblock, e->level);
1422                                 if (r)
1423                                         return r;
1424                         }
1425                 }
1426
1427         return 0;
1428 }
1429
1430 static int smq_walk_mappings(struct dm_cache_policy *p, policy_walk_fn fn,
1431                              void *context)
1432 {
1433         struct smq_policy *mq = to_smq_policy(p);
1434         int r = 0;
1435
1436         mutex_lock(&mq->lock);
1437
1438         r = smq_save_hints(mq, &mq->clean, fn, context);
1439         if (!r)
1440                 r = smq_save_hints(mq, &mq->dirty, fn, context);
1441
1442         mutex_unlock(&mq->lock);
1443
1444         return r;
1445 }
1446
1447 static void __remove_mapping(struct smq_policy *mq, dm_oblock_t oblock)
1448 {
1449         struct entry *e;
1450
1451         e = h_lookup(&mq->table, oblock);
1452         BUG_ON(!e);
1453
1454         del(mq, e);
1455         free_entry(&mq->cache_alloc, e);
1456 }
1457
1458 static void smq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
1459 {
1460         struct smq_policy *mq = to_smq_policy(p);
1461
1462         mutex_lock(&mq->lock);
1463         __remove_mapping(mq, oblock);
1464         mutex_unlock(&mq->lock);
1465 }
1466
1467 static int __remove_cblock(struct smq_policy *mq, dm_cblock_t cblock)
1468 {
1469         struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock));
1470
1471         if (!e || !e->allocated)
1472                 return -ENODATA;
1473
1474         del(mq, e);
1475         free_entry(&mq->cache_alloc, e);
1476
1477         return 0;
1478 }
1479
1480 static int smq_remove_cblock(struct dm_cache_policy *p, dm_cblock_t cblock)
1481 {
1482         int r;
1483         struct smq_policy *mq = to_smq_policy(p);
1484
1485         mutex_lock(&mq->lock);
1486         r = __remove_cblock(mq, cblock);
1487         mutex_unlock(&mq->lock);
1488
1489         return r;
1490 }
1491
1492
1493 #define CLEAN_TARGET_CRITICAL 5u /* percent */
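/*
 * E.g. a 10,000 block cache meets the critical target once at least
 * 10,000 * 5 / 100 = 500 blocks are clean.
 */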
1494
1495 static bool clean_target_met(struct smq_policy *mq, bool critical)
1496 {
1497         if (critical) {
1498                 /*
1499                  * Cache entries may not be populated.  So we cannot rely on the
1500                  * size of the clean queue.
1501                  */
1502                 unsigned nr_clean = from_cblock(mq->cache_size) - q_size(&mq->dirty);
1503                 unsigned target = from_cblock(mq->cache_size) * CLEAN_TARGET_CRITICAL / 100u;
1504
1505                 return nr_clean >= target;
1506         } else
1507                 return !q_size(&mq->dirty);
1508 }
1509
1510 static int __smq_writeback_work(struct smq_policy *mq, dm_oblock_t *oblock,
1511                                 dm_cblock_t *cblock, bool critical_only)
1512 {
1513         struct entry *e = NULL;
1514         bool target_met = clean_target_met(mq, critical_only);
1515
1516         if (critical_only)
1517                 /*
1518                  * Always try and keep the bottom level clean.
1519                  */
1520                 e = pop_old(mq, &mq->dirty, target_met ? 1u : mq->dirty.nr_levels);
1521
1522         else
1523                 e = pop_old(mq, &mq->dirty, mq->dirty.nr_levels);
1524
1525         if (!e)
1526                 return -ENODATA;
1527
1528         *oblock = e->oblock;
1529         *cblock = infer_cblock(mq, e);
1530         e->dirty = false;
1531         push_new(mq, e);
1532
1533         return 0;
1534 }
1535
1536 static int smq_writeback_work(struct dm_cache_policy *p, dm_oblock_t *oblock,
1537                               dm_cblock_t *cblock, bool critical_only)
1538 {
1539         int r;
1540         struct smq_policy *mq = to_smq_policy(p);
1541
1542         mutex_lock(&mq->lock);
1543         r = __smq_writeback_work(mq, oblock, cblock, critical_only);
1544         mutex_unlock(&mq->lock);
1545
1546         return r;
1547 }
1548
1549 static void __force_mapping(struct smq_policy *mq,
1550                             dm_oblock_t current_oblock, dm_oblock_t new_oblock)
1551 {
1552         struct entry *e = h_lookup(&mq->table, current_oblock);
1553
1554         if (e) {
1555                 del(mq, e);
1556                 e->oblock = new_oblock;
1557                 e->dirty = true;
1558                 push(mq, e);
1559         }
1560 }
1561
1562 static void smq_force_mapping(struct dm_cache_policy *p,
1563                               dm_oblock_t current_oblock, dm_oblock_t new_oblock)
1564 {
1565         struct smq_policy *mq = to_smq_policy(p);
1566
1567         mutex_lock(&mq->lock);
1568         __force_mapping(mq, current_oblock, new_oblock);
1569         mutex_unlock(&mq->lock);
1570 }
1571
1572 static dm_cblock_t smq_residency(struct dm_cache_policy *p)
1573 {
1574         dm_cblock_t r;
1575         struct smq_policy *mq = to_smq_policy(p);
1576
1577         mutex_lock(&mq->lock);
1578         r = to_cblock(mq->cache_alloc.nr_allocated);
1579         mutex_unlock(&mq->lock);
1580
1581         return r;
1582 }
1583
1584 static void smq_tick(struct dm_cache_policy *p, bool can_block)
1585 {
1586         struct smq_policy *mq = to_smq_policy(p);
1587         unsigned long flags;
1588
1589         spin_lock_irqsave(&mq->tick_lock, flags);
1590         mq->tick_protected++;
1591         spin_unlock_irqrestore(&mq->tick_lock, flags);
1592
1593         if (can_block) {
1594                 mutex_lock(&mq->lock);
1595                 copy_tick(mq);
1596                 mutex_unlock(&mq->lock);
1597         }
1598 }
1599
1600 /* Init the policy plugin interface function pointers. */
1601 static void init_policy_functions(struct smq_policy *mq)
1602 {
1603         mq->policy.destroy = smq_destroy;
1604         mq->policy.map = smq_map;
1605         mq->policy.lookup = smq_lookup;
1606         mq->policy.set_dirty = smq_set_dirty;
1607         mq->policy.clear_dirty = smq_clear_dirty;
1608         mq->policy.load_mapping = smq_load_mapping;
1609         mq->policy.walk_mappings = smq_walk_mappings;
1610         mq->policy.remove_mapping = smq_remove_mapping;
1611         mq->policy.remove_cblock = smq_remove_cblock;
1612         mq->policy.writeback_work = smq_writeback_work;
1613         mq->policy.force_mapping = smq_force_mapping;
1614         mq->policy.residency = smq_residency;
1615         mq->policy.tick = smq_tick;
1616 }
1617
1618 static bool too_many_hotspot_blocks(sector_t origin_size,
1619                                     sector_t hotspot_block_size,
1620                                     unsigned nr_hotspot_blocks)
1621 {
1622         return (hotspot_block_size * nr_hotspot_blocks) > origin_size;
1623 }
1624
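/*
 * Hotspot blocks start out at 16x the cache block size, with at least
 * max(nr_cache_blocks / 4, 1024) of them; the block size is then halved
 * until the hotspot region no longer overshoots the origin (but never
 * below the cache block size).
 */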
1625 static void calc_hotspot_params(sector_t origin_size,
1626                                 sector_t cache_block_size,
1627                                 unsigned nr_cache_blocks,
1628                                 sector_t *hotspot_block_size,
1629                                 unsigned *nr_hotspot_blocks)
1630 {
1631         *hotspot_block_size = cache_block_size * 16u;
1632         *nr_hotspot_blocks = max(nr_cache_blocks / 4u, 1024u);
1633
1634         while ((*hotspot_block_size > cache_block_size) &&
1635                too_many_hotspot_blocks(origin_size, *hotspot_block_size, *nr_hotspot_blocks))
1636                 *hotspot_block_size /= 2u;
1637 }
1638
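/*
 * The single entry_space below is carved up as:
 *
 *   [writeback sentinels | demote sentinels | hotspot entries | cache entries]
 *
 * which is what the four init_allocator() calls in smq_create() set up.
 */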
1639 static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
1640                                           sector_t origin_size,
1641                                           sector_t cache_block_size)
1642 {
1643         unsigned i;
1644         unsigned nr_sentinels_per_queue = 2u * NR_CACHE_LEVELS;
1645         unsigned total_sentinels = 2u * nr_sentinels_per_queue;
1646         struct smq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL);
1647
1648         if (!mq)
1649                 return NULL;
1650
1651         init_policy_functions(mq);
1652         mq->cache_size = cache_size;
1653         mq->cache_block_size = cache_block_size;
1654
1655         calc_hotspot_params(origin_size, cache_block_size, from_cblock(cache_size),
1656                             &mq->hotspot_block_size, &mq->nr_hotspot_blocks);
1657
1658         mq->cache_blocks_per_hotspot_block = div64_u64(mq->hotspot_block_size, mq->cache_block_size);
1659         mq->hotspot_level_jump = 1u;
1660         if (space_init(&mq->es, total_sentinels + mq->nr_hotspot_blocks + from_cblock(cache_size))) {
1661                 DMERR("couldn't initialize entry space");
1662                 goto bad_pool_init;
1663         }
1664
1665         init_allocator(&mq->writeback_sentinel_alloc, &mq->es, 0, nr_sentinels_per_queue);
1666         for (i = 0; i < nr_sentinels_per_queue; i++)
1667                 get_entry(&mq->writeback_sentinel_alloc, i)->sentinel = true;
1668
1669         init_allocator(&mq->demote_sentinel_alloc, &mq->es, nr_sentinels_per_queue, total_sentinels);
1670         for (i = 0; i < nr_sentinels_per_queue; i++)
1671                 get_entry(&mq->demote_sentinel_alloc, i)->sentinel = true;
1672
1673         init_allocator(&mq->hotspot_alloc, &mq->es, total_sentinels,
1674                        total_sentinels + mq->nr_hotspot_blocks);
1675
1676         init_allocator(&mq->cache_alloc, &mq->es,
1677                        total_sentinels + mq->nr_hotspot_blocks,
1678                        total_sentinels + mq->nr_hotspot_blocks + from_cblock(cache_size));
1679
1680         mq->hotspot_hit_bits = alloc_bitset(mq->nr_hotspot_blocks);
1681         if (!mq->hotspot_hit_bits) {
1682                 DMERR("couldn't allocate hotspot hit bitset");
1683                 goto bad_hotspot_hit_bits;
1684         }
1685         clear_bitset(mq->hotspot_hit_bits, mq->nr_hotspot_blocks);
1686
1687         if (from_cblock(cache_size)) {
1688                 mq->cache_hit_bits = alloc_bitset(from_cblock(cache_size));
1689                 if (!mq->cache_hit_bits) {
1690                         DMERR("couldn't allocate cache hit bitset");
1691                         goto bad_cache_hit_bits;
1692                 }
1693                 clear_bitset(mq->cache_hit_bits, from_cblock(mq->cache_size));
1694         } else
1695                 mq->cache_hit_bits = NULL;
1696
1697         mq->tick_protected = 0;
1698         mq->tick = 0;
1699         mutex_init(&mq->lock);
1700         spin_lock_init(&mq->tick_lock);
1701
1702         q_init(&mq->hotspot, &mq->es, NR_HOTSPOT_LEVELS);
1703         mq->hotspot.nr_top_levels = 8;
1704         mq->hotspot.nr_in_top_levels = min(mq->nr_hotspot_blocks / NR_HOTSPOT_LEVELS,
1705                                            from_cblock(mq->cache_size) / mq->cache_blocks_per_hotspot_block);
1706
1707         q_init(&mq->clean, &mq->es, NR_CACHE_LEVELS);
1708         q_init(&mq->dirty, &mq->es, NR_CACHE_LEVELS);
1709
1710         stats_init(&mq->hotspot_stats, NR_HOTSPOT_LEVELS);
1711         stats_init(&mq->cache_stats, NR_CACHE_LEVELS);
1712
1713         if (h_init(&mq->table, &mq->es, from_cblock(cache_size)))
1714                 goto bad_alloc_table;
1715
1716         if (h_init(&mq->hotspot_table, &mq->es, mq->nr_hotspot_blocks))
1717                 goto bad_alloc_hotspot_table;
1718
1719         sentinels_init(mq);
1720         mq->write_promote_level = mq->read_promote_level = NR_HOTSPOT_LEVELS;
1721
1722         mq->next_hotspot_period = jiffies;
1723         mq->next_cache_period = jiffies;
1724
1725         return &mq->policy;
1726
1727 bad_alloc_hotspot_table:
1728         h_exit(&mq->table);
1729 bad_alloc_table:
1730         free_bitset(mq->cache_hit_bits);
1731 bad_cache_hit_bits:
1732         free_bitset(mq->hotspot_hit_bits);
1733 bad_hotspot_hit_bits:
1734         space_exit(&mq->es);
1735 bad_pool_init:
1736         kfree(mq);
1737
1738         return NULL;
1739 }
1740
1741 /*----------------------------------------------------------------*/
1742
1743 static struct dm_cache_policy_type smq_policy_type = {
1744         .name = "smq",
1745         .version = {1, 0, 0},
1746         .hint_size = 4,
1747         .owner = THIS_MODULE,
1748         .create = smq_create
1749 };
1750
1751 static struct dm_cache_policy_type default_policy_type = {
1752         .name = "default",
1753         .version = {1, 4, 0},
1754         .hint_size = 4,
1755         .owner = THIS_MODULE,
1756         .create = smq_create,
1757         .real = &smq_policy_type
1758 };
1759
1760 static int __init smq_init(void)
1761 {
1762         int r;
1763
1764         r = dm_cache_policy_register(&smq_policy_type);
1765         if (r) {
1766                 DMERR("register failed %d", r);
1767                 return -ENOMEM;
1768         }
1769
1770         r = dm_cache_policy_register(&default_policy_type);
1771         if (r) {
1772                 DMERR("register failed (as default) %d", r);
1773                 dm_cache_policy_unregister(&smq_policy_type);
1774                 return -ENOMEM;
1775         }
1776
1777         return 0;
1778 }
1779
1780 static void __exit smq_exit(void)
1781 {
1782         dm_cache_policy_unregister(&smq_policy_type);
1783         dm_cache_policy_unregister(&default_policy_type);
1784 }
1785
1786 module_init(smq_init);
1787 module_exit(smq_exit);
1788
1789 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
1790 MODULE_LICENSE("GPL");
1791 MODULE_DESCRIPTION("smq cache policy");
1792
1793 MODULE_ALIAS("dm-cache-default");