[linux.git] drivers/md/dm-cache-policy-smq.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2015 Red Hat. All rights reserved.
4  *
5  * This file is released under the GPL.
6  */
7
8 #include "dm-cache-background-tracker.h"
9 #include "dm-cache-policy-internal.h"
10 #include "dm-cache-policy.h"
11 #include "dm.h"
12
13 #include <linux/hash.h>
14 #include <linux/jiffies.h>
15 #include <linux/module.h>
16 #include <linux/mutex.h>
17 #include <linux/vmalloc.h>
18 #include <linux/math64.h>
19
20 #define DM_MSG_PREFIX "cache-policy-smq"
21
22 /*----------------------------------------------------------------*/
23
24 /*
25  * Safe division functions that return zero on divide by zero.
26  */
27 static unsigned int safe_div(unsigned int n, unsigned int d)
28 {
29         return d ? n / d : 0u;
30 }
31
32 static unsigned int safe_mod(unsigned int n, unsigned int d)
33 {
34         return d ? n % d : 0u;
35 }
36
37 /*----------------------------------------------------------------*/
38
39 struct entry {
40         unsigned int hash_next:28;
41         unsigned int prev:28;
42         unsigned int next:28;
43         unsigned int level:6;
44         bool dirty:1;
45         bool allocated:1;
46         bool sentinel:1;
47         bool pending_work:1;
48
49         dm_oblock_t oblock;
50 };
51
52 /*----------------------------------------------------------------*/
53
54 #define INDEXER_NULL ((1u << 28u) - 1u)
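/*
 * For illustration: the 28-bit hash_next/prev/next fields in struct entry
 * mean an entry_space can index at most (1u << 28) - 1 entries, since the
 * all-ones value is reserved as INDEXER_NULL ("no entry").
 */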
55
56 /*
57  * An entry_space manages a set of entries that we use for the queues.
58  * The clean and dirty queues share entries, so this object is separate
59  * from the queue itself.
60  */
61 struct entry_space {
62         struct entry *begin;
63         struct entry *end;
64 };
65
66 static int space_init(struct entry_space *es, unsigned int nr_entries)
67 {
68         if (!nr_entries) {
69                 es->begin = es->end = NULL;
70                 return 0;
71         }
72
73         es->begin = vzalloc(array_size(nr_entries, sizeof(struct entry)));
74         if (!es->begin)
75                 return -ENOMEM;
76
77         es->end = es->begin + nr_entries;
78         return 0;
79 }
80
81 static void space_exit(struct entry_space *es)
82 {
83         vfree(es->begin);
84 }
85
86 static struct entry *__get_entry(struct entry_space *es, unsigned int block)
87 {
88         struct entry *e;
89
90         e = es->begin + block;
91         BUG_ON(e >= es->end);
92
93         return e;
94 }
95
96 static unsigned int to_index(struct entry_space *es, struct entry *e)
97 {
98         BUG_ON(e < es->begin || e >= es->end);
99         return e - es->begin;
100 }
101
102 static struct entry *to_entry(struct entry_space *es, unsigned int block)
103 {
104         if (block == INDEXER_NULL)
105                 return NULL;
106
107         return __get_entry(es, block);
108 }
109
110 /*----------------------------------------------------------------*/
111
112 struct ilist {
113         unsigned int nr_elts;   /* excluding sentinel entries */
114         unsigned int head, tail;
115 };
116
117 static void l_init(struct ilist *l)
118 {
119         l->nr_elts = 0;
120         l->head = l->tail = INDEXER_NULL;
121 }
122
123 static struct entry *l_head(struct entry_space *es, struct ilist *l)
124 {
125         return to_entry(es, l->head);
126 }
127
128 static struct entry *l_tail(struct entry_space *es, struct ilist *l)
129 {
130         return to_entry(es, l->tail);
131 }
132
133 static struct entry *l_next(struct entry_space *es, struct entry *e)
134 {
135         return to_entry(es, e->next);
136 }
137
138 static struct entry *l_prev(struct entry_space *es, struct entry *e)
139 {
140         return to_entry(es, e->prev);
141 }
142
143 static bool l_empty(struct ilist *l)
144 {
145         return l->head == INDEXER_NULL;
146 }
147
148 static void l_add_head(struct entry_space *es, struct ilist *l, struct entry *e)
149 {
150         struct entry *head = l_head(es, l);
151
152         e->next = l->head;
153         e->prev = INDEXER_NULL;
154
155         if (head)
156                 head->prev = l->head = to_index(es, e);
157         else
158                 l->head = l->tail = to_index(es, e);
159
160         if (!e->sentinel)
161                 l->nr_elts++;
162 }
163
164 static void l_add_tail(struct entry_space *es, struct ilist *l, struct entry *e)
165 {
166         struct entry *tail = l_tail(es, l);
167
168         e->next = INDEXER_NULL;
169         e->prev = l->tail;
170
171         if (tail)
172                 tail->next = l->tail = to_index(es, e);
173         else
174                 l->head = l->tail = to_index(es, e);
175
176         if (!e->sentinel)
177                 l->nr_elts++;
178 }
179
180 static void l_add_before(struct entry_space *es, struct ilist *l,
181                          struct entry *old, struct entry *e)
182 {
183         struct entry *prev = l_prev(es, old);
184
185         if (!prev)
186                 l_add_head(es, l, e);
187
188         else {
189                 e->prev = old->prev;
190                 e->next = to_index(es, old);
191                 prev->next = old->prev = to_index(es, e);
192
193                 if (!e->sentinel)
194                         l->nr_elts++;
195         }
196 }
197
198 static void l_del(struct entry_space *es, struct ilist *l, struct entry *e)
199 {
200         struct entry *prev = l_prev(es, e);
201         struct entry *next = l_next(es, e);
202
203         if (prev)
204                 prev->next = e->next;
205         else
206                 l->head = e->next;
207
208         if (next)
209                 next->prev = e->prev;
210         else
211                 l->tail = e->prev;
212
213         if (!e->sentinel)
214                 l->nr_elts--;
215 }
216
217 static struct entry *l_pop_head(struct entry_space *es, struct ilist *l)
218 {
219         struct entry *e;
220
221         for (e = l_head(es, l); e; e = l_next(es, e))
222                 if (!e->sentinel) {
223                         l_del(es, l, e);
224                         return e;
225                 }
226
227         return NULL;
228 }
229
230 static struct entry *l_pop_tail(struct entry_space *es, struct ilist *l)
231 {
232         struct entry *e;
233
234         for (e = l_tail(es, l); e; e = l_prev(es, e))
235                 if (!e->sentinel) {
236                         l_del(es, l, e);
237                         return e;
238                 }
239
240         return NULL;
241 }
242
243 /*----------------------------------------------------------------*/
244
245 /*
246  * The stochastic-multi-queue is a set of lru lists stacked into levels.
247  * Entries are moved up levels when they are used, which loosely orders the
248  * most accessed entries in the top levels and least in the bottom.  This
249  * structure is *much* better than a single lru list.
250  */
251 #define MAX_LEVELS 64u
252
253 struct queue {
254         struct entry_space *es;
255
256         unsigned int nr_elts;
257         unsigned int nr_levels;
258         struct ilist qs[MAX_LEVELS];
259
260         /*
261          * We maintain a count of the number of entries we would like in each
262          * level.
263          */
264         unsigned int last_target_nr_elts;
265         unsigned int nr_top_levels;
266         unsigned int nr_in_top_levels;
267         unsigned int target_count[MAX_LEVELS];
268 };
269
270 static void q_init(struct queue *q, struct entry_space *es, unsigned int nr_levels)
271 {
272         unsigned int i;
273
274         q->es = es;
275         q->nr_elts = 0;
276         q->nr_levels = nr_levels;
277
278         for (i = 0; i < q->nr_levels; i++) {
279                 l_init(q->qs + i);
280                 q->target_count[i] = 0u;
281         }
282
283         q->last_target_nr_elts = 0u;
284         q->nr_top_levels = 0u;
285         q->nr_in_top_levels = 0u;
286 }
287
288 static unsigned int q_size(struct queue *q)
289 {
290         return q->nr_elts;
291 }
292
293 /*
294  * Insert an entry at the back of the given level.
295  */
296 static void q_push(struct queue *q, struct entry *e)
297 {
298         BUG_ON(e->pending_work);
299
300         if (!e->sentinel)
301                 q->nr_elts++;
302
303         l_add_tail(q->es, q->qs + e->level, e);
304 }
305
306 static void q_push_front(struct queue *q, struct entry *e)
307 {
308         BUG_ON(e->pending_work);
309
310         if (!e->sentinel)
311                 q->nr_elts++;
312
313         l_add_head(q->es, q->qs + e->level, e);
314 }
315
316 static void q_push_before(struct queue *q, struct entry *old, struct entry *e)
317 {
318         BUG_ON(e->pending_work);
319
320         if (!e->sentinel)
321                 q->nr_elts++;
322
323         l_add_before(q->es, q->qs + e->level, old, e);
324 }
325
326 static void q_del(struct queue *q, struct entry *e)
327 {
328         l_del(q->es, q->qs + e->level, e);
329         if (!e->sentinel)
330                 q->nr_elts--;
331 }
332
333 /*
334  * Return the oldest entry of the lowest populated level.
335  */
336 static struct entry *q_peek(struct queue *q, unsigned int max_level, bool can_cross_sentinel)
337 {
338         unsigned int level;
339         struct entry *e;
340
341         max_level = min(max_level, q->nr_levels);
342
343         for (level = 0; level < max_level; level++)
344                 for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e)) {
345                         if (e->sentinel) {
346                                 if (can_cross_sentinel)
347                                         continue;
348                                 else
349                                         break;
350                         }
351
352                         return e;
353                 }
354
355         return NULL;
356 }
357
358 static struct entry *q_pop(struct queue *q)
359 {
360         struct entry *e = q_peek(q, q->nr_levels, true);
361
362         if (e)
363                 q_del(q, e);
364
365         return e;
366 }
367
368 /*
369  * This function assumes there is a non-sentinel entry to pop.  It's only
370  * used by redistribute, so we know this is true.  It also doesn't adjust
371  * the q->nr_elts count.
372  */
373 static struct entry *__redist_pop_from(struct queue *q, unsigned int level)
374 {
375         struct entry *e;
376
377         for (; level < q->nr_levels; level++)
378                 for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e))
379                         if (!e->sentinel) {
380                                 l_del(q->es, q->qs + e->level, e);
381                                 return e;
382                         }
383
384         return NULL;
385 }
386
387 static void q_set_targets_subrange_(struct queue *q, unsigned int nr_elts,
388                                     unsigned int lbegin, unsigned int lend)
389 {
390         unsigned int level, nr_levels, entries_per_level, remainder;
391
392         BUG_ON(lbegin > lend);
393         BUG_ON(lend > q->nr_levels);
394         nr_levels = lend - lbegin;
395         entries_per_level = safe_div(nr_elts, nr_levels);
396         remainder = safe_mod(nr_elts, nr_levels);
397
398         for (level = lbegin; level < lend; level++)
399                 q->target_count[level] =
400                         (level < (lbegin + remainder)) ? entries_per_level + 1u : entries_per_level;
401 }
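/*
 * Worked example (illustrative): spreading nr_elts = 10 over levels
 * lbegin = 0 .. lend = 4 gives entries_per_level = 2 and remainder = 2,
 * so the target counts become 3, 3, 2, 2 - the remainder is given to the
 * lowest levels of the range.
 */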
402
403 /*
404  * Typically we have fewer elements in the top few levels which allows us
405  * to adjust the promote threshold nicely.
406  */
407 static void q_set_targets(struct queue *q)
408 {
409         if (q->last_target_nr_elts == q->nr_elts)
410                 return;
411
412         q->last_target_nr_elts = q->nr_elts;
413
414         if (q->nr_top_levels > q->nr_levels)
415                 q_set_targets_subrange_(q, q->nr_elts, 0, q->nr_levels);
416
417         else {
418                 q_set_targets_subrange_(q, q->nr_in_top_levels,
419                                         q->nr_levels - q->nr_top_levels, q->nr_levels);
420
421                 if (q->nr_in_top_levels < q->nr_elts)
422                         q_set_targets_subrange_(q, q->nr_elts - q->nr_in_top_levels,
423                                                 0, q->nr_levels - q->nr_top_levels);
424                 else
425                         q_set_targets_subrange_(q, 0, 0, q->nr_levels - q->nr_top_levels);
426         }
427 }
428
429 static void q_redistribute(struct queue *q)
430 {
431         unsigned int target, level;
432         struct ilist *l, *l_above;
433         struct entry *e;
434
435         q_set_targets(q);
436
437         for (level = 0u; level < q->nr_levels - 1u; level++) {
438                 l = q->qs + level;
439                 target = q->target_count[level];
440
441                 /*
442                  * Pull down some entries from the level above.
443                  */
444                 while (l->nr_elts < target) {
445                         e = __redist_pop_from(q, level + 1u);
446                         if (!e) {
447                                 /* bug in nr_elts */
448                                 break;
449                         }
450
451                         e->level = level;
452                         l_add_tail(q->es, l, e);
453                 }
454
455                 /*
456                  * Push some entries up.
457                  */
458                 l_above = q->qs + level + 1u;
459                 while (l->nr_elts > target) {
460                         e = l_pop_tail(q->es, l);
461
462                         if (!e)
463                                 /* bug in nr_elts */
464                                 break;
465
466                         e->level = level + 1u;
467                         l_add_tail(q->es, l_above, e);
468                 }
469         }
470 }
471
472 static void q_requeue(struct queue *q, struct entry *e, unsigned int extra_levels,
473                       struct entry *s1, struct entry *s2)
474 {
475         struct entry *de;
476         unsigned int sentinels_passed = 0;
477         unsigned int new_level = min(q->nr_levels - 1u, e->level + extra_levels);
478
479         /* try and find an entry to swap with */
480         if (extra_levels && (e->level < q->nr_levels - 1u)) {
481                 for (de = l_head(q->es, q->qs + new_level); de && de->sentinel; de = l_next(q->es, de))
482                         sentinels_passed++;
483
484                 if (de) {
485                         q_del(q, de);
486                         de->level = e->level;
487                         if (s1) {
488                                 switch (sentinels_passed) {
489                                 case 0:
490                                         q_push_before(q, s1, de);
491                                         break;
492
493                                 case 1:
494                                         q_push_before(q, s2, de);
495                                         break;
496
497                                 default:
498                                         q_push(q, de);
499                                 }
500                         } else
501                                 q_push(q, de);
502                 }
503         }
504
505         q_del(q, e);
506         e->level = new_level;
507         q_push(q, e);
508 }
509
510 /*----------------------------------------------------------------*/
511
512 #define FP_SHIFT 8
513 #define SIXTEENTH (1u << (FP_SHIFT - 4u))
514 #define EIGHTH (1u << (FP_SHIFT - 3u))
515
516 struct stats {
517         unsigned int hit_threshold;
518         unsigned int hits;
519         unsigned int misses;
520 };
521
522 enum performance {
523         Q_POOR,
524         Q_FAIR,
525         Q_WELL
526 };
527
528 static void stats_init(struct stats *s, unsigned int nr_levels)
529 {
530         s->hit_threshold = (nr_levels * 3u) / 4u;
531         s->hits = 0u;
532         s->misses = 0u;
533 }
534
535 static void stats_reset(struct stats *s)
536 {
537         s->hits = s->misses = 0u;
538 }
539
540 static void stats_level_accessed(struct stats *s, unsigned int level)
541 {
542         if (level >= s->hit_threshold)
543                 s->hits++;
544         else
545                 s->misses++;
546 }
547
548 static void stats_miss(struct stats *s)
549 {
550         s->misses++;
551 }
552
553 /*
554  * There are times when we don't have any confidence in the hotspot queue,
555  * such as when a fresh cache is created and the blocks have been spread
556  * out across the levels, or when the IO load changes.  We detect this by
557  * seeing how often a lookup hits the top levels of the hotspot queue.
558  */
559 static enum performance stats_assess(struct stats *s)
560 {
561         unsigned int confidence = safe_div(s->hits << FP_SHIFT, s->hits + s->misses);
562
563         if (confidence < SIXTEENTH)
564                 return Q_POOR;
565
566         else if (confidence < EIGHTH)
567                 return Q_FAIR;
568
569         else
570                 return Q_WELL;
571 }
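/*
 * Worked example (illustrative): with FP_SHIFT = 8, SIXTEENTH = 16 and
 * EIGHTH = 32.  100 hits and 1500 misses give
 * confidence = (100 << 8) / 1600 = 16, which is not below SIXTEENTH but is
 * below EIGHTH, so stats_assess() returns Q_FAIR.
 */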
572
573 /*----------------------------------------------------------------*/
574
575 struct smq_hash_table {
576         struct entry_space *es;
577         unsigned long long hash_bits;
578         unsigned int *buckets;
579 };
580
581 /*
582  * All cache entries are stored in a chained hash table.  To save space we
583  * use indexing again, and only store indexes to the next entry.
584  */
585 static int h_init(struct smq_hash_table *ht, struct entry_space *es, unsigned int nr_entries)
586 {
587         unsigned int i, nr_buckets;
588
589         ht->es = es;
590         nr_buckets = roundup_pow_of_two(max(nr_entries / 4u, 16u));
591         ht->hash_bits = __ffs(nr_buckets);
592
593         ht->buckets = vmalloc(array_size(nr_buckets, sizeof(*ht->buckets)));
594         if (!ht->buckets)
595                 return -ENOMEM;
596
597         for (i = 0; i < nr_buckets; i++)
598                 ht->buckets[i] = INDEXER_NULL;
599
600         return 0;
601 }
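/*
 * Sizing example (illustrative): for nr_entries = 1000 the bucket count is
 * roundup_pow_of_two(max(1000 / 4u, 16u)) = 256, so hash_bits = __ffs(256) = 8
 * and hash_64() yields bucket indexes in the range 0..255.
 */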
602
603 static void h_exit(struct smq_hash_table *ht)
604 {
605         vfree(ht->buckets);
606 }
607
608 static struct entry *h_head(struct smq_hash_table *ht, unsigned int bucket)
609 {
610         return to_entry(ht->es, ht->buckets[bucket]);
611 }
612
613 static struct entry *h_next(struct smq_hash_table *ht, struct entry *e)
614 {
615         return to_entry(ht->es, e->hash_next);
616 }
617
618 static void __h_insert(struct smq_hash_table *ht, unsigned int bucket, struct entry *e)
619 {
620         e->hash_next = ht->buckets[bucket];
621         ht->buckets[bucket] = to_index(ht->es, e);
622 }
623
624 static void h_insert(struct smq_hash_table *ht, struct entry *e)
625 {
626         unsigned int h = hash_64(from_oblock(e->oblock), ht->hash_bits);
627
628         __h_insert(ht, h, e);
629 }
630
631 static struct entry *__h_lookup(struct smq_hash_table *ht, unsigned int h, dm_oblock_t oblock,
632                                 struct entry **prev)
633 {
634         struct entry *e;
635
636         *prev = NULL;
637         for (e = h_head(ht, h); e; e = h_next(ht, e)) {
638                 if (e->oblock == oblock)
639                         return e;
640
641                 *prev = e;
642         }
643
644         return NULL;
645 }
646
647 static void __h_unlink(struct smq_hash_table *ht, unsigned int h,
648                        struct entry *e, struct entry *prev)
649 {
650         if (prev)
651                 prev->hash_next = e->hash_next;
652         else
653                 ht->buckets[h] = e->hash_next;
654 }
655
656 /*
657  * On a hit, also moves the entry to the front of its bucket.
658  */
659 static struct entry *h_lookup(struct smq_hash_table *ht, dm_oblock_t oblock)
660 {
661         struct entry *e, *prev;
662         unsigned int h = hash_64(from_oblock(oblock), ht->hash_bits);
663
664         e = __h_lookup(ht, h, oblock, &prev);
665         if (e && prev) {
666                 /*
667                  * Move to the front because this entry is likely
668                  * to be hit again.
669                  */
670                 __h_unlink(ht, h, e, prev);
671                 __h_insert(ht, h, e);
672         }
673
674         return e;
675 }
676
677 static void h_remove(struct smq_hash_table *ht, struct entry *e)
678 {
679         unsigned int h = hash_64(from_oblock(e->oblock), ht->hash_bits);
680         struct entry *prev;
681
682         /*
683          * The downside of using a singly linked list is that we have to
684          * iterate the bucket to remove an item.
685          */
686         e = __h_lookup(ht, h, e->oblock, &prev);
687         if (e)
688                 __h_unlink(ht, h, e, prev);
689 }
690
691 /*----------------------------------------------------------------*/
692
693 struct entry_alloc {
694         struct entry_space *es;
695         unsigned int begin;
696
697         unsigned int nr_allocated;
698         struct ilist free;
699 };
700
701 static void init_allocator(struct entry_alloc *ea, struct entry_space *es,
702                            unsigned int begin, unsigned int end)
703 {
704         unsigned int i;
705
706         ea->es = es;
707         ea->nr_allocated = 0u;
708         ea->begin = begin;
709
710         l_init(&ea->free);
711         for (i = begin; i != end; i++)
712                 l_add_tail(ea->es, &ea->free, __get_entry(ea->es, i));
713 }
714
715 static void init_entry(struct entry *e)
716 {
717         /*
718          * We can't memset because that would clear the hotspot and
719          * sentinel bits which remain constant.
720          */
721         e->hash_next = INDEXER_NULL;
722         e->next = INDEXER_NULL;
723         e->prev = INDEXER_NULL;
724         e->level = 0u;
725         e->dirty = true;        /* FIXME: audit */
726         e->allocated = true;
727         e->sentinel = false;
728         e->pending_work = false;
729 }
730
731 static struct entry *alloc_entry(struct entry_alloc *ea)
732 {
733         struct entry *e;
734
735         if (l_empty(&ea->free))
736                 return NULL;
737
738         e = l_pop_head(ea->es, &ea->free);
739         init_entry(e);
740         ea->nr_allocated++;
741
742         return e;
743 }
744
745 /*
746  * This assumes the cblock hasn't already been allocated.
747  */
748 static struct entry *alloc_particular_entry(struct entry_alloc *ea, unsigned int i)
749 {
750         struct entry *e = __get_entry(ea->es, ea->begin + i);
751
752         BUG_ON(e->allocated);
753
754         l_del(ea->es, &ea->free, e);
755         init_entry(e);
756         ea->nr_allocated++;
757
758         return e;
759 }
760
761 static void free_entry(struct entry_alloc *ea, struct entry *e)
762 {
763         BUG_ON(!ea->nr_allocated);
764         BUG_ON(!e->allocated);
765
766         ea->nr_allocated--;
767         e->allocated = false;
768         l_add_tail(ea->es, &ea->free, e);
769 }
770
771 static bool allocator_empty(struct entry_alloc *ea)
772 {
773         return l_empty(&ea->free);
774 }
775
776 static unsigned int get_index(struct entry_alloc *ea, struct entry *e)
777 {
778         return to_index(ea->es, e) - ea->begin;
779 }
780
781 static struct entry *get_entry(struct entry_alloc *ea, unsigned int index)
782 {
783         return __get_entry(ea->es, ea->begin + index);
784 }
785
786 /*----------------------------------------------------------------*/
787
788 #define NR_HOTSPOT_LEVELS 64u
789 #define NR_CACHE_LEVELS 64u
790
791 #define WRITEBACK_PERIOD (10ul * HZ)
792 #define DEMOTE_PERIOD (60ul * HZ)
793
794 #define HOTSPOT_UPDATE_PERIOD (HZ)
795 #define CACHE_UPDATE_PERIOD (60ul * HZ)
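/*
 * For reference: the periods above are in jiffies, so with HZ ticks per
 * second the writeback sentinels rotate every 10s, the demote sentinels
 * every 60s, the hotspot queue is redistributed at most once a second and
 * the cache queues at most once a minute.
 */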
796
797 struct smq_policy {
798         struct dm_cache_policy policy;
799
800         /* protects everything */
801         spinlock_t lock;
802         dm_cblock_t cache_size;
803         sector_t cache_block_size;
804
805         sector_t hotspot_block_size;
806         unsigned int nr_hotspot_blocks;
807         unsigned int cache_blocks_per_hotspot_block;
808         unsigned int hotspot_level_jump;
809
810         struct entry_space es;
811         struct entry_alloc writeback_sentinel_alloc;
812         struct entry_alloc demote_sentinel_alloc;
813         struct entry_alloc hotspot_alloc;
814         struct entry_alloc cache_alloc;
815
816         unsigned long *hotspot_hit_bits;
817         unsigned long *cache_hit_bits;
818
819         /*
820          * We maintain three queues of entries.  The cache proper consists
821          * of a clean and a dirty queue, which hold the currently active
822          * mappings.  The hotspot queue uses a larger block size to track
823          * blocks that are being hit frequently; these are potential
824          * candidates for promotion to the cache.
825          */
826         struct queue hotspot;
827         struct queue clean;
828         struct queue dirty;
829
830         struct stats hotspot_stats;
831         struct stats cache_stats;
832
833         /*
834          * Keeps track of time, incremented by the core.  We use this to
835          * avoid attributing multiple hits within the same tick.
836          */
837         unsigned int tick;
838
839         /*
840          * The hash tables allow us to quickly find an entry by origin
841          * block.
842          */
843         struct smq_hash_table table;
844         struct smq_hash_table hotspot_table;
845
846         bool current_writeback_sentinels;
847         unsigned long next_writeback_period;
848
849         bool current_demote_sentinels;
850         unsigned long next_demote_period;
851
852         unsigned int write_promote_level;
853         unsigned int read_promote_level;
854
855         unsigned long next_hotspot_period;
856         unsigned long next_cache_period;
857
858         struct background_tracker *bg_work;
859
860         bool migrations_allowed;
861 };
862
863 /*----------------------------------------------------------------*/
864
865 static struct entry *get_sentinel(struct entry_alloc *ea, unsigned int level, bool which)
866 {
867         return get_entry(ea, which ? level : NR_CACHE_LEVELS + level);
868 }
869
870 static struct entry *writeback_sentinel(struct smq_policy *mq, unsigned int level)
871 {
872         return get_sentinel(&mq->writeback_sentinel_alloc, level, mq->current_writeback_sentinels);
873 }
874
875 static struct entry *demote_sentinel(struct smq_policy *mq, unsigned int level)
876 {
877         return get_sentinel(&mq->demote_sentinel_alloc, level, mq->current_demote_sentinels);
878 }
879
880 static void __update_writeback_sentinels(struct smq_policy *mq)
881 {
882         unsigned int level;
883         struct queue *q = &mq->dirty;
884         struct entry *sentinel;
885
886         for (level = 0; level < q->nr_levels; level++) {
887                 sentinel = writeback_sentinel(mq, level);
888                 q_del(q, sentinel);
889                 q_push(q, sentinel);
890         }
891 }
892
893 static void __update_demote_sentinels(struct smq_policy *mq)
894 {
895         unsigned int level;
896         struct queue *q = &mq->clean;
897         struct entry *sentinel;
898
899         for (level = 0; level < q->nr_levels; level++) {
900                 sentinel = demote_sentinel(mq, level);
901                 q_del(q, sentinel);
902                 q_push(q, sentinel);
903         }
904 }
905
906 static void update_sentinels(struct smq_policy *mq)
907 {
908         if (time_after(jiffies, mq->next_writeback_period)) {
909                 mq->next_writeback_period = jiffies + WRITEBACK_PERIOD;
910                 mq->current_writeback_sentinels = !mq->current_writeback_sentinels;
911                 __update_writeback_sentinels(mq);
912         }
913
914         if (time_after(jiffies, mq->next_demote_period)) {
915                 mq->next_demote_period = jiffies + DEMOTE_PERIOD;
916                 mq->current_demote_sentinels = !mq->current_demote_sentinels;
917                 __update_demote_sentinels(mq);
918         }
919 }
920
921 static void __sentinels_init(struct smq_policy *mq)
922 {
923         unsigned int level;
924         struct entry *sentinel;
925
926         for (level = 0; level < NR_CACHE_LEVELS; level++) {
927                 sentinel = writeback_sentinel(mq, level);
928                 sentinel->level = level;
929                 q_push(&mq->dirty, sentinel);
930
931                 sentinel = demote_sentinel(mq, level);
932                 sentinel->level = level;
933                 q_push(&mq->clean, sentinel);
934         }
935 }
936
937 static void sentinels_init(struct smq_policy *mq)
938 {
939         mq->next_writeback_period = jiffies + WRITEBACK_PERIOD;
940         mq->next_demote_period = jiffies + DEMOTE_PERIOD;
941
942         mq->current_writeback_sentinels = false;
943         mq->current_demote_sentinels = false;
944         __sentinels_init(mq);
945
946         mq->current_writeback_sentinels = !mq->current_writeback_sentinels;
947         mq->current_demote_sentinels = !mq->current_demote_sentinels;
948         __sentinels_init(mq);
949 }
950
951 /*----------------------------------------------------------------*/
952
953 static void del_queue(struct smq_policy *mq, struct entry *e)
954 {
955         q_del(e->dirty ? &mq->dirty : &mq->clean, e);
956 }
957
958 static void push_queue(struct smq_policy *mq, struct entry *e)
959 {
960         if (e->dirty)
961                 q_push(&mq->dirty, e);
962         else
963                 q_push(&mq->clean, e);
964 }
965
966 // !h, !q, a -> h, q, a
967 static void push(struct smq_policy *mq, struct entry *e)
968 {
969         h_insert(&mq->table, e);
970         if (!e->pending_work)
971                 push_queue(mq, e);
972 }
973
974 static void push_queue_front(struct smq_policy *mq, struct entry *e)
975 {
976         if (e->dirty)
977                 q_push_front(&mq->dirty, e);
978         else
979                 q_push_front(&mq->clean, e);
980 }
981
982 static void push_front(struct smq_policy *mq, struct entry *e)
983 {
984         h_insert(&mq->table, e);
985         if (!e->pending_work)
986                 push_queue_front(mq, e);
987 }
988
989 static dm_cblock_t infer_cblock(struct smq_policy *mq, struct entry *e)
990 {
991         return to_cblock(get_index(&mq->cache_alloc, e));
992 }
993
994 static void requeue(struct smq_policy *mq, struct entry *e)
995 {
996         /*
997          * Pending work has temporarily been taken out of the queues.
998          */
999         if (e->pending_work)
1000                 return;
1001
1002         if (!test_and_set_bit(from_cblock(infer_cblock(mq, e)), mq->cache_hit_bits)) {
1003                 if (!e->dirty) {
1004                         q_requeue(&mq->clean, e, 1u, NULL, NULL);
1005                         return;
1006                 }
1007
1008                 q_requeue(&mq->dirty, e, 1u,
1009                           get_sentinel(&mq->writeback_sentinel_alloc, e->level, !mq->current_writeback_sentinels),
1010                           get_sentinel(&mq->writeback_sentinel_alloc, e->level, mq->current_writeback_sentinels));
1011         }
1012 }
1013
1014 static unsigned int default_promote_level(struct smq_policy *mq)
1015 {
1016         /*
1017          * The promote level depends on the current performance of the
1018          * cache.
1019          *
1020          * If the cache is performing badly, then we can't afford
1021          * to promote much without causing performance to drop below that
1022          * of the origin device.
1023          *
1024          * If the cache is performing well, then we don't need to promote
1025          * much.  If it isn't broken, don't fix it.
1026          *
1027          * If the cache is middling then we promote more.
1028          *
1029          * This scheme reminds me of a graph of entropy vs probability of a
1030          * binary variable.
1031          */
1032         static const unsigned int table[] = {
1033                 1, 1, 1, 2, 4, 6, 7, 8, 7, 6, 4, 4, 3, 3, 2, 2, 1
1034         };
1035
1036         unsigned int hits = mq->cache_stats.hits;
1037         unsigned int misses = mq->cache_stats.misses;
1038         unsigned int index = safe_div(hits << 4u, hits + misses);
1039         return table[index];
1040 }
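/*
 * Worked example (illustrative): the hit ratio is mapped to a 0..16 index
 * into table[] above.  With hits == misses the index is
 * (hits << 4) / (2 * hits) = 8, giving a promote level of 7; an all-hit or
 * all-miss period indexes an end of the table and gives the conservative
 * level 1.
 */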
1041
1042 static void update_promote_levels(struct smq_policy *mq)
1043 {
1044         /*
1045          * If there are unused cache entries then we want to be really
1046          * eager to promote.
1047          */
1048         unsigned int threshold_level = allocator_empty(&mq->cache_alloc) ?
1049                 default_promote_level(mq) : (NR_HOTSPOT_LEVELS / 2u);
1050
1051         threshold_level = max(threshold_level, NR_HOTSPOT_LEVELS);
1052
1053         /*
1054          * If the hotspot queue is performing badly then we have little
1055          * confidence that we know which blocks to promote.  So we cut down
1056          * the number of promotions.
1057          */
1058         switch (stats_assess(&mq->hotspot_stats)) {
1059         case Q_POOR:
1060                 threshold_level /= 4u;
1061                 break;
1062
1063         case Q_FAIR:
1064                 threshold_level /= 2u;
1065                 break;
1066
1067         case Q_WELL:
1068                 break;
1069         }
1070
1071         mq->read_promote_level = NR_HOTSPOT_LEVELS - threshold_level;
1072         mq->write_promote_level = (NR_HOTSPOT_LEVELS - threshold_level);
1073 }
1074
1075 /*
1076  * If the hotspot queue is performing badly, then we try and move entries
1077  * around more quickly.
1078  */
1079 static void update_level_jump(struct smq_policy *mq)
1080 {
1081         switch (stats_assess(&mq->hotspot_stats)) {
1082         case Q_POOR:
1083                 mq->hotspot_level_jump = 4u;
1084                 break;
1085
1086         case Q_FAIR:
1087                 mq->hotspot_level_jump = 2u;
1088                 break;
1089
1090         case Q_WELL:
1091                 mq->hotspot_level_jump = 1u;
1092                 break;
1093         }
1094 }
1095
1096 static void end_hotspot_period(struct smq_policy *mq)
1097 {
1098         clear_bitset(mq->hotspot_hit_bits, mq->nr_hotspot_blocks);
1099         update_promote_levels(mq);
1100
1101         if (time_after(jiffies, mq->next_hotspot_period)) {
1102                 update_level_jump(mq);
1103                 q_redistribute(&mq->hotspot);
1104                 stats_reset(&mq->hotspot_stats);
1105                 mq->next_hotspot_period = jiffies + HOTSPOT_UPDATE_PERIOD;
1106         }
1107 }
1108
1109 static void end_cache_period(struct smq_policy *mq)
1110 {
1111         if (time_after(jiffies, mq->next_cache_period)) {
1112                 clear_bitset(mq->cache_hit_bits, from_cblock(mq->cache_size));
1113
1114                 q_redistribute(&mq->dirty);
1115                 q_redistribute(&mq->clean);
1116                 stats_reset(&mq->cache_stats);
1117
1118                 mq->next_cache_period = jiffies + CACHE_UPDATE_PERIOD;
1119         }
1120 }
1121
1122 /*----------------------------------------------------------------*/
1123
1124 /*
1125  * Targets are given as a percentage.
1126  */
1127 #define CLEAN_TARGET 25u
1128 #define FREE_TARGET 25u
1129
1130 static unsigned int percent_to_target(struct smq_policy *mq, unsigned int p)
1131 {
1132         return from_cblock(mq->cache_size) * p / 100u;
1133 }
1134
1135 static bool clean_target_met(struct smq_policy *mq, bool idle)
1136 {
1137         /*
1138          * Cache entries may not be populated.  So we cannot rely on the
1139          * size of the clean queue.
1140          */
1141         if (idle) {
1142                 /*
1143                  * We'd like to clean everything.
1144                  */
1145                 return q_size(&mq->dirty) == 0u;
1146         }
1147
1148         /*
1149          * If we're busy we don't worry about cleaning at all.
1150          */
1151         return true;
1152 }
1153
1154 static bool free_target_met(struct smq_policy *mq)
1155 {
1156         unsigned int nr_free;
1157
1158         nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated;
1159         return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >=
1160                 percent_to_target(mq, FREE_TARGET);
1161 }
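/*
 * Worked example (illustrative): with FREE_TARGET = 25 and a cache of
 * 10000 blocks, percent_to_target() returns 2500, so the free target is met
 * while the number of unallocated cblocks plus the demotions already queued
 * is at least 2500.
 */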
1162
1163 /*----------------------------------------------------------------*/
1164
1165 static void mark_pending(struct smq_policy *mq, struct entry *e)
1166 {
1167         BUG_ON(e->sentinel);
1168         BUG_ON(!e->allocated);
1169         BUG_ON(e->pending_work);
1170         e->pending_work = true;
1171 }
1172
1173 static void clear_pending(struct smq_policy *mq, struct entry *e)
1174 {
1175         BUG_ON(!e->pending_work);
1176         e->pending_work = false;
1177 }
1178
1179 static void queue_writeback(struct smq_policy *mq, bool idle)
1180 {
1181         int r;
1182         struct policy_work work;
1183         struct entry *e;
1184
1185         e = q_peek(&mq->dirty, mq->dirty.nr_levels, idle);
1186         if (e) {
1187                 mark_pending(mq, e);
1188                 q_del(&mq->dirty, e);
1189
1190                 work.op = POLICY_WRITEBACK;
1191                 work.oblock = e->oblock;
1192                 work.cblock = infer_cblock(mq, e);
1193
1194                 r = btracker_queue(mq->bg_work, &work, NULL);
1195                 if (r) {
1196                         clear_pending(mq, e);
1197                         q_push_front(&mq->dirty, e);
1198                 }
1199         }
1200 }
1201
1202 static void queue_demotion(struct smq_policy *mq)
1203 {
1204         int r;
1205         struct policy_work work;
1206         struct entry *e;
1207
1208         if (WARN_ON_ONCE(!mq->migrations_allowed))
1209                 return;
1210
1211         e = q_peek(&mq->clean, mq->clean.nr_levels / 2, true);
1212         if (!e) {
1213                 if (!clean_target_met(mq, true))
1214                         queue_writeback(mq, false);
1215                 return;
1216         }
1217
1218         mark_pending(mq, e);
1219         q_del(&mq->clean, e);
1220
1221         work.op = POLICY_DEMOTE;
1222         work.oblock = e->oblock;
1223         work.cblock = infer_cblock(mq, e);
1224         r = btracker_queue(mq->bg_work, &work, NULL);
1225         if (r) {
1226                 clear_pending(mq, e);
1227                 q_push_front(&mq->clean, e);
1228         }
1229 }
1230
1231 static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock,
1232                             struct policy_work **workp)
1233 {
1234         int r;
1235         struct entry *e;
1236         struct policy_work work;
1237
1238         if (!mq->migrations_allowed)
1239                 return;
1240
1241         if (allocator_empty(&mq->cache_alloc)) {
1242                 /*
1243                  * We always claim to be 'idle' to ensure some demotions happen
1244                  * with continuous loads.
1245                  */
1246                 if (!free_target_met(mq))
1247                         queue_demotion(mq);
1248                 return;
1249         }
1250
1251         if (btracker_promotion_already_present(mq->bg_work, oblock))
1252                 return;
1253
1254         /*
1255          * We allocate the entry now to reserve the cblock.  If the
1256          * background work is aborted we must remember to free it.
1257          */
1258         e = alloc_entry(&mq->cache_alloc);
1259         BUG_ON(!e);
1260         e->pending_work = true;
1261         work.op = POLICY_PROMOTE;
1262         work.oblock = oblock;
1263         work.cblock = infer_cblock(mq, e);
1264         r = btracker_queue(mq->bg_work, &work, workp);
1265         if (r)
1266                 free_entry(&mq->cache_alloc, e);
1267 }
1268
1269 /*----------------------------------------------------------------*/
1270
1271 enum promote_result {
1272         PROMOTE_NOT,
1273         PROMOTE_TEMPORARY,
1274         PROMOTE_PERMANENT
1275 };
1276
1277 /*
1278  * Converts a boolean into a promote result.
1279  */
1280 static enum promote_result maybe_promote(bool promote)
1281 {
1282         return promote ? PROMOTE_PERMANENT : PROMOTE_NOT;
1283 }
1284
1285 static enum promote_result should_promote(struct smq_policy *mq, struct entry *hs_e,
1286                                           int data_dir, bool fast_promote)
1287 {
1288         if (data_dir == WRITE) {
1289                 if (!allocator_empty(&mq->cache_alloc) && fast_promote)
1290                         return PROMOTE_TEMPORARY;
1291
1292                 return maybe_promote(hs_e->level >= mq->write_promote_level);
1293         } else
1294                 return maybe_promote(hs_e->level >= mq->read_promote_level);
1295 }
1296
1297 static dm_oblock_t to_hblock(struct smq_policy *mq, dm_oblock_t b)
1298 {
1299         sector_t r = from_oblock(b);
1300         (void) sector_div(r, mq->cache_blocks_per_hotspot_block);
1301         return to_oblock(r);
1302 }
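/*
 * Illustrative example (assuming cache_blocks_per_hotspot_block == 16):
 * origin blocks 0..15 map to hotspot block 0, blocks 16..31 to hotspot
 * block 1, and so on; an origin block of 35 maps to hotspot block 2.
 */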
1303
1304 static struct entry *update_hotspot_queue(struct smq_policy *mq, dm_oblock_t b)
1305 {
1306         unsigned int hi;
1307         dm_oblock_t hb = to_hblock(mq, b);
1308         struct entry *e = h_lookup(&mq->hotspot_table, hb);
1309
1310         if (e) {
1311                 stats_level_accessed(&mq->hotspot_stats, e->level);
1312
1313                 hi = get_index(&mq->hotspot_alloc, e);
1314                 q_requeue(&mq->hotspot, e,
1315                           test_and_set_bit(hi, mq->hotspot_hit_bits) ?
1316                           0u : mq->hotspot_level_jump,
1317                           NULL, NULL);
1318
1319         } else {
1320                 stats_miss(&mq->hotspot_stats);
1321
1322                 e = alloc_entry(&mq->hotspot_alloc);
1323                 if (!e) {
1324                         e = q_pop(&mq->hotspot);
1325                         if (e) {
1326                                 h_remove(&mq->hotspot_table, e);
1327                                 hi = get_index(&mq->hotspot_alloc, e);
1328                                 clear_bit(hi, mq->hotspot_hit_bits);
1329                         }
1330
1331                 }
1332
1333                 if (e) {
1334                         e->oblock = hb;
1335                         q_push(&mq->hotspot, e);
1336                         h_insert(&mq->hotspot_table, e);
1337                 }
1338         }
1339
1340         return e;
1341 }
1342
1343 /*----------------------------------------------------------------*/
1344
1345 /*
1346  * Public interface, via the policy struct.  See dm-cache-policy.h for a
1347  * description of these.
1348  */
1349
1350 static struct smq_policy *to_smq_policy(struct dm_cache_policy *p)
1351 {
1352         return container_of(p, struct smq_policy, policy);
1353 }
1354
1355 static void smq_destroy(struct dm_cache_policy *p)
1356 {
1357         struct smq_policy *mq = to_smq_policy(p);
1358
1359         btracker_destroy(mq->bg_work);
1360         h_exit(&mq->hotspot_table);
1361         h_exit(&mq->table);
1362         free_bitset(mq->hotspot_hit_bits);
1363         free_bitset(mq->cache_hit_bits);
1364         space_exit(&mq->es);
1365         kfree(mq);
1366 }
1367
1368 /*----------------------------------------------------------------*/
1369
1370 static int __lookup(struct smq_policy *mq, dm_oblock_t oblock, dm_cblock_t *cblock,
1371                     int data_dir, bool fast_copy,
1372                     struct policy_work **work, bool *background_work)
1373 {
1374         struct entry *e, *hs_e;
1375         enum promote_result pr;
1376
1377         *background_work = false;
1378
1379         e = h_lookup(&mq->table, oblock);
1380         if (e) {
1381                 stats_level_accessed(&mq->cache_stats, e->level);
1382
1383                 requeue(mq, e);
1384                 *cblock = infer_cblock(mq, e);
1385                 return 0;
1386
1387         } else {
1388                 stats_miss(&mq->cache_stats);
1389
1390                 /*
1391                  * The hotspot queue only gets updated with misses.
1392                  */
1393                 hs_e = update_hotspot_queue(mq, oblock);
1394
1395                 pr = should_promote(mq, hs_e, data_dir, fast_copy);
1396                 if (pr != PROMOTE_NOT) {
1397                         queue_promotion(mq, oblock, work);
1398                         *background_work = true;
1399                 }
1400
1401                 return -ENOENT;
1402         }
1403 }
1404
1405 static int smq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock,
1406                       int data_dir, bool fast_copy,
1407                       bool *background_work)
1408 {
1409         int r;
1410         unsigned long flags;
1411         struct smq_policy *mq = to_smq_policy(p);
1412
1413         spin_lock_irqsave(&mq->lock, flags);
1414         r = __lookup(mq, oblock, cblock,
1415                      data_dir, fast_copy,
1416                      NULL, background_work);
1417         spin_unlock_irqrestore(&mq->lock, flags);
1418
1419         return r;
1420 }
1421
1422 static int smq_lookup_with_work(struct dm_cache_policy *p,
1423                                 dm_oblock_t oblock, dm_cblock_t *cblock,
1424                                 int data_dir, bool fast_copy,
1425                                 struct policy_work **work)
1426 {
1427         int r;
1428         bool background_queued;
1429         unsigned long flags;
1430         struct smq_policy *mq = to_smq_policy(p);
1431
1432         spin_lock_irqsave(&mq->lock, flags);
1433         r = __lookup(mq, oblock, cblock, data_dir, fast_copy, work, &background_queued);
1434         spin_unlock_irqrestore(&mq->lock, flags);
1435
1436         return r;
1437 }
1438
1439 static int smq_get_background_work(struct dm_cache_policy *p, bool idle,
1440                                    struct policy_work **result)
1441 {
1442         int r;
1443         unsigned long flags;
1444         struct smq_policy *mq = to_smq_policy(p);
1445
1446         spin_lock_irqsave(&mq->lock, flags);
1447         r = btracker_issue(mq->bg_work, result);
1448         if (r == -ENODATA) {
1449                 if (!clean_target_met(mq, idle)) {
1450                         queue_writeback(mq, idle);
1451                         r = btracker_issue(mq->bg_work, result);
1452                 }
1453         }
1454         spin_unlock_irqrestore(&mq->lock, flags);
1455
1456         return r;
1457 }
1458
1459 /*
1460  * We need to clear any pending work flags that have been set, and in the
1461  * case of promotion free the entry for the destination cblock.
1462  */
1463 static void __complete_background_work(struct smq_policy *mq,
1464                                        struct policy_work *work,
1465                                        bool success)
1466 {
1467         struct entry *e = get_entry(&mq->cache_alloc,
1468                                     from_cblock(work->cblock));
1469
1470         switch (work->op) {
1471         case POLICY_PROMOTE:
1472                 // !h, !q, a
1473                 clear_pending(mq, e);
1474                 if (success) {
1475                         e->oblock = work->oblock;
1476                         e->level = NR_CACHE_LEVELS - 1;
1477                         push(mq, e);
1478                         // h, q, a
1479                 } else {
1480                         free_entry(&mq->cache_alloc, e);
1481                         // !h, !q, !a
1482                 }
1483                 break;
1484
1485         case POLICY_DEMOTE:
1486                 // h, !q, a
1487                 if (success) {
1488                         h_remove(&mq->table, e);
1489                         free_entry(&mq->cache_alloc, e);
1490                         // !h, !q, !a
1491                 } else {
1492                         clear_pending(mq, e);
1493                         push_queue(mq, e);
1494                         // h, q, a
1495                 }
1496                 break;
1497
1498         case POLICY_WRITEBACK:
1499                 // h, !q, a
1500                 clear_pending(mq, e);
1501                 push_queue(mq, e);
1502                 // h, q, a
1503                 break;
1504         }
1505
1506         btracker_complete(mq->bg_work, work);
1507 }
1508
1509 static void smq_complete_background_work(struct dm_cache_policy *p,
1510                                          struct policy_work *work,
1511                                          bool success)
1512 {
1513         unsigned long flags;
1514         struct smq_policy *mq = to_smq_policy(p);
1515
1516         spin_lock_irqsave(&mq->lock, flags);
1517         __complete_background_work(mq, work, success);
1518         spin_unlock_irqrestore(&mq->lock, flags);
1519 }
1520
1521 // in_hash(oblock) -> in_hash(oblock)
1522 static void __smq_set_clear_dirty(struct smq_policy *mq, dm_cblock_t cblock, bool set)
1523 {
1524         struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock));
1525
1526         if (e->pending_work)
1527                 e->dirty = set;
1528         else {
1529                 del_queue(mq, e);
1530                 e->dirty = set;
1531                 push_queue(mq, e);
1532         }
1533 }
1534
1535 static void smq_set_dirty(struct dm_cache_policy *p, dm_cblock_t cblock)
1536 {
1537         unsigned long flags;
1538         struct smq_policy *mq = to_smq_policy(p);
1539
1540         spin_lock_irqsave(&mq->lock, flags);
1541         __smq_set_clear_dirty(mq, cblock, true);
1542         spin_unlock_irqrestore(&mq->lock, flags);
1543 }
1544
1545 static void smq_clear_dirty(struct dm_cache_policy *p, dm_cblock_t cblock)
1546 {
1547         struct smq_policy *mq = to_smq_policy(p);
1548         unsigned long flags;
1549
1550         spin_lock_irqsave(&mq->lock, flags);
1551         __smq_set_clear_dirty(mq, cblock, false);
1552         spin_unlock_irqrestore(&mq->lock, flags);
1553 }
1554
1555 static unsigned int random_level(dm_cblock_t cblock)
1556 {
1557         return hash_32(from_cblock(cblock), 9) & (NR_CACHE_LEVELS - 1);
1558 }
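/*
 * Note (illustrative): hash_32(x, 9) yields a 9-bit value; masking with
 * NR_CACHE_LEVELS - 1 = 63 keeps the low 6 bits, so mappings loaded without
 * a valid hint land on a pseudo-random level in 0..63.
 */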
1559
1560 static int smq_load_mapping(struct dm_cache_policy *p,
1561                             dm_oblock_t oblock, dm_cblock_t cblock,
1562                             bool dirty, uint32_t hint, bool hint_valid)
1563 {
1564         struct smq_policy *mq = to_smq_policy(p);
1565         struct entry *e;
1566
1567         e = alloc_particular_entry(&mq->cache_alloc, from_cblock(cblock));
1568         e->oblock = oblock;
1569         e->dirty = dirty;
1570         e->level = hint_valid ? min(hint, NR_CACHE_LEVELS - 1) : random_level(cblock);
1571         e->pending_work = false;
1572
1573         /*
1574          * When we load mappings we push ahead of both sentinels in order to
1575          * allow demotions and cleaning to occur immediately.
1576          */
1577         push_front(mq, e);
1578
1579         return 0;
1580 }
1581
1582 static int smq_invalidate_mapping(struct dm_cache_policy *p, dm_cblock_t cblock)
1583 {
1584         struct smq_policy *mq = to_smq_policy(p);
1585         struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock));
1586
1587         if (!e->allocated)
1588                 return -ENODATA;
1589
1590         // FIXME: what if this block has pending background work?
1591         del_queue(mq, e);
1592         h_remove(&mq->table, e);
1593         free_entry(&mq->cache_alloc, e);
1594         return 0;
1595 }
1596
1597 static uint32_t smq_get_hint(struct dm_cache_policy *p, dm_cblock_t cblock)
1598 {
1599         struct smq_policy *mq = to_smq_policy(p);
1600         struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock));
1601
1602         if (!e->allocated)
1603                 return 0;
1604
1605         return e->level;
1606 }
1607
1608 static dm_cblock_t smq_residency(struct dm_cache_policy *p)
1609 {
1610         dm_cblock_t r;
1611         unsigned long flags;
1612         struct smq_policy *mq = to_smq_policy(p);
1613
1614         spin_lock_irqsave(&mq->lock, flags);
1615         r = to_cblock(mq->cache_alloc.nr_allocated);
1616         spin_unlock_irqrestore(&mq->lock, flags);
1617
1618         return r;
1619 }
1620
1621 static void smq_tick(struct dm_cache_policy *p, bool can_block)
1622 {
1623         struct smq_policy *mq = to_smq_policy(p);
1624         unsigned long flags;
1625
1626         spin_lock_irqsave(&mq->lock, flags);
1627         mq->tick++;
1628         update_sentinels(mq);
1629         end_hotspot_period(mq);
1630         end_cache_period(mq);
1631         spin_unlock_irqrestore(&mq->lock, flags);
1632 }
1633
1634 static void smq_allow_migrations(struct dm_cache_policy *p, bool allow)
1635 {
1636         struct smq_policy *mq = to_smq_policy(p);
1637
1638         mq->migrations_allowed = allow;
1639 }
1640
1641 /*
1642  * smq has no config values, but the old mq policy did.  To avoid breaking
1643  * software we continue to accept these configurables for the mq policy,
1644  * but they have no effect.
1645  */
1646 static int mq_set_config_value(struct dm_cache_policy *p,
1647                                const char *key, const char *value)
1648 {
1649         unsigned long tmp;
1650
1651         if (kstrtoul(value, 10, &tmp))
1652                 return -EINVAL;
1653
1654         if (!strcasecmp(key, "random_threshold") ||
1655             !strcasecmp(key, "sequential_threshold") ||
1656             !strcasecmp(key, "discard_promote_adjustment") ||
1657             !strcasecmp(key, "read_promote_adjustment") ||
1658             !strcasecmp(key, "write_promote_adjustment")) {
1659                 DMWARN("tunable '%s' no longer has any effect, mq policy is now an alias for smq", key);
1660                 return 0;
1661         }
1662
1663         return -EINVAL;
1664 }
1665
1666 static int mq_emit_config_values(struct dm_cache_policy *p, char *result,
1667                                  unsigned int maxlen, ssize_t *sz_ptr)
1668 {
1669         ssize_t sz = *sz_ptr;
1670
1671         DMEMIT("10 random_threshold 0 "
1672                "sequential_threshold 0 "
1673                "discard_promote_adjustment 0 "
1674                "read_promote_adjustment 0 "
1675                "write_promote_adjustment 0 ");
1676
1677         *sz_ptr = sz;
1678         return 0;
1679 }
1680
1681 /* Init the policy plugin interface function pointers. */
1682 static void init_policy_functions(struct smq_policy *mq, bool mimic_mq)
1683 {
1684         mq->policy.destroy = smq_destroy;
1685         mq->policy.lookup = smq_lookup;
1686         mq->policy.lookup_with_work = smq_lookup_with_work;
1687         mq->policy.get_background_work = smq_get_background_work;
1688         mq->policy.complete_background_work = smq_complete_background_work;
1689         mq->policy.set_dirty = smq_set_dirty;
1690         mq->policy.clear_dirty = smq_clear_dirty;
1691         mq->policy.load_mapping = smq_load_mapping;
1692         mq->policy.invalidate_mapping = smq_invalidate_mapping;
1693         mq->policy.get_hint = smq_get_hint;
1694         mq->policy.residency = smq_residency;
1695         mq->policy.tick = smq_tick;
1696         mq->policy.allow_migrations = smq_allow_migrations;
1697
1698         if (mimic_mq) {
1699                 mq->policy.set_config_value = mq_set_config_value;
1700                 mq->policy.emit_config_values = mq_emit_config_values;
1701         }
1702 }
1703
1704 static bool too_many_hotspot_blocks(sector_t origin_size,
1705                                     sector_t hotspot_block_size,
1706                                     unsigned int nr_hotspot_blocks)
1707 {
1708         return (hotspot_block_size * nr_hotspot_blocks) > origin_size;
1709 }
1710
1711 static void calc_hotspot_params(sector_t origin_size,
1712                                 sector_t cache_block_size,
1713                                 unsigned int nr_cache_blocks,
1714                                 sector_t *hotspot_block_size,
1715                                 unsigned int *nr_hotspot_blocks)
1716 {
1717         *hotspot_block_size = cache_block_size * 16u;
1718         *nr_hotspot_blocks = max(nr_cache_blocks / 4u, 1024u);
1719
1720         while ((*hotspot_block_size > cache_block_size) &&
1721                too_many_hotspot_blocks(origin_size, *hotspot_block_size, *nr_hotspot_blocks))
1722                 *hotspot_block_size /= 2u;
1723 }
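/*
 * Sizing example (illustrative, assuming 64KiB cache blocks = 128 sectors
 * and a cache of 100000 blocks): the initial hotspot block is 16 cache
 * blocks (2048 sectors) and nr_hotspot_blocks = max(100000 / 4u, 1024u) =
 * 25000.  On a 20GiB origin (41943040 sectors) that would cover more than
 * the origin, so the hotspot block size is halved once to 1024 sectors and
 * the loop stops.
 */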
1724
1725 static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size,
1726                                             sector_t origin_size,
1727                                             sector_t cache_block_size,
1728                                             bool mimic_mq,
1729                                             bool migrations_allowed)
1730 {
1731         unsigned int i;
1732         unsigned int nr_sentinels_per_queue = 2u * NR_CACHE_LEVELS;
1733         unsigned int total_sentinels = 2u * nr_sentinels_per_queue;
1734         struct smq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL);
1735
1736         if (!mq)
1737                 return NULL;
1738
1739         init_policy_functions(mq, mimic_mq);
1740         mq->cache_size = cache_size;
1741         mq->cache_block_size = cache_block_size;
1742
1743         calc_hotspot_params(origin_size, cache_block_size, from_cblock(cache_size),
1744                             &mq->hotspot_block_size, &mq->nr_hotspot_blocks);
1745
1746         mq->cache_blocks_per_hotspot_block = div64_u64(mq->hotspot_block_size, mq->cache_block_size);
1747         mq->hotspot_level_jump = 1u;
1748         if (space_init(&mq->es, total_sentinels + mq->nr_hotspot_blocks + from_cblock(cache_size))) {
1749                 DMERR("couldn't initialize entry space");
1750                 goto bad_pool_init;
1751         }
1752
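        /*
         * The single entry space holds four contiguous regions, handed out
         * by the allocators below:
         *
         *   writeback sentinels:  [0, nr_sentinels_per_queue)
         *   demote sentinels:     [nr_sentinels_per_queue, total_sentinels)
         *   hotspot entries:      [total_sentinels, total_sentinels + nr_hotspot_blocks)
         *   cache entries:        the remaining from_cblock(cache_size) slots
         *
         * Two sets of sentinels per level are allocated (the 2u factors
         * above) so the sentinel code earlier in this file can rotate them
         * from period to period.
         */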
1753         init_allocator(&mq->writeback_sentinel_alloc, &mq->es, 0, nr_sentinels_per_queue);
1754         for (i = 0; i < nr_sentinels_per_queue; i++)
1755                 get_entry(&mq->writeback_sentinel_alloc, i)->sentinel = true;
1756
1757         init_allocator(&mq->demote_sentinel_alloc, &mq->es, nr_sentinels_per_queue, total_sentinels);
1758         for (i = 0; i < nr_sentinels_per_queue; i++)
1759                 get_entry(&mq->demote_sentinel_alloc, i)->sentinel = true;
1760
1761         init_allocator(&mq->hotspot_alloc, &mq->es, total_sentinels,
1762                        total_sentinels + mq->nr_hotspot_blocks);
1763
1764         init_allocator(&mq->cache_alloc, &mq->es,
1765                        total_sentinels + mq->nr_hotspot_blocks,
1766                        total_sentinels + mq->nr_hotspot_blocks + from_cblock(cache_size));
1767
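        /*
         * Per-period hit bitmaps (roughly: a bit is set on the first hit of
         * a hotspot/cache block within the current period and cleared again
         * when the period ends, so repeat hits within one period do not keep
         * bumping an entry's level).
         */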
1768         mq->hotspot_hit_bits = alloc_bitset(mq->nr_hotspot_blocks);
1769         if (!mq->hotspot_hit_bits) {
1770                 DMERR("couldn't allocate hotspot hit bitset");
1771                 goto bad_hotspot_hit_bits;
1772         }
1773         clear_bitset(mq->hotspot_hit_bits, mq->nr_hotspot_blocks);
1774
1775         if (from_cblock(cache_size)) {
1776                 mq->cache_hit_bits = alloc_bitset(from_cblock(cache_size));
1777                 if (!mq->cache_hit_bits) {
1778                         DMERR("couldn't allocate cache hit bitset");
1779                         goto bad_cache_hit_bits;
1780                 }
1781                 clear_bitset(mq->cache_hit_bits, from_cblock(mq->cache_size));
1782         } else
1783                 mq->cache_hit_bits = NULL;
1784
1785         mq->tick = 0;
1786         spin_lock_init(&mq->lock);
1787
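        /*
         * Hotspot queue shape: a sketch of the intent is that nr_top_levels
         * and nr_in_top_levels bound how many entries the queue's
         * redistribution logic tries to keep in its top eight levels; the
         * rebalancing itself lives in the queue code earlier in this file.
         */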
1788         q_init(&mq->hotspot, &mq->es, NR_HOTSPOT_LEVELS);
1789         mq->hotspot.nr_top_levels = 8;
1790         mq->hotspot.nr_in_top_levels = min(mq->nr_hotspot_blocks / NR_HOTSPOT_LEVELS,
1791                                            from_cblock(mq->cache_size) / mq->cache_blocks_per_hotspot_block);
1792
1793         q_init(&mq->clean, &mq->es, NR_CACHE_LEVELS);
1794         q_init(&mq->dirty, &mq->es, NR_CACHE_LEVELS);
1795
1796         stats_init(&mq->hotspot_stats, NR_HOTSPOT_LEVELS);
1797         stats_init(&mq->cache_stats, NR_CACHE_LEVELS);
1798
1799         if (h_init(&mq->table, &mq->es, from_cblock(cache_size)))
1800                 goto bad_alloc_table;
1801
1802         if (h_init(&mq->hotspot_table, &mq->es, mq->nr_hotspot_blocks))
1803                 goto bad_alloc_hotspot_table;
1804
1805         sentinels_init(mq);
1806         mq->write_promote_level = mq->read_promote_level = NR_HOTSPOT_LEVELS;
1807
1808         mq->next_hotspot_period = jiffies;
1809         mq->next_cache_period = jiffies;
1810
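        /*
         * The background tracker queues the promotion/demotion/writeback
         * work that smq_get_background_work() hands to the cache core;
         * 4096 is the maximum number of queued items (hence the FIXME).
         */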
1811         mq->bg_work = btracker_create(4096); /* FIXME: hard coded value */
1812         if (!mq->bg_work)
1813                 goto bad_btracker;
1814
1815         mq->migrations_allowed = migrations_allowed;
1816
1817         return &mq->policy;
1818
1819 bad_btracker:
1820         h_exit(&mq->hotspot_table);
1821 bad_alloc_hotspot_table:
1822         h_exit(&mq->table);
1823 bad_alloc_table:
1824         free_bitset(mq->cache_hit_bits);
1825 bad_cache_hit_bits:
1826         free_bitset(mq->hotspot_hit_bits);
1827 bad_hotspot_hit_bits:
1828         space_exit(&mq->es);
1829 bad_pool_init:
1830         kfree(mq);
1831
1832         return NULL;
1833 }
1834
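/*
 * The exported policies are thin wrappers around __smq_create(): "smq"
 * is the policy proper, "mq" mimics the retired mq policy (old tunables
 * are accepted but ignored), and "cleaner" disables migrations so the
 * only background work generated is writeback of dirty blocks.
 */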
1835 static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
1836                                           sector_t origin_size,
1837                                           sector_t cache_block_size)
1838 {
1839         return __smq_create(cache_size, origin_size, cache_block_size, false, true);
1840 }
1841
1842 static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
1843                                          sector_t origin_size,
1844                                          sector_t cache_block_size)
1845 {
1846         return __smq_create(cache_size, origin_size, cache_block_size, true, true);
1847 }
1848
1849 static struct dm_cache_policy *cleaner_create(dm_cblock_t cache_size,
1850                                               sector_t origin_size,
1851                                               sector_t cache_block_size)
1852 {
1853         return __smq_create(cache_size, origin_size, cache_block_size, false, false);
1854 }
1855
1856 /*----------------------------------------------------------------*/
1857
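/*
 * Registration tables.  hint_size is the number of bytes of per-block
 * hint stored in the cache metadata (smq records the entry's queue
 * level there), and "default" simply points at smq via .real.
 */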
1858 static struct dm_cache_policy_type smq_policy_type = {
1859         .name = "smq",
1860         .version = {2, 0, 0},
1861         .hint_size = 4,
1862         .owner = THIS_MODULE,
1863         .create = smq_create
1864 };
1865
1866 static struct dm_cache_policy_type mq_policy_type = {
1867         .name = "mq",
1868         .version = {2, 0, 0},
1869         .hint_size = 4,
1870         .owner = THIS_MODULE,
1871         .create = mq_create,
1872 };
1873
1874 static struct dm_cache_policy_type cleaner_policy_type = {
1875         .name = "cleaner",
1876         .version = {2, 0, 0},
1877         .hint_size = 4,
1878         .owner = THIS_MODULE,
1879         .create = cleaner_create,
1880 };
1881
1882 static struct dm_cache_policy_type default_policy_type = {
1883         .name = "default",
1884         .version = {2, 0, 0},
1885         .hint_size = 4,
1886         .owner = THIS_MODULE,
1887         .create = smq_create,
1888         .real = &smq_policy_type
1889 };
1890
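/*
 * Register all four policy names; the error path unwinds the earlier
 * registrations in reverse order.  Note that a registration failure is
 * reported to the caller as -ENOMEM regardless of the underlying error.
 */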
1891 static int __init smq_init(void)
1892 {
1893         int r;
1894
1895         r = dm_cache_policy_register(&smq_policy_type);
1896         if (r) {
1897                 DMERR("register failed %d", r);
1898                 return -ENOMEM;
1899         }
1900
1901         r = dm_cache_policy_register(&mq_policy_type);
1902         if (r) {
1903                 DMERR("register failed (as mq) %d", r);
1904                 goto out_mq;
1905         }
1906
1907         r = dm_cache_policy_register(&cleaner_policy_type);
1908         if (r) {
1909                 DMERR("register failed (as cleaner) %d", r);
1910                 goto out_cleaner;
1911         }
1912
1913         r = dm_cache_policy_register(&default_policy_type);
1914         if (r) {
1915                 DMERR("register failed (as default) %d", r);
1916                 goto out_default;
1917         }
1918
1919         return 0;
1920
1921 out_default:
1922         dm_cache_policy_unregister(&cleaner_policy_type);
1923 out_cleaner:
1924         dm_cache_policy_unregister(&mq_policy_type);
1925 out_mq:
1926         dm_cache_policy_unregister(&smq_policy_type);
1927
1928         return -ENOMEM;
1929 }
1930
1931 static void __exit smq_exit(void)
1932 {
1933         dm_cache_policy_unregister(&cleaner_policy_type);
1934         dm_cache_policy_unregister(&smq_policy_type);
1935         dm_cache_policy_unregister(&mq_policy_type);
1936         dm_cache_policy_unregister(&default_policy_type);
1937 }
1938
1939 module_init(smq_init);
1940 module_exit(smq_exit);
1941
1942 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
1943 MODULE_LICENSE("GPL");
1944 MODULE_DESCRIPTION("smq cache policy");
1945
1946 MODULE_ALIAS("dm-cache-default");
1947 MODULE_ALIAS("dm-cache-mq");
1948 MODULE_ALIAS("dm-cache-cleaner");