// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * zswap.c - zswap driver file
 *
 * zswap is a cache that takes pages that are in the process
 * of being swapped out and attempts to compress and store them in a
 * RAM-based memory pool.  This can result in a significant I/O reduction on
 * the swap device and, in the case where decompressing from RAM is faster
 * than reading from the swap device, can also improve workload performance.
 *
 * Copyright (C) 2012  Seth Jennings <sjenning@linux.vnet.ibm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/swap.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/mempolicy.h>
#include <linux/mempool.h>
#include <linux/zpool.h>
#include <crypto/acompress.h>
#include <linux/zswap.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/workqueue.h>
#include <linux/list_lru.h>

#include "swap.h"
#include "internal.h"

/*********************************
* statistics
**********************************/
/* The number of compressed pages currently stored in zswap */
atomic_t zswap_stored_pages = ATOMIC_INIT(0);

/*
 * The statistics below are not protected from concurrent access for
 * performance reasons so they may not be 100% accurate.  However,
 * they do provide useful information on roughly how many times a
 * certain event is occurring.
 */

/* Pool limit was hit (see zswap_max_pool_percent) */
static u64 zswap_pool_limit_hit;
/* Pages written back when pool limit was reached */
static u64 zswap_written_back_pages;
/* Store failed due to a reclaim failure after pool limit was reached */
static u64 zswap_reject_reclaim_fail;
/* Store failed due to compression algorithm failure */
static u64 zswap_reject_compress_fail;
/* Compressed page was too big for the allocator to (optimally) store */
static u64 zswap_reject_compress_poor;
/* Store failed because underlying allocator could not get memory */
static u64 zswap_reject_alloc_fail;
/* Store failed because the entry metadata could not be allocated (rare) */
static u64 zswap_reject_kmemcache_fail;

/* Shrinker work queue */
static struct workqueue_struct *shrink_wq;
/* Pool limit was hit, we need to calm down */
static bool zswap_pool_reached_full;

/*********************************
* tunables
**********************************/

#define ZSWAP_PARAM_UNSET ""

static int zswap_setup(void);

/* Enable/disable zswap */
static DEFINE_STATIC_KEY_MAYBE(CONFIG_ZSWAP_DEFAULT_ON, zswap_ever_enabled);
static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON);
static int zswap_enabled_param_set(const char *,
				   const struct kernel_param *);
static const struct kernel_param_ops zswap_enabled_param_ops = {
	.set = zswap_enabled_param_set,
	.get = param_get_bool,
};
module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);

/* Crypto compressor to use */
static char *zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
static int zswap_compressor_param_set(const char *,
				      const struct kernel_param *);
static const struct kernel_param_ops zswap_compressor_param_ops = {
	.set = zswap_compressor_param_set,
	.get = param_get_charp,
	.free = param_free_charp,
};
module_param_cb(compressor, &zswap_compressor_param_ops,
		&zswap_compressor, 0644);

/* Compressed storage zpool to use */
static char *zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
static int zswap_zpool_param_set(const char *, const struct kernel_param *);
static const struct kernel_param_ops zswap_zpool_param_ops = {
	.set = zswap_zpool_param_set,
	.get = param_get_charp,
	.free = param_free_charp,
};
module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);

/* The maximum percentage of memory that the compressed pool can occupy */
static unsigned int zswap_max_pool_percent = 20;
module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);

/* The threshold for accepting new pages after the max_pool_percent was hit */
static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */
module_param_named(accept_threshold_percent, zswap_accept_thr_percent,
		   uint, 0644);

/* Enable/disable memory pressure-based shrinker. */
static bool zswap_shrinker_enabled = IS_ENABLED(
		CONFIG_ZSWAP_SHRINKER_DEFAULT_ON);
module_param_named(shrinker_enabled, zswap_shrinker_enabled, bool, 0644);
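
/*
 * For illustration (editorial note, not part of the original source): the
 * parameters above are exposed under /sys/module/zswap/parameters/ and can
 * also be given on the kernel command line, e.g.:
 *
 *   echo Y    > /sys/module/zswap/parameters/enabled
 *   echo zstd > /sys/module/zswap/parameters/compressor
 *   # or at boot: zswap.enabled=1 zswap.compressor=zstd zswap.max_pool_percent=20
 *
 * The compressor and zpool names assume the corresponding support is built
 * into the kernel or loadable as a module.
 */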

bool zswap_is_enabled(void)
{
	return zswap_enabled;
}

bool zswap_never_enabled(void)
{
	return !static_branch_maybe(CONFIG_ZSWAP_DEFAULT_ON, &zswap_ever_enabled);
}

/*********************************
* data structures
**********************************/

struct crypto_acomp_ctx {
	struct crypto_acomp *acomp;
	struct acomp_req *req;
	struct crypto_wait wait;
	u8 *buffer;
	struct mutex mutex;
	bool is_sleepable;
};

/*
 * The lock ordering is zswap_tree.lock -> zswap_pool.lru_lock.
 * The only case where lru_lock is not acquired while holding tree.lock is
 * when a zswap_entry is taken off the lru for writeback; in that case it
 * needs to be verified that it's still valid in the tree.
 */
struct zswap_pool {
	struct zpool *zpool;
	struct crypto_acomp_ctx __percpu *acomp_ctx;
	struct percpu_ref ref;
	struct list_head list;
	struct work_struct release_work;
	struct hlist_node node;
	char tfm_name[CRYPTO_MAX_ALG_NAME];
};

/* Global LRU lists shared by all zswap pools. */
static struct list_lru zswap_list_lru;

/* The lock protects zswap_next_shrink updates. */
static DEFINE_SPINLOCK(zswap_shrink_lock);
static struct mem_cgroup *zswap_next_shrink;
static struct work_struct zswap_shrink_work;
static struct shrinker *zswap_shrinker;

/*
 * struct zswap_entry
 *
 * This structure contains the metadata for tracking a single compressed
 * page within zswap.
 *
 * swpentry - associated swap entry, the offset indexes into the tree
 * length - the length in bytes of the compressed page data.  Needed during
 *          decompression.
 * referenced - true if the entry recently entered the zswap pool. Unset by the
 *              writeback logic. The entry is only reclaimed by the writeback
 *              logic if referenced is unset. See comments in the shrinker
 *              section for context.
 * pool - the zswap_pool the entry's data is in
 * handle - zpool allocation handle that stores the compressed page data
 * objcg - the obj_cgroup that the compressed memory is charged to
 * lru - handle to the pool's lru used to evict pages.
 */
struct zswap_entry {
	swp_entry_t swpentry;
	unsigned int length;
	bool referenced;
	struct zswap_pool *pool;
	unsigned long handle;
	struct obj_cgroup *objcg;
	struct list_head lru;
};

static struct xarray *zswap_trees[MAX_SWAPFILES];
static unsigned int nr_zswap_trees[MAX_SWAPFILES];

/* RCU-protected iteration */
static LIST_HEAD(zswap_pools);
/* protects zswap_pools list modification */
static DEFINE_SPINLOCK(zswap_pools_lock);
/* pool counter to provide unique names to zpool */
static atomic_t zswap_pools_count = ATOMIC_INIT(0);

enum zswap_init_type {
	ZSWAP_UNINIT,
	ZSWAP_INIT_SUCCEED,
	ZSWAP_INIT_FAILED
};

static enum zswap_init_type zswap_init_state;

/* used to ensure the integrity of initialization */
static DEFINE_MUTEX(zswap_init_lock);

/* init completed, but couldn't create the initial pool */
static bool zswap_has_pool;

/*********************************
* helpers and fwd declarations
**********************************/

static inline struct xarray *swap_zswap_tree(swp_entry_t swp)
{
	return &zswap_trees[swp_type(swp)][swp_offset(swp)
		>> SWAP_ADDRESS_SPACE_SHIFT];
}
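
/*
 * Illustrative note (editorial, not from the original source): with
 * SWAP_ADDRESS_SPACE_SHIFT == 14, each xarray in zswap_trees[type] covers
 * 2^14 == 16384 swap slots, i.e. 64 MiB of swap space with 4 KiB pages,
 * which bounds the size of any single tree and the contention on it.
 */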

#define zswap_pool_debug(msg, p)			\
	pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,	\
		 zpool_get_type((p)->zpool))

/*********************************
* pool functions
**********************************/

static void __zswap_pool_empty(struct percpu_ref *ref);

static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
{
	struct zswap_pool *pool;
	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
	int ret;

	if (!zswap_has_pool) {
		/* if either are unset, pool initialization failed, and we
		 * need both params to be set correctly before trying to
		 * create a pool.
		 */
		if (!strcmp(type, ZSWAP_PARAM_UNSET))
			return NULL;
		if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
			return NULL;
	}

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	/* unique name for each pool specifically required by zsmalloc */
	snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));
	pool->zpool = zpool_create_pool(type, name, gfp);
	if (!pool->zpool) {
		pr_err("%s zpool not available\n", type);
		goto error;
	}
	pr_debug("using %s zpool\n", zpool_get_type(pool->zpool));

	strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));

	pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx);
	if (!pool->acomp_ctx) {
		pr_err("percpu alloc failed\n");
		goto error;
	}

	ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
				       &pool->node);
	if (ret)
		goto error;

	/* being the current pool takes 1 ref; this func expects the
	 * caller to always add the new pool as the current pool
	 */
	ret = percpu_ref_init(&pool->ref, __zswap_pool_empty,
			      PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
	if (ret)
		goto ref_fail;
	INIT_LIST_HEAD(&pool->list);

	zswap_pool_debug("created", pool);

	return pool;

ref_fail:
	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
error:
	if (pool->acomp_ctx)
		free_percpu(pool->acomp_ctx);
	if (pool->zpool)
		zpool_destroy_pool(pool->zpool);
	kfree(pool);
	return NULL;
}

static struct zswap_pool *__zswap_pool_create_fallback(void)
{
	bool has_comp, has_zpool;

	has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
	if (!has_comp && strcmp(zswap_compressor,
				CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
		pr_err("compressor %s not available, using default %s\n",
		       zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
		param_free_charp(&zswap_compressor);
		zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
		has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
	}
	if (!has_comp) {
		pr_err("default compressor %s not available\n",
		       zswap_compressor);
		param_free_charp(&zswap_compressor);
		zswap_compressor = ZSWAP_PARAM_UNSET;
	}

	has_zpool = zpool_has_pool(zswap_zpool_type);
	if (!has_zpool && strcmp(zswap_zpool_type,
				 CONFIG_ZSWAP_ZPOOL_DEFAULT)) {
		pr_err("zpool %s not available, using default %s\n",
		       zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
		has_zpool = zpool_has_pool(zswap_zpool_type);
	}
	if (!has_zpool) {
		pr_err("default zpool %s not available\n",
		       zswap_zpool_type);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = ZSWAP_PARAM_UNSET;
	}

	if (!has_comp || !has_zpool)
		return NULL;

	return zswap_pool_create(zswap_zpool_type, zswap_compressor);
}

static void zswap_pool_destroy(struct zswap_pool *pool)
{
	zswap_pool_debug("destroying", pool);

	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
	free_percpu(pool->acomp_ctx);

	zpool_destroy_pool(pool->zpool);
	kfree(pool);
}

static void __zswap_pool_release(struct work_struct *work)
{
	struct zswap_pool *pool = container_of(work, typeof(*pool),
						release_work);

	synchronize_rcu();

	/* nobody should have been able to get a ref... */
	WARN_ON(!percpu_ref_is_zero(&pool->ref));
	percpu_ref_exit(&pool->ref);

	/* pool is now off zswap_pools list and has no references. */
	zswap_pool_destroy(pool);
}

static struct zswap_pool *zswap_pool_current(void);

static void __zswap_pool_empty(struct percpu_ref *ref)
{
	struct zswap_pool *pool;

	pool = container_of(ref, typeof(*pool), ref);

	spin_lock_bh(&zswap_pools_lock);

	WARN_ON(pool == zswap_pool_current());

	list_del_rcu(&pool->list);

	INIT_WORK(&pool->release_work, __zswap_pool_release);
	schedule_work(&pool->release_work);

	spin_unlock_bh(&zswap_pools_lock);
}

static int __must_check zswap_pool_get(struct zswap_pool *pool)
{
	if (!pool)
		return 0;

	return percpu_ref_tryget(&pool->ref);
}

static void zswap_pool_put(struct zswap_pool *pool)
{
	percpu_ref_put(&pool->ref);
}

static struct zswap_pool *__zswap_pool_current(void)
{
	struct zswap_pool *pool;

	pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
	WARN_ONCE(!pool && zswap_has_pool,
		  "%s: no page storage pool!\n", __func__);

	return pool;
}

static struct zswap_pool *zswap_pool_current(void)
{
	assert_spin_locked(&zswap_pools_lock);

	return __zswap_pool_current();
}

static struct zswap_pool *zswap_pool_current_get(void)
{
	struct zswap_pool *pool;

	rcu_read_lock();

	pool = __zswap_pool_current();
	if (!zswap_pool_get(pool))
		pool = NULL;

	rcu_read_unlock();

	return pool;
}

/* type and compressor must be null-terminated */
static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
{
	struct zswap_pool *pool;

	assert_spin_locked(&zswap_pools_lock);

	list_for_each_entry_rcu(pool, &zswap_pools, list) {
		if (strcmp(pool->tfm_name, compressor))
			continue;
		if (strcmp(zpool_get_type(pool->zpool), type))
			continue;
		/* if we can't get it, it's about to be destroyed */
		if (!zswap_pool_get(pool))
			continue;
		return pool;
	}

	return NULL;
}

static unsigned long zswap_max_pages(void)
{
	return totalram_pages() * zswap_max_pool_percent / 100;
}

static unsigned long zswap_accept_thr_pages(void)
{
	return zswap_max_pages() * zswap_accept_thr_percent / 100;
}
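
/*
 * Worked example (editorial illustration, not from the original source): on
 * a machine with 4 GiB of RAM (1048576 4 KiB pages) and the defaults
 * max_pool_percent=20 and accept_threshold_percent=90, the pool is capped
 * at 209715 pages (~819 MiB) and, once full, new stores are accepted again
 * only after the pool shrinks below 188743 pages (~737 MiB).
 */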

unsigned long zswap_total_pages(void)
{
	struct zswap_pool *pool;
	unsigned long total = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(pool, &zswap_pools, list)
		total += zpool_get_total_pages(pool->zpool);
	rcu_read_unlock();

	return total;
}

static bool zswap_check_limits(void)
{
	unsigned long cur_pages = zswap_total_pages();
	unsigned long max_pages = zswap_max_pages();

	if (cur_pages >= max_pages) {
		zswap_pool_limit_hit++;
		zswap_pool_reached_full = true;
	} else if (zswap_pool_reached_full &&
		   cur_pages <= zswap_accept_thr_pages()) {
		zswap_pool_reached_full = false;
	}
	return zswap_pool_reached_full;
}

/*********************************
* param callbacks
**********************************/

static bool zswap_pool_changed(const char *s, const struct kernel_param *kp)
{
	/* no change required */
	if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
		return false;
	return true;
}

/* val must be a null-terminated string */
static int __zswap_param_set(const char *val, const struct kernel_param *kp,
			     char *type, char *compressor)
{
	struct zswap_pool *pool, *put_pool = NULL;
	char *s = strstrip((char *)val);
	int ret = 0;
	bool new_pool = false;

	mutex_lock(&zswap_init_lock);
	switch (zswap_init_state) {
	case ZSWAP_UNINIT:
		/* if this is load-time (pre-init) param setting,
		 * don't create a pool; that's done during init.
		 */
		ret = param_set_charp(s, kp);
		break;
	case ZSWAP_INIT_SUCCEED:
		new_pool = zswap_pool_changed(s, kp);
		break;
	case ZSWAP_INIT_FAILED:
		pr_err("can't set param, initialization failed\n");
		ret = -ENODEV;
	}
	mutex_unlock(&zswap_init_lock);

	/* no need to create a new pool, return directly */
	if (!new_pool)
		return ret;

	if (!type) {
		if (!zpool_has_pool(s)) {
			pr_err("zpool %s not available\n", s);
			return -ENOENT;
		}
		type = s;
	} else if (!compressor) {
		if (!crypto_has_acomp(s, 0, 0)) {
			pr_err("compressor %s not available\n", s);
			return -ENOENT;
		}
		compressor = s;
	} else {
		WARN_ON(1);
		return -EINVAL;
	}

	spin_lock_bh(&zswap_pools_lock);

	pool = zswap_pool_find_get(type, compressor);
	if (pool) {
		zswap_pool_debug("using existing", pool);
		WARN_ON(pool == zswap_pool_current());
		list_del_rcu(&pool->list);
	}

	spin_unlock_bh(&zswap_pools_lock);

	if (!pool)
		pool = zswap_pool_create(type, compressor);
	else {
		/*
		 * Restore the initial ref dropped by percpu_ref_kill()
		 * when the pool was decommissioned and switch it again
		 * to percpu mode.
		 */
		percpu_ref_resurrect(&pool->ref);

		/* Drop the ref from zswap_pool_find_get(). */
		zswap_pool_put(pool);
	}

	if (pool)
		ret = param_set_charp(s, kp);
	else
		ret = -EINVAL;

	spin_lock_bh(&zswap_pools_lock);

	if (!ret) {
		put_pool = zswap_pool_current();
		list_add_rcu(&pool->list, &zswap_pools);
		zswap_has_pool = true;
	} else if (pool) {
		/* add the possibly pre-existing pool to the end of the pools
		 * list; if it's new (and empty) then it'll be removed and
		 * destroyed by the put after we drop the lock
		 */
		list_add_tail_rcu(&pool->list, &zswap_pools);
		put_pool = pool;
	}

	spin_unlock_bh(&zswap_pools_lock);

	if (!zswap_has_pool && !pool) {
		/* if initial pool creation failed, and this pool creation also
		 * failed, maybe both compressor and zpool params were bad.
		 * Allow changing this param, so pool creation will succeed
		 * when the other param is changed. We already verified this
		 * param is ok in the zpool_has_pool() or crypto_has_acomp()
		 * checks above.
		 */
		ret = param_set_charp(s, kp);
	}

	/* drop the ref from either the old current pool,
	 * or the new pool we failed to add
	 */
	if (put_pool)
		percpu_ref_kill(&put_pool->ref);

	return ret;
}

static int zswap_compressor_param_set(const char *val,
				      const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
}

static int zswap_zpool_param_set(const char *val,
				 const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, NULL, zswap_compressor);
}

static int zswap_enabled_param_set(const char *val,
				   const struct kernel_param *kp)
{
	int ret = -ENODEV;

	/* if this is load-time (pre-init) param setting, only set param. */
	if (system_state != SYSTEM_RUNNING)
		return param_set_bool(val, kp);

	mutex_lock(&zswap_init_lock);
	switch (zswap_init_state) {
	case ZSWAP_UNINIT:
		if (zswap_setup())
			break;
		fallthrough;
	case ZSWAP_INIT_SUCCEED:
		if (!zswap_has_pool)
			pr_err("can't enable, no pool configured\n");
		else
			ret = param_set_bool(val, kp);
		break;
	case ZSWAP_INIT_FAILED:
		pr_err("can't enable, initialization failed\n");
	}
	mutex_unlock(&zswap_init_lock);

	return ret;
}

/*********************************
* lru functions
**********************************/

/* should be called under RCU */
#ifdef CONFIG_MEMCG
static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
{
	return entry->objcg ? obj_cgroup_memcg(entry->objcg) : NULL;
}
#else
static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
{
	return NULL;
}
#endif

static inline int entry_to_nid(struct zswap_entry *entry)
{
	return page_to_nid(virt_to_page(entry));
}

static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
{
	int nid = entry_to_nid(entry);
	struct mem_cgroup *memcg;

	/*
	 * Note that it is safe to use rcu_read_lock() here, even in the face of
	 * concurrent memcg offlining. Thanks to the memcg->kmemcg_id indirection
	 * used in list_lru lookup, only two scenarios are possible:
	 *
	 * 1. list_lru_add() is called before memcg->kmemcg_id is updated. The
	 *    new entry will be reparented to memcg's parent's list_lru.
	 * 2. list_lru_add() is called after memcg->kmemcg_id is updated. The
	 *    new entry will be added directly to memcg's parent's list_lru.
	 *
	 * Similar reasoning holds for list_lru_del().
	 */
	rcu_read_lock();
	memcg = mem_cgroup_from_entry(entry);
	/* will always succeed */
	list_lru_add(list_lru, &entry->lru, nid, memcg);
	rcu_read_unlock();
}

static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry)
{
	int nid = entry_to_nid(entry);
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = mem_cgroup_from_entry(entry);
	/* will always succeed */
	list_lru_del(list_lru, &entry->lru, nid, memcg);
	rcu_read_unlock();
}

void zswap_lruvec_state_init(struct lruvec *lruvec)
{
	atomic_long_set(&lruvec->zswap_lruvec_state.nr_disk_swapins, 0);
}

void zswap_folio_swapin(struct folio *folio)
{
	struct lruvec *lruvec;

	if (folio) {
		lruvec = folio_lruvec(folio);
		atomic_long_inc(&lruvec->zswap_lruvec_state.nr_disk_swapins);
	}
}

/*
 * This function should be called when a memcg is being offlined.
 *
 * Since the global shrinker shrink_worker() may hold a reference
 * of the memcg, we must check and release the reference in
 * zswap_next_shrink.
 *
 * shrink_worker() must handle the case where this function releases
 * the reference of memcg being shrunk.
 */
void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
{
	/* lock out zswap shrinker walking memcg tree */
	spin_lock(&zswap_shrink_lock);
	if (zswap_next_shrink == memcg) {
		do {
			zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
		} while (zswap_next_shrink && !mem_cgroup_online(zswap_next_shrink));
	}
	spin_unlock(&zswap_shrink_lock);
}

/*********************************
* zswap entry functions
**********************************/
static struct kmem_cache *zswap_entry_cache;

static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp, int nid)
{
	struct zswap_entry *entry;
	entry = kmem_cache_alloc_node(zswap_entry_cache, gfp, nid);
	if (!entry)
		return NULL;
	return entry;
}

static void zswap_entry_cache_free(struct zswap_entry *entry)
{
	kmem_cache_free(zswap_entry_cache, entry);
}

/*
 * Carries out the common pattern of freeing an entry's zpool allocation,
 * freeing the entry itself, and decrementing the number of stored pages.
 */
static void zswap_entry_free(struct zswap_entry *entry)
{
	zswap_lru_del(&zswap_list_lru, entry);
	zpool_free(entry->pool->zpool, entry->handle);
	zswap_pool_put(entry->pool);
	if (entry->objcg) {
		obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
		obj_cgroup_put(entry->objcg);
	}
	zswap_entry_cache_free(entry);
	atomic_dec(&zswap_stored_pages);
}

/*********************************
* compressed storage functions
**********************************/
static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
	struct crypto_acomp *acomp;
	struct acomp_req *req;
	int ret;

	mutex_init(&acomp_ctx->mutex);

	acomp_ctx->buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
	if (!acomp_ctx->buffer)
		return -ENOMEM;

	acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
	if (IS_ERR(acomp)) {
		pr_err("could not alloc crypto acomp %s : %ld\n",
				pool->tfm_name, PTR_ERR(acomp));
		ret = PTR_ERR(acomp);
		goto acomp_fail;
	}
	acomp_ctx->acomp = acomp;
	acomp_ctx->is_sleepable = acomp_is_async(acomp);

	req = acomp_request_alloc(acomp_ctx->acomp);
	if (!req) {
		pr_err("could not alloc crypto acomp_request %s\n",
		       pool->tfm_name);
		ret = -ENOMEM;
		goto req_fail;
	}
	acomp_ctx->req = req;

	crypto_init_wait(&acomp_ctx->wait);
	/*
	 * if the backend of acomp is async zip, crypto_req_done() will wakeup
	 * crypto_wait_req(); if the backend of acomp is scomp, the callback
	 * won't be called, crypto_wait_req() will return without blocking.
	 */
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &acomp_ctx->wait);

	return 0;

req_fail:
	crypto_free_acomp(acomp_ctx->acomp);
acomp_fail:
	kfree(acomp_ctx->buffer);
	return ret;
}

static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);

	if (!IS_ERR_OR_NULL(acomp_ctx)) {
		if (!IS_ERR_OR_NULL(acomp_ctx->req))
			acomp_request_free(acomp_ctx->req);
		if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
			crypto_free_acomp(acomp_ctx->acomp);
		kfree(acomp_ctx->buffer);
	}

	return 0;
}

static bool zswap_compress(struct folio *folio, struct zswap_entry *entry)
{
	struct crypto_acomp_ctx *acomp_ctx;
	struct scatterlist input, output;
	int comp_ret = 0, alloc_ret = 0;
	unsigned int dlen = PAGE_SIZE;
	unsigned long handle;
	struct zpool *zpool;
	char *buf;
	gfp_t gfp;
	u8 *dst;

	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);

	mutex_lock(&acomp_ctx->mutex);

	dst = acomp_ctx->buffer;
	sg_init_table(&input, 1);
	sg_set_folio(&input, folio, PAGE_SIZE, 0);

	/*
	 * We need PAGE_SIZE * 2 here since there may be cases where the
	 * compressed output is larger than the input, and hardware
	 * accelerators may not check the dst buffer size. Give the dst
	 * buffer enough length to avoid a buffer overflow.
	 */
	sg_init_one(&output, dst, PAGE_SIZE * 2);
	acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);

	/*
	 * It may look a little odd that we send an asynchronous request and
	 * then wait for its completion synchronously; the process is in fact
	 * synchronous.
	 * Theoretically, acomp supports sending multiple requests to one acomp
	 * instance and having them complete concurrently. But zswap stores and
	 * loads page by page, so a single thread doing zswap has no way to
	 * submit the second page before the first one is done.
	 * However, threads running on different CPUs use different acomp
	 * instances, so multiple threads can do (de)compression in parallel.
	 */
	comp_ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
	dlen = acomp_ctx->req->dlen;
	if (comp_ret)
		goto unlock;

	zpool = entry->pool->zpool;
	gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
	if (zpool_malloc_support_movable(zpool))
		gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
	alloc_ret = zpool_malloc(zpool, dlen, gfp, &handle);
	if (alloc_ret)
		goto unlock;

	buf = zpool_map_handle(zpool, handle, ZPOOL_MM_WO);
	memcpy(buf, dst, dlen);
	zpool_unmap_handle(zpool, handle);

	entry->handle = handle;
	entry->length = dlen;

unlock:
	if (comp_ret == -ENOSPC || alloc_ret == -ENOSPC)
		zswap_reject_compress_poor++;
	else if (comp_ret)
		zswap_reject_compress_fail++;
	else if (alloc_ret)
		zswap_reject_alloc_fail++;

	mutex_unlock(&acomp_ctx->mutex);
	return comp_ret == 0 && alloc_ret == 0;
}

static void zswap_decompress(struct zswap_entry *entry, struct folio *folio)
{
	struct zpool *zpool = entry->pool->zpool;
	struct scatterlist input, output;
	struct crypto_acomp_ctx *acomp_ctx;
	u8 *src;

	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
	mutex_lock(&acomp_ctx->mutex);

	src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
	/*
	 * If zpool_map_handle is atomic, we cannot reliably utilize its mapped buffer
	 * to do crypto_acomp_decompress() which might sleep. In such cases, we must
	 * resort to copying the buffer to a temporary one.
	 * Meanwhile, zpool_map_handle() might return a non-linearly mapped buffer,
	 * such as a kmap address of high memory or even a vmap address.
	 * However, sg_init_one is only equipped to handle linearly mapped low memory.
	 * In such cases, we also must copy the buffer to a temporary and lowmem one.
	 */
	if ((acomp_ctx->is_sleepable && !zpool_can_sleep_mapped(zpool)) ||
	    !virt_addr_valid(src)) {
		memcpy(acomp_ctx->buffer, src, entry->length);
		src = acomp_ctx->buffer;
		zpool_unmap_handle(zpool, entry->handle);
	}

	sg_init_one(&input, src, entry->length);
	sg_init_table(&output, 1);
	sg_set_folio(&output, folio, PAGE_SIZE, 0);
	acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
	BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
	BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
	mutex_unlock(&acomp_ctx->mutex);

	if (src != acomp_ctx->buffer)
		zpool_unmap_handle(zpool, entry->handle);
}

/*********************************
* writeback code
**********************************/
/*
 * Attempts to free an entry by adding a folio to the swap cache,
 * decompressing the entry data into the folio, and issuing a
 * bio write to write the folio back to the swap device.
 *
 * This can be thought of as a "resumed writeback" of the folio
 * to the swap device.  We are basically resuming the same swap
 * writeback path that was intercepted with the zswap_store()
 * in the first place.  After the folio has been decompressed into
 * the swap cache, the compressed version stored by zswap can be
 * freed.
 */
static int zswap_writeback_entry(struct zswap_entry *entry,
				 swp_entry_t swpentry)
{
	struct xarray *tree;
	pgoff_t offset = swp_offset(swpentry);
	struct folio *folio;
	struct mempolicy *mpol;
	bool folio_was_allocated;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
	};

	/* try to allocate swap cache folio */
	mpol = get_task_policy(current);
	folio = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
				NO_INTERLEAVE_INDEX, &folio_was_allocated, true);
	if (!folio)
		return -ENOMEM;

	/*
	 * Found an existing folio, we raced with swapin or concurrent
	 * shrinker. We generally writeback cold folios from zswap, and
	 * swapin means the folio just became hot, so skip this folio.
	 * For unlikely concurrent shrinker case, it will be unlinked
	 * and freed when invalidated by the concurrent shrinker anyway.
	 */
	if (!folio_was_allocated) {
		folio_put(folio);
		return -EEXIST;
	}

	/*
	 * folio is locked, and the swapcache is now secured against
	 * concurrent swapping to and from the slot, and concurrent
	 * swapoff so we can safely dereference the zswap tree here.
	 * Verify that the swap entry hasn't been invalidated and recycled
	 * behind our backs, to avoid overwriting a new swap folio with
	 * old compressed data. Only when this is successful can the entry
	 * be dereferenced.
	 */
	tree = swap_zswap_tree(swpentry);
	if (entry != xa_cmpxchg(tree, offset, entry, NULL, GFP_KERNEL)) {
		delete_from_swap_cache(folio);
		folio_unlock(folio);
		folio_put(folio);
		return -ENOMEM;
	}

	zswap_decompress(entry, folio);

	count_vm_event(ZSWPWB);
	if (entry->objcg)
		count_objcg_event(entry->objcg, ZSWPWB);

	zswap_entry_free(entry);

	/* folio is up to date */
	folio_mark_uptodate(folio);

	/* move it to the tail of the inactive list after end_writeback */
	folio_set_reclaim(folio);

	/* start writeback */
	__swap_writepage(folio, &wbc);
	folio_put(folio);

	return 0;
}

/*********************************
* shrinker functions
**********************************/
/*
 * The dynamic shrinker is modulated by the following factors:
 *
 * 1. Each zswap entry has a referenced bit, which the shrinker unsets (giving
 *    the entry a second chance) before rotating it in the LRU list. If the
 *    entry is considered again by the shrinker, with its referenced bit unset,
 *    it is written back. The writeback rate as a result is dynamically
 *    adjusted by the pool activities - if the pool is dominated by new entries
 *    (i.e lots of recent zswapouts), these entries will be protected and
 *    the writeback rate will slow down. On the other hand, if the pool has a
 *    lot of stagnant entries, these entries will be reclaimed immediately,
 *    effectively increasing the writeback rate.
 *
 * 2. Swapins counter: If we observe swapins, it is a sign that we are
 *    overshrinking and should slow down. We maintain a swapins counter, which
 *    is consumed and subtracted from the number of eligible objects on the
 *    LRU in zswap_shrinker_count().
 *
 * 3. Compression ratio. The better the workload compresses, the less gains we
 *    can expect from writeback. We scale down the number of objects available
 *    for reclaim by this ratio.
 */
static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
				       spinlock_t *lock, void *arg)
{
	struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
	bool *encountered_page_in_swapcache = (bool *)arg;
	swp_entry_t swpentry;
	enum lru_status ret = LRU_REMOVED_RETRY;
	int writeback_result;

	/*
	 * Second chance algorithm: if the entry has its referenced bit set, give it
	 * a second chance. Only clear the referenced bit and rotate it in the
	 * memcg's LRU list.
	 */
	if (entry->referenced) {
		entry->referenced = false;
		return LRU_ROTATE;
	}

	/*
	 * As soon as we drop the LRU lock, the entry can be freed by
	 * a concurrent invalidation. This means the following:
	 *
	 * 1. We extract the swp_entry_t to the stack, allowing
	 *    zswap_writeback_entry() to pin the swap entry and
	 *    then validate the zswap entry against that swap entry's
	 *    tree using pointer value comparison. Only when that
	 *    is successful can the entry be dereferenced.
	 *
	 * 2. Usually, objects are taken off the LRU for reclaim. In
	 *    this case this isn't possible, because if reclaim fails
	 *    for whatever reason, we have no means of knowing if the
	 *    entry is alive to put it back on the LRU.
	 *
	 *    So rotate it before dropping the lock. If the entry is
	 *    written back or invalidated, the free path will unlink
	 *    it. For failures, rotation is the right thing as well.
	 *
	 *    Temporary failures, where the same entry should be tried
	 *    again immediately, almost never happen for this shrinker.
	 *    We don't do any trylocking; -ENOMEM comes closest,
	 *    but that's extremely rare and doesn't happen spuriously
	 *    either. Don't bother distinguishing this case.
	 */
	list_move_tail(item, &l->list);

	/*
	 * Once the lru lock is dropped, the entry might get freed. The
	 * swpentry is copied to the stack, and entry isn't deref'd again
	 * until the entry is verified to still be alive in the tree.
	 */
	swpentry = entry->swpentry;

	/*
	 * It's safe to drop the lock here because we return either
	 * LRU_REMOVED_RETRY or LRU_RETRY.
	 */
	spin_unlock(lock);

	writeback_result = zswap_writeback_entry(entry, swpentry);

	if (writeback_result) {
		zswap_reject_reclaim_fail++;
		ret = LRU_RETRY;

		/*
		 * Encountering a page already in swap cache is a sign that we are shrinking
		 * into the warmer region. We should terminate shrinking (if we're in the dynamic
		 * shrinker context).
		 */
		if (writeback_result == -EEXIST && encountered_page_in_swapcache) {
			ret = LRU_STOP;
			*encountered_page_in_swapcache = true;
		}
	} else {
		zswap_written_back_pages++;
	}

	return ret;
}

static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
		struct shrink_control *sc)
{
	unsigned long shrink_ret;
	bool encountered_page_in_swapcache = false;

	if (!zswap_shrinker_enabled ||
	    !mem_cgroup_zswap_writeback_enabled(sc->memcg)) {
		sc->nr_scanned = 0;
		return SHRINK_STOP;
	}

	shrink_ret = list_lru_shrink_walk(&zswap_list_lru, sc, &shrink_memcg_cb,
		&encountered_page_in_swapcache);

	if (encountered_page_in_swapcache)
		return SHRINK_STOP;

	return shrink_ret ? shrink_ret : SHRINK_STOP;
}

static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
		struct shrink_control *sc)
{
	struct mem_cgroup *memcg = sc->memcg;
	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(sc->nid));
	atomic_long_t *nr_disk_swapins =
		&lruvec->zswap_lruvec_state.nr_disk_swapins;
	unsigned long nr_backing, nr_stored, nr_freeable, nr_disk_swapins_cur,
		nr_remain;

	if (!zswap_shrinker_enabled || !mem_cgroup_zswap_writeback_enabled(memcg))
		return 0;

	/*
	 * The shrinker resumes swap writeback, which will enter block
	 * and may enter fs. XXX: Harmonize with vmscan.c __GFP_FS
	 * rules (may_enter_fs()), which apply on a per-folio basis.
	 */
	if (!gfp_has_io_fs(sc->gfp_mask))
		return 0;

	/*
	 * For memcg, use the cgroup-wide ZSWAP stats since we don't
	 * have them per-node and thus per-lruvec. Careful if memcg is
	 * runtime-disabled: we can get sc->memcg == NULL, which is ok
	 * for the lruvec, but not for memcg_page_state().
	 *
	 * Without memcg, use the zswap pool-wide metrics.
	 */
	if (!mem_cgroup_disabled()) {
		mem_cgroup_flush_stats(memcg);
		nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
		nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
	} else {
		nr_backing = zswap_total_pages();
		nr_stored = atomic_read(&zswap_stored_pages);
	}

	if (!nr_stored)
		return 0;

	nr_freeable = list_lru_shrink_count(&zswap_list_lru, sc);
	if (!nr_freeable)
		return 0;

	/*
	 * Subtract from the lru size the number of pages that were recently
	 * swapped in from disk. The idea is that had we protected the zswap
	 * LRU by this amount of pages, these disk swapins would not have
	 * happened.
	 */
	nr_disk_swapins_cur = atomic_long_read(nr_disk_swapins);
	do {
		if (nr_freeable >= nr_disk_swapins_cur)
			nr_remain = 0;
		else
			nr_remain = nr_disk_swapins_cur - nr_freeable;
	} while (!atomic_long_try_cmpxchg(
		nr_disk_swapins, &nr_disk_swapins_cur, nr_remain));

	nr_freeable -= nr_disk_swapins_cur - nr_remain;
	if (!nr_freeable)
		return 0;

	/*
	 * Scale the number of freeable pages by the memory saving factor.
	 * This ensures that the better zswap compresses memory, the fewer
	 * pages we will evict to swap (as it will otherwise incur IO for
	 * relatively small memory saving).
	 */
	return mult_frac(nr_freeable, nr_backing, nr_stored);
}
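
/*
 * Worked example for the scaling above (editorial illustration, not from the
 * original source): if 1000 entries are freeable (nr_freeable), backed by 250
 * pages of compressed storage (nr_backing) holding 1000 stored pages
 * (nr_stored), the 4:1 compression ratio means writing everything back frees
 * only 250 pages, so the shrinker reports mult_frac(1000, 250, 1000) == 250
 * reclaimable objects.
 */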

static struct shrinker *zswap_alloc_shrinker(void)
{
	struct shrinker *shrinker;

	shrinker =
		shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE, "mm-zswap");
	if (!shrinker)
		return NULL;

	shrinker->scan_objects = zswap_shrinker_scan;
	shrinker->count_objects = zswap_shrinker_count;
	shrinker->batch = 0;
	shrinker->seeks = DEFAULT_SEEKS;
	return shrinker;
}

static int shrink_memcg(struct mem_cgroup *memcg)
{
	int nid, shrunk = 0, scanned = 0;

	if (!mem_cgroup_zswap_writeback_enabled(memcg))
		return -ENOENT;

	/*
	 * Skip zombies because their LRUs are reparented and we would be
	 * reclaiming from the parent instead of the dead memcg.
	 */
	if (memcg && !mem_cgroup_online(memcg))
		return -ENOENT;

	for_each_node_state(nid, N_NORMAL_MEMORY) {
		unsigned long nr_to_walk = 1;

		shrunk += list_lru_walk_one(&zswap_list_lru, nid, memcg,
					    &shrink_memcg_cb, NULL, &nr_to_walk);
		scanned += 1 - nr_to_walk;
	}

	if (!scanned)
		return -ENOENT;

	return shrunk ? 0 : -EAGAIN;
}

static void shrink_worker(struct work_struct *w)
{
	struct mem_cgroup *memcg;
	int ret, failures = 0, attempts = 0;
	unsigned long thr;

	/* Reclaim down to the accept threshold */
	thr = zswap_accept_thr_pages();

	/*
	 * Global reclaim will select cgroup in a round-robin fashion from all
	 * online memcgs, but memcgs that have no pages in zswap and
	 * writeback-disabled memcgs (memory.zswap.writeback=0) are not
	 * candidates for shrinking.
	 *
	 * Shrinking will be aborted if we encounter the following
	 * MAX_RECLAIM_RETRIES times:
	 * - No writeback-candidate memcgs found in a memcg tree walk.
	 * - Shrinking a writeback-candidate memcg failed.
	 *
	 * We save iteration cursor memcg into zswap_next_shrink,
	 * which can be modified by the offline memcg cleaner
	 * zswap_memcg_offline_cleanup().
	 *
	 * Since the offline cleaner is called only once, we cannot leave an
	 * offline memcg reference in zswap_next_shrink.
	 * We can rely on the cleaner only if we get online memcg under lock.
	 *
	 * If we get an offline memcg, we cannot determine if the cleaner has
	 * already been called or will be called later. We must put back the
	 * reference before returning from this function. Otherwise, the
	 * offline memcg left in zswap_next_shrink will hold the reference
	 * until the next run of shrink_worker().
	 */
	do {
		/*
		 * Start shrinking from the next memcg after zswap_next_shrink.
		 * When the offline cleaner has already advanced the cursor,
		 * advancing the cursor here overlooks one memcg, but this
		 * should be negligibly rare.
		 *
		 * If we get an online memcg, keep the extra reference in case
		 * the original one obtained by mem_cgroup_iter() is dropped by
		 * zswap_memcg_offline_cleanup() while we are shrinking the
		 * memcg.
		 */
		spin_lock(&zswap_shrink_lock);
		do {
			memcg = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
			zswap_next_shrink = memcg;
		} while (memcg && !mem_cgroup_tryget_online(memcg));
		spin_unlock(&zswap_shrink_lock);

		if (!memcg) {
			/*
			 * Continue shrinking without incrementing failures if
			 * we found candidate memcgs in the last tree walk.
			 */
			if (!attempts && ++failures == MAX_RECLAIM_RETRIES)
				break;

			attempts = 0;
			goto resched;
		}

		ret = shrink_memcg(memcg);
		/* drop the extra reference */
		mem_cgroup_put(memcg);

		/*
		 * There are no writeback-candidate pages in the memcg.
		 * This is not an issue as long as we can find another memcg
		 * with pages in zswap. Skip this without incrementing attempts
		 * and failures.
		 */
		if (ret == -ENOENT)
			continue;
		++attempts;

		if (ret && ++failures == MAX_RECLAIM_RETRIES)
			break;
resched:
		cond_resched();
	} while (zswap_total_pages() > thr);
}

/*********************************
* main API
**********************************/
bool zswap_store(struct folio *folio)
{
	swp_entry_t swp = folio->swap;
	pgoff_t offset = swp_offset(swp);
	struct xarray *tree = swap_zswap_tree(swp);
	struct zswap_entry *entry, *old;
	struct obj_cgroup *objcg = NULL;
	struct mem_cgroup *memcg = NULL;

	VM_WARN_ON_ONCE(!folio_test_locked(folio));
	VM_WARN_ON_ONCE(!folio_test_swapcache(folio));

	/* Large folios aren't supported */
	if (folio_test_large(folio))
		return false;

	if (!zswap_enabled)
		goto check_old;

	/* Check cgroup limits */
	objcg = get_obj_cgroup_from_folio(folio);
	if (objcg && !obj_cgroup_may_zswap(objcg)) {
		memcg = get_mem_cgroup_from_objcg(objcg);
		if (shrink_memcg(memcg)) {
			mem_cgroup_put(memcg);
			goto reject;
		}
		mem_cgroup_put(memcg);
	}

	if (zswap_check_limits())
		goto reject;

	/* allocate entry */
	entry = zswap_entry_cache_alloc(GFP_KERNEL, folio_nid(folio));
	if (!entry) {
		zswap_reject_kmemcache_fail++;
		goto reject;
	}

	/* if entry is successfully added, it keeps the reference */
	entry->pool = zswap_pool_current_get();
	if (!entry->pool)
		goto freepage;

	if (objcg) {
		memcg = get_mem_cgroup_from_objcg(objcg);
		if (memcg_list_lru_alloc(memcg, &zswap_list_lru, GFP_KERNEL)) {
			mem_cgroup_put(memcg);
			goto put_pool;
		}
		mem_cgroup_put(memcg);
	}

	if (!zswap_compress(folio, entry))
		goto put_pool;

	entry->swpentry = swp;
	entry->objcg = objcg;
	entry->referenced = true;

	old = xa_store(tree, offset, entry, GFP_KERNEL);
	if (xa_is_err(old)) {
		int err = xa_err(old);

		WARN_ONCE(err != -ENOMEM, "unexpected xarray error: %d\n", err);
		zswap_reject_alloc_fail++;
		goto store_failed;
	}

	/*
	 * We may have had an existing entry that became stale when
	 * the folio was redirtied and now the new version is being
	 * swapped out. Get rid of the old.
	 */
	if (old)
		zswap_entry_free(old);

	if (objcg) {
		obj_cgroup_charge_zswap(objcg, entry->length);
		count_objcg_event(objcg, ZSWPOUT);
	}

	/*
	 * We finish initializing the entry while it's already in xarray.
	 * This is safe because:
	 *
	 * 1. Concurrent stores and invalidations are excluded by folio lock.
	 *
	 * 2. Writeback is excluded by the entry not being on the LRU yet.
	 *    The publishing order matters to prevent writeback from seeing
	 *    an incoherent entry.
	 */
	if (entry->length) {
		INIT_LIST_HEAD(&entry->lru);
		zswap_lru_add(&zswap_list_lru, entry);
	}

	/* update stats */
	atomic_inc(&zswap_stored_pages);
	count_vm_event(ZSWPOUT);

	return true;

store_failed:
	zpool_free(entry->pool->zpool, entry->handle);
put_pool:
	zswap_pool_put(entry->pool);
freepage:
	zswap_entry_cache_free(entry);
reject:
	obj_cgroup_put(objcg);
	if (zswap_pool_reached_full)
		queue_work(shrink_wq, &zswap_shrink_work);
check_old:
	/*
	 * If the zswap store fails or zswap is disabled, we must invalidate the
	 * possibly stale entry which was previously stored at this offset.
	 * Otherwise, writeback could overwrite the new data in the swapfile.
	 */
	entry = xa_erase(tree, offset);
	if (entry)
		zswap_entry_free(entry);
	return false;
}

bool zswap_load(struct folio *folio)
{
	swp_entry_t swp = folio->swap;
	pgoff_t offset = swp_offset(swp);
	bool swapcache = folio_test_swapcache(folio);
	struct xarray *tree = swap_zswap_tree(swp);
	struct zswap_entry *entry;

	VM_WARN_ON_ONCE(!folio_test_locked(folio));

	if (zswap_never_enabled())
		return false;

	/*
	 * Large folios should not be swapped in while zswap is being used, as
	 * they are not properly handled. Zswap does not properly load large
	 * folios, and a large folio may only be partially in zswap.
	 *
	 * Return true without marking the folio uptodate so that an IO error is
	 * emitted (e.g. do_swap_page() will sigbus).
	 */
	if (WARN_ON_ONCE(folio_test_large(folio)))
		return true;

	/*
	 * When reading into the swapcache, invalidate our entry. The
	 * swapcache can be the authoritative owner of the page and
	 * its mappings, and the pressure that results from having two
	 * in-memory copies outweighs any benefits of caching the
	 * compression work.
	 *
	 * (Most swapins go through the swapcache. The notable
	 * exception is the singleton fault on SWP_SYNCHRONOUS_IO
	 * files, which reads into a private page and may free it if
	 * the fault fails. We remain the primary owner of the entry.)
	 */
	if (swapcache)
		entry = xa_erase(tree, offset);
	else
		entry = xa_load(tree, offset);

	if (!entry)
		return false;

	zswap_decompress(entry, folio);

	count_vm_event(ZSWPIN);
	if (entry->objcg)
		count_objcg_event(entry->objcg, ZSWPIN);

	if (swapcache) {
		zswap_entry_free(entry);
		folio_mark_dirty(folio);
	}

	folio_mark_uptodate(folio);
	return true;
}

void zswap_invalidate(swp_entry_t swp)
{
	pgoff_t offset = swp_offset(swp);
	struct xarray *tree = swap_zswap_tree(swp);
	struct zswap_entry *entry;

	entry = xa_erase(tree, offset);
	if (entry)
		zswap_entry_free(entry);
}

int zswap_swapon(int type, unsigned long nr_pages)
{
	struct xarray *trees, *tree;
	unsigned int nr, i;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	trees = kvcalloc(nr, sizeof(*tree), GFP_KERNEL);
	if (!trees) {
		pr_err("alloc failed, zswap disabled for swap type %d\n", type);
		return -ENOMEM;
	}

	for (i = 0; i < nr; i++)
		xa_init(trees + i);

	nr_zswap_trees[type] = nr;
	zswap_trees[type] = trees;
	return 0;
}
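
/*
 * Illustrative example (editorial, not from the original source): for a
 * 1 GiB swap device with 4 KiB pages, nr_pages == 262144, so with
 * SWAP_ADDRESS_SPACE_PAGES == 16384 this allocates
 * DIV_ROUND_UP(262144, 16384) == 16 xarrays for that swap type.
 */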

void zswap_swapoff(int type)
{
	struct xarray *trees = zswap_trees[type];
	unsigned int i;

	if (!trees)
		return;

	/* try_to_unuse() invalidated all the entries already */
	for (i = 0; i < nr_zswap_trees[type]; i++)
		WARN_ON_ONCE(!xa_empty(trees + i));

	kvfree(trees);
	nr_zswap_trees[type] = 0;
	zswap_trees[type] = NULL;
}

/*********************************
* debugfs functions
**********************************/
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *zswap_debugfs_root;

static int debugfs_get_total_size(void *data, u64 *val)
{
	*val = zswap_total_pages() * PAGE_SIZE;
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(total_size_fops, debugfs_get_total_size, NULL, "%llu\n");

static int zswap_debugfs_init(void)
{
	if (!debugfs_initialized())
		return -ENODEV;

	zswap_debugfs_root = debugfs_create_dir("zswap", NULL);

	debugfs_create_u64("pool_limit_hit", 0444,
			   zswap_debugfs_root, &zswap_pool_limit_hit);
	debugfs_create_u64("reject_reclaim_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_reclaim_fail);
	debugfs_create_u64("reject_alloc_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_alloc_fail);
	debugfs_create_u64("reject_kmemcache_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_kmemcache_fail);
	debugfs_create_u64("reject_compress_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_compress_fail);
	debugfs_create_u64("reject_compress_poor", 0444,
			   zswap_debugfs_root, &zswap_reject_compress_poor);
	debugfs_create_u64("written_back_pages", 0444,
			   zswap_debugfs_root, &zswap_written_back_pages);
	debugfs_create_file("pool_total_size", 0444,
			    zswap_debugfs_root, NULL, &total_size_fops);
	debugfs_create_atomic_t("stored_pages", 0444,
				zswap_debugfs_root, &zswap_stored_pages);

	return 0;
}
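
/*
 * For illustration (editorial note, assuming debugfs is mounted at
 * /sys/kernel/debug): the stats registered above can be read with, e.g.:
 *
 *   cat /sys/kernel/debug/zswap/pool_total_size
 *   cat /sys/kernel/debug/zswap/stored_pages
 */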
#else
static int zswap_debugfs_init(void)
{
	return 0;
}
#endif

/*********************************
* module init and exit
**********************************/
static int zswap_setup(void)
{
	struct zswap_pool *pool;
	int ret;

	zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
	if (!zswap_entry_cache) {
		pr_err("entry cache creation failed\n");
		goto cache_fail;
	}

	ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
				      "mm/zswap_pool:prepare",
				      zswap_cpu_comp_prepare,
				      zswap_cpu_comp_dead);
	if (ret)
		goto hp_fail;

	shrink_wq = alloc_workqueue("zswap-shrink",
			WQ_UNBOUND|WQ_MEM_RECLAIM, 1);
	if (!shrink_wq)
		goto shrink_wq_fail;

	zswap_shrinker = zswap_alloc_shrinker();
	if (!zswap_shrinker)
		goto shrinker_fail;
	if (list_lru_init_memcg(&zswap_list_lru, zswap_shrinker))
		goto lru_fail;
	shrinker_register(zswap_shrinker);

	INIT_WORK(&zswap_shrink_work, shrink_worker);

	pool = __zswap_pool_create_fallback();
	if (pool) {
		pr_info("loaded using pool %s/%s\n", pool->tfm_name,
			zpool_get_type(pool->zpool));
		list_add(&pool->list, &zswap_pools);
		zswap_has_pool = true;
		static_branch_enable(&zswap_ever_enabled);
	} else {
		pr_err("pool creation failed\n");
		zswap_enabled = false;
	}

	if (zswap_debugfs_init())
		pr_warn("debugfs initialization failed\n");
	zswap_init_state = ZSWAP_INIT_SUCCEED;
	return 0;

lru_fail:
	shrinker_free(zswap_shrinker);
shrinker_fail:
	destroy_workqueue(shrink_wq);
shrink_wq_fail:
	cpuhp_remove_multi_state(CPUHP_MM_ZSWP_POOL_PREPARE);
hp_fail:
	kmem_cache_destroy(zswap_entry_cache);
cache_fail:
	/* if built-in, we aren't unloaded on failure; don't allow use */
	zswap_init_state = ZSWAP_INIT_FAILED;
	zswap_enabled = false;
	return -ENOMEM;
}

static int __init zswap_init(void)
{
	if (!zswap_enabled)
		return 0;
	return zswap_setup();
}
/* must be late so crypto has time to come up */
late_initcall(zswap_init);

MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
MODULE_DESCRIPTION("Compressed cache for swap pages");