TOMOYO Linux Cross Reference
Linux/mm/zswap.c

  1 // SPDX-License-Identifier: GPL-2.0-or-later
  2 /*
  3  * zswap.c - zswap driver file
  4  *
  5  * zswap is a cache that takes pages that are in the process
  6  * of being swapped out and attempts to compress and store them in a
  7  * RAM-based memory pool.  This can result in a significant I/O reduction on
  8  * the swap device and, in the case where decompressing from RAM is faster
  9  * than reading from the swap device, can also improve workload performance.
 10  *
 11  * Copyright (C) 2012  Seth Jennings <sjenning@linux.vnet.ibm.com>
 12 */
 13 
 14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 15 
 16 #include <linux/module.h>
 17 #include <linux/cpu.h>
 18 #include <linux/highmem.h>
 19 #include <linux/slab.h>
 20 #include <linux/spinlock.h>
 21 #include <linux/types.h>
 22 #include <linux/atomic.h>
 23 #include <linux/swap.h>
 24 #include <linux/crypto.h>
 25 #include <linux/scatterlist.h>
 26 #include <linux/mempolicy.h>
 27 #include <linux/mempool.h>
 28 #include <linux/zpool.h>
 29 #include <crypto/acompress.h>
 30 #include <linux/zswap.h>
 31 #include <linux/mm_types.h>
 32 #include <linux/page-flags.h>
 33 #include <linux/swapops.h>
 34 #include <linux/writeback.h>
 35 #include <linux/pagemap.h>
 36 #include <linux/workqueue.h>
 37 #include <linux/list_lru.h>
 38 
 39 #include "swap.h"
 40 #include "internal.h"
 41 
 42 /*********************************
 43 * statistics
 44 **********************************/
 45 /* The number of compressed pages currently stored in zswap */
 46 atomic_t zswap_stored_pages = ATOMIC_INIT(0);
 47 /* The number of same-value filled pages currently stored in zswap */
 48 static atomic_t zswap_same_filled_pages = ATOMIC_INIT(0);
 49 
 50 /*
 51  * The statistics below are not protected from concurrent access for
 52  * performance reasons, so they may not be 100% accurate.  However,
 53  * they do provide useful information on roughly how many times a
 54  * certain event is occurring.
 55 */
 56 
 57 /* Pool limit was hit (see zswap_max_pool_percent) */
 58 static u64 zswap_pool_limit_hit;
 59 /* Pages written back when pool limit was reached */
 60 static u64 zswap_written_back_pages;
 61 /* Store failed due to a reclaim failure after pool limit was reached */
 62 static u64 zswap_reject_reclaim_fail;
 63 /* Store failed due to compression algorithm failure */
 64 static u64 zswap_reject_compress_fail;
 65 /* Compressed page was too big for the allocator to (optimally) store */
 66 static u64 zswap_reject_compress_poor;
 67 /* Store failed because underlying allocator could not get memory */
 68 static u64 zswap_reject_alloc_fail;
 69 /* Store failed because the entry metadata could not be allocated (rare) */
 70 static u64 zswap_reject_kmemcache_fail;
 71 
 72 /* Shrinker work queue */
 73 static struct workqueue_struct *shrink_wq;
 74 /* Pool limit was hit, we need to calm down */
 75 static bool zswap_pool_reached_full;
 76 
 77 /*********************************
 78 * tunables
 79 **********************************/
 80 
 81 #define ZSWAP_PARAM_UNSET ""
 82 
 83 static int zswap_setup(void);
 84 
 85 /* Enable/disable zswap */
 86 static DEFINE_STATIC_KEY_MAYBE(CONFIG_ZSWAP_DEFAULT_ON, zswap_ever_enabled);
 87 static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON);
 88 static int zswap_enabled_param_set(const char *,
 89                                    const struct kernel_param *);
 90 static const struct kernel_param_ops zswap_enabled_param_ops = {
 91         .set =          zswap_enabled_param_set,
 92         .get =          param_get_bool,
 93 };
 94 module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);
 95 
 96 /* Crypto compressor to use */
 97 static char *zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
 98 static int zswap_compressor_param_set(const char *,
 99                                       const struct kernel_param *);
100 static const struct kernel_param_ops zswap_compressor_param_ops = {
101         .set =          zswap_compressor_param_set,
102         .get =          param_get_charp,
103         .free =         param_free_charp,
104 };
105 module_param_cb(compressor, &zswap_compressor_param_ops,
106                 &zswap_compressor, 0644);
107 
108 /* Compressed storage zpool to use */
109 static char *zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
110 static int zswap_zpool_param_set(const char *, const struct kernel_param *);
111 static const struct kernel_param_ops zswap_zpool_param_ops = {
112         .set =          zswap_zpool_param_set,
113         .get =          param_get_charp,
114         .free =         param_free_charp,
115 };
116 module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);
117 
118 /* The maximum percentage of memory that the compressed pool can occupy */
119 static unsigned int zswap_max_pool_percent = 20;
120 module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);
121 
122 /* The threshold for accepting new pages after the max_pool_percent was hit */
123 static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */
124 module_param_named(accept_threshold_percent, zswap_accept_thr_percent,
125                    uint, 0644);
126 
127 /* Enable/disable memory pressure-based shrinker. */
128 static bool zswap_shrinker_enabled = IS_ENABLED(
129                 CONFIG_ZSWAP_SHRINKER_DEFAULT_ON);
130 module_param_named(shrinker_enabled, zswap_shrinker_enabled, bool, 0644);
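
/*
 * Usage sketch (not from the original file): the module_param_cb() and
 * module_param_named() declarations above expose these tunables under
 * /sys/module/zswap/parameters/, and the 0644 mode permits runtime
 * updates, e.g.:
 *
 *   echo Y    > /sys/module/zswap/parameters/enabled
 *   echo zstd > /sys/module/zswap/parameters/compressor
 *   echo 25   > /sys/module/zswap/parameters/max_pool_percent
 */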
131 
132 bool zswap_is_enabled(void)
133 {
134         return zswap_enabled;
135 }
136 
137 bool zswap_never_enabled(void)
138 {
139         return !static_branch_maybe(CONFIG_ZSWAP_DEFAULT_ON, &zswap_ever_enabled);
140 }
141 
142 /*********************************
143 * data structures
144 **********************************/
145 
146 struct crypto_acomp_ctx {
147         struct crypto_acomp *acomp;
148         struct acomp_req *req;
149         struct crypto_wait wait;
150         u8 *buffer;
151         struct mutex mutex;
152         bool is_sleepable;
153 };
154 
155 /*
156  * The lock ordering is zswap_tree.lock -> zswap_pool.lru_lock.
157  * The only case where lru_lock is not acquired while holding tree.lock is
 158  * when a zswap_entry is taken off the lru for writeback; in that case it
159  * needs to be verified that it's still valid in the tree.
160  */
161 struct zswap_pool {
162         struct zpool *zpool;
163         struct crypto_acomp_ctx __percpu *acomp_ctx;
164         struct percpu_ref ref;
165         struct list_head list;
166         struct work_struct release_work;
167         struct hlist_node node;
168         char tfm_name[CRYPTO_MAX_ALG_NAME];
169 };
170 
171 /* Global LRU lists shared by all zswap pools. */
172 static struct list_lru zswap_list_lru;
173 
174 /* The lock protects zswap_next_shrink updates. */
175 static DEFINE_SPINLOCK(zswap_shrink_lock);
176 static struct mem_cgroup *zswap_next_shrink;
177 static struct work_struct zswap_shrink_work;
178 static struct shrinker *zswap_shrinker;
179 
180 /*
181  * struct zswap_entry
182  *
183  * This structure contains the metadata for tracking a single compressed
184  * page within zswap.
185  *
 186  * swpentry - associated swap entry, the offset indexes into the xarray
 187  * length - the length in bytes of the compressed page data.  Needed during
 188  *          decompression. For a same-value filled page, length is 0, and both
 189  *          pool and lru are invalid and must be ignored.
 190  * pool - the zswap_pool the entry's data is in
 191  * handle - zpool allocation handle that stores the compressed page data
 192  * value - the word value repeated throughout a same-value filled page
193  * objcg - the obj_cgroup that the compressed memory is charged to
194  * lru - handle to the pool's lru used to evict pages.
195  */
196 struct zswap_entry {
197         swp_entry_t swpentry;
198         unsigned int length;
199         struct zswap_pool *pool;
200         union {
201                 unsigned long handle;
202                 unsigned long value;
203         };
204         struct obj_cgroup *objcg;
205         struct list_head lru;
206 };
207 
208 static struct xarray *zswap_trees[MAX_SWAPFILES];
209 static unsigned int nr_zswap_trees[MAX_SWAPFILES];
210 
211 /* RCU-protected iteration */
212 static LIST_HEAD(zswap_pools);
213 /* protects zswap_pools list modification */
214 static DEFINE_SPINLOCK(zswap_pools_lock);
215 /* pool counter to provide unique names to zpool */
216 static atomic_t zswap_pools_count = ATOMIC_INIT(0);
217 
218 enum zswap_init_type {
219         ZSWAP_UNINIT,
220         ZSWAP_INIT_SUCCEED,
221         ZSWAP_INIT_FAILED
222 };
223 
224 static enum zswap_init_type zswap_init_state;
225 
226 /* used to ensure the integrity of initialization */
227 static DEFINE_MUTEX(zswap_init_lock);
228 
229 /* init completed, but couldn't create the initial pool */
230 static bool zswap_has_pool;
231 
232 /*********************************
233 * helpers and fwd declarations
234 **********************************/
235 
236 static inline struct xarray *swap_zswap_tree(swp_entry_t swp)
237 {
238         return &zswap_trees[swp_type(swp)][swp_offset(swp)
239                 >> SWAP_ADDRESS_SPACE_SHIFT];
240 }
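
/*
 * Worked example, assuming SWAP_ADDRESS_SPACE_SHIFT is 14 (one xarray per
 * 16384 swap slots, i.e. 64 MiB of swap with 4 KiB pages): for a
 * swp_entry_t with type 1 and offset 40000, swap_zswap_tree() returns
 * &zswap_trees[1][40000 >> 14], the third xarray (index 2) of that
 * swap device.
 */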
241 
242 #define zswap_pool_debug(msg, p)                                \
243         pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,         \
244                  zpool_get_type((p)->zpool))
245 
246 /*********************************
247 * pool functions
248 **********************************/
249 static void __zswap_pool_empty(struct percpu_ref *ref);
250 
251 static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
252 {
253         struct zswap_pool *pool;
254         char name[38]; /* 'zswap' + 32 char (max) num + \0 */
255         gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
256         int ret;
257 
258         if (!zswap_has_pool) {
 259                 /* if either is unset, pool initialization failed, and we
260                  * need both params to be set correctly before trying to
261                  * create a pool.
262                  */
263                 if (!strcmp(type, ZSWAP_PARAM_UNSET))
264                         return NULL;
265                 if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
266                         return NULL;
267         }
268 
269         pool = kzalloc(sizeof(*pool), GFP_KERNEL);
270         if (!pool)
271                 return NULL;
272 
273         /* unique name for each pool specifically required by zsmalloc */
274         snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));
275         pool->zpool = zpool_create_pool(type, name, gfp);
276         if (!pool->zpool) {
277                 pr_err("%s zpool not available\n", type);
278                 goto error;
279         }
280         pr_debug("using %s zpool\n", zpool_get_type(pool->zpool));
281 
282         strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));
283 
284         pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx);
285         if (!pool->acomp_ctx) {
286                 pr_err("percpu alloc failed\n");
287                 goto error;
288         }
289 
290         ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
291                                        &pool->node);
292         if (ret)
293                 goto error;
294 
295         /* being the current pool takes 1 ref; this func expects the
296          * caller to always add the new pool as the current pool
297          */
298         ret = percpu_ref_init(&pool->ref, __zswap_pool_empty,
299                               PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
300         if (ret)
301                 goto ref_fail;
302         INIT_LIST_HEAD(&pool->list);
303 
304         zswap_pool_debug("created", pool);
305 
306         return pool;
307 
308 ref_fail:
309         cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
310 error:
311         if (pool->acomp_ctx)
312                 free_percpu(pool->acomp_ctx);
313         if (pool->zpool)
314                 zpool_destroy_pool(pool->zpool);
315         kfree(pool);
316         return NULL;
317 }
318 
319 static struct zswap_pool *__zswap_pool_create_fallback(void)
320 {
321         bool has_comp, has_zpool;
322 
323         has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
324         if (!has_comp && strcmp(zswap_compressor,
325                                 CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
326                 pr_err("compressor %s not available, using default %s\n",
327                        zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
328                 param_free_charp(&zswap_compressor);
329                 zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
330                 has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
331         }
332         if (!has_comp) {
333                 pr_err("default compressor %s not available\n",
334                        zswap_compressor);
335                 param_free_charp(&zswap_compressor);
336                 zswap_compressor = ZSWAP_PARAM_UNSET;
337         }
338 
339         has_zpool = zpool_has_pool(zswap_zpool_type);
340         if (!has_zpool && strcmp(zswap_zpool_type,
341                                  CONFIG_ZSWAP_ZPOOL_DEFAULT)) {
342                 pr_err("zpool %s not available, using default %s\n",
343                        zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT);
344                 param_free_charp(&zswap_zpool_type);
345                 zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
346                 has_zpool = zpool_has_pool(zswap_zpool_type);
347         }
348         if (!has_zpool) {
349                 pr_err("default zpool %s not available\n",
350                        zswap_zpool_type);
351                 param_free_charp(&zswap_zpool_type);
352                 zswap_zpool_type = ZSWAP_PARAM_UNSET;
353         }
354 
355         if (!has_comp || !has_zpool)
356                 return NULL;
357 
358         return zswap_pool_create(zswap_zpool_type, zswap_compressor);
359 }
360 
361 static void zswap_pool_destroy(struct zswap_pool *pool)
362 {
363         zswap_pool_debug("destroying", pool);
364 
365         cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
366         free_percpu(pool->acomp_ctx);
367 
368         zpool_destroy_pool(pool->zpool);
369         kfree(pool);
370 }
371 
372 static void __zswap_pool_release(struct work_struct *work)
373 {
374         struct zswap_pool *pool = container_of(work, typeof(*pool),
375                                                 release_work);
376 
377         synchronize_rcu();
378 
379         /* nobody should have been able to get a ref... */
380         WARN_ON(!percpu_ref_is_zero(&pool->ref));
381         percpu_ref_exit(&pool->ref);
382 
383         /* pool is now off zswap_pools list and has no references. */
384         zswap_pool_destroy(pool);
385 }
386 
387 static struct zswap_pool *zswap_pool_current(void);
388 
389 static void __zswap_pool_empty(struct percpu_ref *ref)
390 {
391         struct zswap_pool *pool;
392 
393         pool = container_of(ref, typeof(*pool), ref);
394 
395         spin_lock_bh(&zswap_pools_lock);
396 
397         WARN_ON(pool == zswap_pool_current());
398 
399         list_del_rcu(&pool->list);
400 
401         INIT_WORK(&pool->release_work, __zswap_pool_release);
402         schedule_work(&pool->release_work);
403 
404         spin_unlock_bh(&zswap_pools_lock);
405 }
406 
407 static int __must_check zswap_pool_get(struct zswap_pool *pool)
408 {
409         if (!pool)
410                 return 0;
411 
412         return percpu_ref_tryget(&pool->ref);
413 }
414 
415 static void zswap_pool_put(struct zswap_pool *pool)
416 {
417         percpu_ref_put(&pool->ref);
418 }
419 
420 static struct zswap_pool *__zswap_pool_current(void)
421 {
422         struct zswap_pool *pool;
423 
424         pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
425         WARN_ONCE(!pool && zswap_has_pool,
426                   "%s: no page storage pool!\n", __func__);
427 
428         return pool;
429 }
430 
431 static struct zswap_pool *zswap_pool_current(void)
432 {
433         assert_spin_locked(&zswap_pools_lock);
434 
435         return __zswap_pool_current();
436 }
437 
438 static struct zswap_pool *zswap_pool_current_get(void)
439 {
440         struct zswap_pool *pool;
441 
442         rcu_read_lock();
443 
444         pool = __zswap_pool_current();
445         if (!zswap_pool_get(pool))
446                 pool = NULL;
447 
448         rcu_read_unlock();
449 
450         return pool;
451 }
452 
453 /* type and compressor must be null-terminated */
454 static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
455 {
456         struct zswap_pool *pool;
457 
458         assert_spin_locked(&zswap_pools_lock);
459 
460         list_for_each_entry_rcu(pool, &zswap_pools, list) {
461                 if (strcmp(pool->tfm_name, compressor))
462                         continue;
463                 if (strcmp(zpool_get_type(pool->zpool), type))
464                         continue;
465                 /* if we can't get it, it's about to be destroyed */
466                 if (!zswap_pool_get(pool))
467                         continue;
468                 return pool;
469         }
470 
471         return NULL;
472 }
473 
474 static unsigned long zswap_max_pages(void)
475 {
476         return totalram_pages() * zswap_max_pool_percent / 100;
477 }
478 
479 static unsigned long zswap_accept_thr_pages(void)
480 {
481         return zswap_max_pages() * zswap_accept_thr_percent / 100;
482 }
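
/*
 * Worked example with the default tunables: on a machine with 16 GiB of
 * RAM (4194304 4 KiB pages), zswap_max_pages() = 4194304 * 20 / 100 =
 * 838860 pages, and zswap_accept_thr_pages() = 838860 * 90 / 100 =
 * 754974 pages.
 */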
483 
484 unsigned long zswap_total_pages(void)
485 {
486         struct zswap_pool *pool;
487         unsigned long total = 0;
488 
489         rcu_read_lock();
490         list_for_each_entry_rcu(pool, &zswap_pools, list)
491                 total += zpool_get_total_pages(pool->zpool);
492         rcu_read_unlock();
493 
494         return total;
495 }
496 
497 static bool zswap_check_limits(void)
498 {
499         unsigned long cur_pages = zswap_total_pages();
500         unsigned long max_pages = zswap_max_pages();
501 
502         if (cur_pages >= max_pages) {
503                 zswap_pool_limit_hit++;
504                 zswap_pool_reached_full = true;
505         } else if (zswap_pool_reached_full &&
506                    cur_pages <= zswap_accept_thr_pages()) {
 507                 zswap_pool_reached_full = false;
508         }
509         return zswap_pool_reached_full;
510 }
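
/*
 * Sketch of the hysteresis above, continuing the 16 GiB example: once the
 * pool grows to >= 838860 pages, new stores are rejected (and the shrink
 * worker queued) until the pool drops back to <= 754974 pages; only then
 * does zswap_pool_reached_full flip back and stores get accepted again.
 */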
511 
512 /*********************************
513 * param callbacks
514 **********************************/
515 
516 static bool zswap_pool_changed(const char *s, const struct kernel_param *kp)
517 {
518         /* no change required */
519         if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
520                 return false;
521         return true;
522 }
523 
524 /* val must be a null-terminated string */
525 static int __zswap_param_set(const char *val, const struct kernel_param *kp,
526                              char *type, char *compressor)
527 {
528         struct zswap_pool *pool, *put_pool = NULL;
529         char *s = strstrip((char *)val);
530         int ret = 0;
531         bool new_pool = false;
532 
533         mutex_lock(&zswap_init_lock);
534         switch (zswap_init_state) {
535         case ZSWAP_UNINIT:
536                 /* if this is load-time (pre-init) param setting,
537                  * don't create a pool; that's done during init.
538                  */
539                 ret = param_set_charp(s, kp);
540                 break;
541         case ZSWAP_INIT_SUCCEED:
542                 new_pool = zswap_pool_changed(s, kp);
543                 break;
544         case ZSWAP_INIT_FAILED:
545                 pr_err("can't set param, initialization failed\n");
546                 ret = -ENODEV;
547         }
548         mutex_unlock(&zswap_init_lock);
549 
550         /* no need to create a new pool, return directly */
551         if (!new_pool)
552                 return ret;
553 
554         if (!type) {
555                 if (!zpool_has_pool(s)) {
556                         pr_err("zpool %s not available\n", s);
557                         return -ENOENT;
558                 }
559                 type = s;
560         } else if (!compressor) {
561                 if (!crypto_has_acomp(s, 0, 0)) {
562                         pr_err("compressor %s not available\n", s);
563                         return -ENOENT;
564                 }
565                 compressor = s;
566         } else {
567                 WARN_ON(1);
568                 return -EINVAL;
569         }
570 
571         spin_lock_bh(&zswap_pools_lock);
572 
573         pool = zswap_pool_find_get(type, compressor);
574         if (pool) {
575                 zswap_pool_debug("using existing", pool);
576                 WARN_ON(pool == zswap_pool_current());
577                 list_del_rcu(&pool->list);
578         }
579 
580         spin_unlock_bh(&zswap_pools_lock);
581 
582         if (!pool)
583                 pool = zswap_pool_create(type, compressor);
584         else {
585                 /*
586                  * Restore the initial ref dropped by percpu_ref_kill()
587                  * when the pool was decommissioned and switch it again
588                  * to percpu mode.
589                  */
590                 percpu_ref_resurrect(&pool->ref);
591 
592                 /* Drop the ref from zswap_pool_find_get(). */
593                 zswap_pool_put(pool);
594         }
595 
596         if (pool)
597                 ret = param_set_charp(s, kp);
598         else
599                 ret = -EINVAL;
600 
601         spin_lock_bh(&zswap_pools_lock);
602 
603         if (!ret) {
604                 put_pool = zswap_pool_current();
605                 list_add_rcu(&pool->list, &zswap_pools);
606                 zswap_has_pool = true;
607         } else if (pool) {
608                 /* add the possibly pre-existing pool to the end of the pools
609                  * list; if it's new (and empty) then it'll be removed and
610                  * destroyed by the put after we drop the lock
611                  */
612                 list_add_tail_rcu(&pool->list, &zswap_pools);
613                 put_pool = pool;
614         }
615 
616         spin_unlock_bh(&zswap_pools_lock);
617 
618         if (!zswap_has_pool && !pool) {
619                 /* if initial pool creation failed, and this pool creation also
620                  * failed, maybe both compressor and zpool params were bad.
621                  * Allow changing this param, so pool creation will succeed
622                  * when the other param is changed. We already verified this
623                  * param is ok in the zpool_has_pool() or crypto_has_acomp()
624                  * checks above.
625                  */
626                 ret = param_set_charp(s, kp);
627         }
628 
629         /* drop the ref from either the old current pool,
630          * or the new pool we failed to add
631          */
632         if (put_pool)
633                 percpu_ref_kill(&put_pool->ref);
634 
635         return ret;
636 }
637 
638 static int zswap_compressor_param_set(const char *val,
639                                       const struct kernel_param *kp)
640 {
641         return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
642 }
643 
644 static int zswap_zpool_param_set(const char *val,
645                                  const struct kernel_param *kp)
646 {
647         return __zswap_param_set(val, kp, NULL, zswap_compressor);
648 }
649 
650 static int zswap_enabled_param_set(const char *val,
651                                    const struct kernel_param *kp)
652 {
653         int ret = -ENODEV;
654 
655         /* if this is load-time (pre-init) param setting, only set param. */
656         if (system_state != SYSTEM_RUNNING)
657                 return param_set_bool(val, kp);
658 
659         mutex_lock(&zswap_init_lock);
660         switch (zswap_init_state) {
661         case ZSWAP_UNINIT:
662                 if (zswap_setup())
663                         break;
664                 fallthrough;
665         case ZSWAP_INIT_SUCCEED:
666                 if (!zswap_has_pool)
667                         pr_err("can't enable, no pool configured\n");
668                 else
669                         ret = param_set_bool(val, kp);
670                 break;
671         case ZSWAP_INIT_FAILED:
672                 pr_err("can't enable, initialization failed\n");
673         }
674         mutex_unlock(&zswap_init_lock);
675 
676         return ret;
677 }
678 
679 /*********************************
680 * lru functions
681 **********************************/
682 
683 /* should be called under RCU */
684 #ifdef CONFIG_MEMCG
685 static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
686 {
687         return entry->objcg ? obj_cgroup_memcg(entry->objcg) : NULL;
688 }
689 #else
690 static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
691 {
692         return NULL;
693 }
694 #endif
695 
696 static inline int entry_to_nid(struct zswap_entry *entry)
697 {
698         return page_to_nid(virt_to_page(entry));
699 }
700 
701 static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
702 {
703         atomic_long_t *nr_zswap_protected;
704         unsigned long lru_size, old, new;
705         int nid = entry_to_nid(entry);
706         struct mem_cgroup *memcg;
707         struct lruvec *lruvec;
708 
709         /*
710          * Note that it is safe to use rcu_read_lock() here, even in the face of
711          * concurrent memcg offlining. Thanks to the memcg->kmemcg_id indirection
712          * used in list_lru lookup, only two scenarios are possible:
713          *
714          * 1. list_lru_add() is called before memcg->kmemcg_id is updated. The
715          *    new entry will be reparented to memcg's parent's list_lru.
716          * 2. list_lru_add() is called after memcg->kmemcg_id is updated. The
717          *    new entry will be added directly to memcg's parent's list_lru.
718          *
719          * Similar reasoning holds for list_lru_del().
720          */
721         rcu_read_lock();
722         memcg = mem_cgroup_from_entry(entry);
723         /* will always succeed */
724         list_lru_add(list_lru, &entry->lru, nid, memcg);
725 
726         /* Update the protection area */
727         lru_size = list_lru_count_one(list_lru, nid, memcg);
728         lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
729         nr_zswap_protected = &lruvec->zswap_lruvec_state.nr_zswap_protected;
730         old = atomic_long_inc_return(nr_zswap_protected);
731         /*
732          * Decay to avoid overflow and adapt to changing workloads.
733          * This is based on LRU reclaim cost decaying heuristics.
734          */
735         do {
736                 new = old > lru_size / 4 ? old / 2 : old;
737         } while (!atomic_long_try_cmpxchg(nr_zswap_protected, &old, new));
738         rcu_read_unlock();
739 }
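
/*
 * Worked example of the decay above: with lru_size = 1000, the protection
 * counter grows by one per zswap_lru_add() call until it exceeds
 * lru_size / 4 = 250; the next increment then halves it (e.g. 251 -> 125),
 * so the protected region adapts to the workload instead of growing
 * without bound.
 */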
740 
741 static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry)
742 {
743         int nid = entry_to_nid(entry);
744         struct mem_cgroup *memcg;
745 
746         rcu_read_lock();
747         memcg = mem_cgroup_from_entry(entry);
748         /* will always succeed */
749         list_lru_del(list_lru, &entry->lru, nid, memcg);
750         rcu_read_unlock();
751 }
752 
753 void zswap_lruvec_state_init(struct lruvec *lruvec)
754 {
755         atomic_long_set(&lruvec->zswap_lruvec_state.nr_zswap_protected, 0);
756 }
757 
758 void zswap_folio_swapin(struct folio *folio)
759 {
760         struct lruvec *lruvec;
761 
762         if (folio) {
763                 lruvec = folio_lruvec(folio);
764                 atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
765         }
766 }
767 
768 void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
769 {
770         /* lock out zswap shrinker walking memcg tree */
771         spin_lock(&zswap_shrink_lock);
772         if (zswap_next_shrink == memcg)
773                 zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
774         spin_unlock(&zswap_shrink_lock);
775 }
776 
777 /*********************************
778 * zswap entry functions
779 **********************************/
780 static struct kmem_cache *zswap_entry_cache;
781 
782 static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp, int nid)
783 {
784         struct zswap_entry *entry;
785         entry = kmem_cache_alloc_node(zswap_entry_cache, gfp, nid);
786         if (!entry)
787                 return NULL;
788         return entry;
789 }
790 
791 static void zswap_entry_cache_free(struct zswap_entry *entry)
792 {
793         kmem_cache_free(zswap_entry_cache, entry);
794 }
795 
796 /*
 797  * Carries out the common pattern of freeing an entry's zpool allocation,
798  * freeing the entry itself, and decrementing the number of stored pages.
799  */
800 static void zswap_entry_free(struct zswap_entry *entry)
801 {
802         if (!entry->length)
803                 atomic_dec(&zswap_same_filled_pages);
804         else {
805                 zswap_lru_del(&zswap_list_lru, entry);
806                 zpool_free(entry->pool->zpool, entry->handle);
807                 zswap_pool_put(entry->pool);
808         }
809         if (entry->objcg) {
810                 obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
811                 obj_cgroup_put(entry->objcg);
812         }
813         zswap_entry_cache_free(entry);
814         atomic_dec(&zswap_stored_pages);
815 }
816 
817 /*********************************
818 * compressed storage functions
819 **********************************/
820 static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
821 {
822         struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
823         struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
824         struct crypto_acomp *acomp;
825         struct acomp_req *req;
826         int ret;
827 
828         mutex_init(&acomp_ctx->mutex);
829 
830         acomp_ctx->buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
831         if (!acomp_ctx->buffer)
832                 return -ENOMEM;
833 
834         acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
835         if (IS_ERR(acomp)) {
836                 pr_err("could not alloc crypto acomp %s : %ld\n",
837                                 pool->tfm_name, PTR_ERR(acomp));
838                 ret = PTR_ERR(acomp);
839                 goto acomp_fail;
840         }
841         acomp_ctx->acomp = acomp;
842         acomp_ctx->is_sleepable = acomp_is_async(acomp);
843 
844         req = acomp_request_alloc(acomp_ctx->acomp);
845         if (!req) {
846                 pr_err("could not alloc crypto acomp_request %s\n",
847                        pool->tfm_name);
848                 ret = -ENOMEM;
849                 goto req_fail;
850         }
851         acomp_ctx->req = req;
852 
853         crypto_init_wait(&acomp_ctx->wait);
854         /*
 855          * If the acomp backend is an async zip device, crypto_req_done() will
 856          * wake up crypto_wait_req(); if the backend is scomp, the callback
 857          * won't be called and crypto_wait_req() will return without blocking.
858          */
859         acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
860                                    crypto_req_done, &acomp_ctx->wait);
861 
862         return 0;
863 
864 req_fail:
865         crypto_free_acomp(acomp_ctx->acomp);
866 acomp_fail:
867         kfree(acomp_ctx->buffer);
868         return ret;
869 }
870 
871 static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
872 {
873         struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
874         struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
875 
876         if (!IS_ERR_OR_NULL(acomp_ctx)) {
877                 if (!IS_ERR_OR_NULL(acomp_ctx->req))
878                         acomp_request_free(acomp_ctx->req);
879                 if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
880                         crypto_free_acomp(acomp_ctx->acomp);
881                 kfree(acomp_ctx->buffer);
882         }
883 
884         return 0;
885 }
886 
887 static bool zswap_compress(struct folio *folio, struct zswap_entry *entry)
888 {
889         struct crypto_acomp_ctx *acomp_ctx;
890         struct scatterlist input, output;
891         int comp_ret = 0, alloc_ret = 0;
892         unsigned int dlen = PAGE_SIZE;
893         unsigned long handle;
894         struct zpool *zpool;
895         char *buf;
896         gfp_t gfp;
897         u8 *dst;
898 
899         acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
900 
901         mutex_lock(&acomp_ctx->mutex);
902 
903         dst = acomp_ctx->buffer;
904         sg_init_table(&input, 1);
905         sg_set_folio(&input, folio, PAGE_SIZE, 0);
906 
907         /*
 908          * We need PAGE_SIZE * 2 here since compression may occasionally expand
 909          * the data, and hardware accelerators may not check the dst buffer
 910          * size, so give the dst buffer enough length to avoid buffer overflow.
911          */
912         sg_init_one(&output, dst, PAGE_SIZE * 2);
913         acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);
914 
915         /*
 916          * It may look a little silly that we send an asynchronous request and
 917          * then wait for its completion synchronously; the process is in fact
 918          * synchronous.
 919          * Theoretically, acomp lets users submit multiple requests to one
 920          * acomp instance and have them completed concurrently. But zswap
 921          * stores and loads pages one at a time, so within a single thread
 922          * there is no way to send a second page before the first page is
 923          * done.
 924          * However, threads running on different CPUs use different acomp
 925          * instances, so multiple threads can do (de)compression in parallel.
926          */
927         comp_ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
928         dlen = acomp_ctx->req->dlen;
929         if (comp_ret)
930                 goto unlock;
931 
932         zpool = entry->pool->zpool;
933         gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
934         if (zpool_malloc_support_movable(zpool))
935                 gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
936         alloc_ret = zpool_malloc(zpool, dlen, gfp, &handle);
937         if (alloc_ret)
938                 goto unlock;
939 
940         buf = zpool_map_handle(zpool, handle, ZPOOL_MM_WO);
941         memcpy(buf, dst, dlen);
942         zpool_unmap_handle(zpool, handle);
943 
944         entry->handle = handle;
945         entry->length = dlen;
946 
947 unlock:
948         if (comp_ret == -ENOSPC || alloc_ret == -ENOSPC)
949                 zswap_reject_compress_poor++;
950         else if (comp_ret)
951                 zswap_reject_compress_fail++;
952         else if (alloc_ret)
953                 zswap_reject_alloc_fail++;
954 
955         mutex_unlock(&acomp_ctx->mutex);
956         return comp_ret == 0 && alloc_ret == 0;
957 }
958 
959 static void zswap_decompress(struct zswap_entry *entry, struct folio *folio)
960 {
961         struct zpool *zpool = entry->pool->zpool;
962         struct scatterlist input, output;
963         struct crypto_acomp_ctx *acomp_ctx;
964         u8 *src;
965 
966         acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
967         mutex_lock(&acomp_ctx->mutex);
968 
969         src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
970         /*
971          * If zpool_map_handle is atomic, we cannot reliably utilize its mapped buffer
972          * to do crypto_acomp_decompress() which might sleep. In such cases, we must
973          * resort to copying the buffer to a temporary one.
974          * Meanwhile, zpool_map_handle() might return a non-linearly mapped buffer,
 975          * such as a kmap address of high memory or even a vmap address.
976          * However, sg_init_one is only equipped to handle linearly mapped low memory.
977          * In such cases, we also must copy the buffer to a temporary and lowmem one.
978          */
979         if ((acomp_ctx->is_sleepable && !zpool_can_sleep_mapped(zpool)) ||
980             !virt_addr_valid(src)) {
981                 memcpy(acomp_ctx->buffer, src, entry->length);
982                 src = acomp_ctx->buffer;
983                 zpool_unmap_handle(zpool, entry->handle);
984         }
985 
986         sg_init_one(&input, src, entry->length);
987         sg_init_table(&output, 1);
988         sg_set_folio(&output, folio, PAGE_SIZE, 0);
989         acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
990         BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
991         BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
992         mutex_unlock(&acomp_ctx->mutex);
993 
994         if (src != acomp_ctx->buffer)
995                 zpool_unmap_handle(zpool, entry->handle);
996 }
997 
998 /*********************************
999 * writeback code
1000 **********************************/
1001 /*
1002  * Attempts to free an entry by adding a folio to the swap cache,
1003  * decompressing the entry data into the folio, and issuing a
1004  * bio write to write the folio back to the swap device.
1005  *
1006  * This can be thought of as a "resumed writeback" of the folio
1007  * to the swap device.  We are basically resuming the same swap
1008  * writeback path that was intercepted by zswap_store()
1009  * in the first place.  After the folio has been decompressed into
1010  * the swap cache, the compressed version stored by zswap can be
1011  * freed.
1012  */
1013 static int zswap_writeback_entry(struct zswap_entry *entry,
1014                                  swp_entry_t swpentry)
1015 {
1016         struct xarray *tree;
1017         pgoff_t offset = swp_offset(swpentry);
1018         struct folio *folio;
1019         struct mempolicy *mpol;
1020         bool folio_was_allocated;
1021         struct writeback_control wbc = {
1022                 .sync_mode = WB_SYNC_NONE,
1023         };
1024 
1025         /* try to allocate swap cache folio */
1026         mpol = get_task_policy(current);
1027         folio = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
1028                                 NO_INTERLEAVE_INDEX, &folio_was_allocated, true);
1029         if (!folio)
1030                 return -ENOMEM;
1031 
1032         /*
1033          * Found an existing folio, we raced with swapin or concurrent
1034          * shrinker. We generally writeback cold folios from zswap, and
1035          * swapin means the folio just became hot, so skip this folio.
1037          * For the unlikely concurrent shrinker case, it will be unlinked
1037          * and freed when invalidated by the concurrent shrinker anyway.
1038          */
1039         if (!folio_was_allocated) {
1040                 folio_put(folio);
1041                 return -EEXIST;
1042         }
1043 
1044         /*
1045          * folio is locked, and the swapcache is now secured against
1046          * concurrent swapping to and from the slot, and concurrent
1047          * swapoff so we can safely dereference the zswap tree here.
1048          * Verify that the swap entry hasn't been invalidated and recycled
1049          * behind our backs, to avoid overwriting a new swap folio with
1050          * old compressed data. Only when this is successful can the entry
1051          * be dereferenced.
1052          */
1053         tree = swap_zswap_tree(swpentry);
1054         if (entry != xa_cmpxchg(tree, offset, entry, NULL, GFP_KERNEL)) {
1055                 delete_from_swap_cache(folio);
1056                 folio_unlock(folio);
1057                 folio_put(folio);
1058                 return -ENOMEM;
1059         }
1060 
1061         zswap_decompress(entry, folio);
1062 
1063         count_vm_event(ZSWPWB);
1064         if (entry->objcg)
1065                 count_objcg_event(entry->objcg, ZSWPWB);
1066 
1067         zswap_entry_free(entry);
1068 
1069         /* folio is up to date */
1070         folio_mark_uptodate(folio);
1071 
1072         /* move it to the tail of the inactive list after end_writeback */
1073         folio_set_reclaim(folio);
1074 
1075         /* start writeback */
1076         __swap_writepage(folio, &wbc);
1077         folio_put(folio);
1078 
1079         return 0;
1080 }
1081 
1082 /*********************************
1083 * shrinker functions
1084 **********************************/
1085 static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
1086                                        spinlock_t *lock, void *arg)
1087 {
1088         struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
1089         bool *encountered_page_in_swapcache = (bool *)arg;
1090         swp_entry_t swpentry;
1091         enum lru_status ret = LRU_REMOVED_RETRY;
1092         int writeback_result;
1093 
1094         /*
1095          * As soon as we drop the LRU lock, the entry can be freed by
1096          * a concurrent invalidation. This means the following:
1097          *
1098          * 1. We extract the swp_entry_t to the stack, allowing
1099          *    zswap_writeback_entry() to pin the swap entry and
1100          *    then validate the zswap entry against that swap entry's
1101          *    tree using pointer value comparison. Only when that
1102          *    is successful can the entry be dereferenced.
1103          *
1104          * 2. Usually, objects are taken off the LRU for reclaim. In
1105          *    this case this isn't possible, because if reclaim fails
1106          *    for whatever reason, we have no means of knowing if the
1107          *    entry is alive to put it back on the LRU.
1108          *
1109          *    So rotate it before dropping the lock. If the entry is
1110          *    written back or invalidated, the free path will unlink
1111          *    it. For failures, rotation is the right thing as well.
1112          *
1113          *    Temporary failures, where the same entry should be tried
1114          *    again immediately, almost never happen for this shrinker.
1115          *    We don't do any trylocking; -ENOMEM comes closest,
1116          *    but that's extremely rare and doesn't happen spuriously
1117          *    either. Don't bother distinguishing this case.
1118          */
1119         list_move_tail(item, &l->list);
1120 
1121         /*
1122          * Once the lru lock is dropped, the entry might get freed. The
1123          * swpentry is copied to the stack, and entry isn't deref'd again
1124          * until the entry is verified to still be alive in the tree.
1125          */
1126         swpentry = entry->swpentry;
1127 
1128         /*
1129          * It's safe to drop the lock here because we return either
1130          * LRU_REMOVED_RETRY or LRU_RETRY.
1131          */
1132         spin_unlock(lock);
1133 
1134         writeback_result = zswap_writeback_entry(entry, swpentry);
1135 
1136         if (writeback_result) {
1137                 zswap_reject_reclaim_fail++;
1138                 ret = LRU_RETRY;
1139 
1140                 /*
1141                  * Encountering a page already in swap cache is a sign that we are shrinking
1142                  * into the warmer region. We should terminate shrinking (if we're in the dynamic
1143                  * shrinker context).
1144                  */
1145                 if (writeback_result == -EEXIST && encountered_page_in_swapcache) {
1146                         ret = LRU_STOP;
1147                         *encountered_page_in_swapcache = true;
1148                 }
1149         } else {
1150                 zswap_written_back_pages++;
1151         }
1152 
1153         spin_lock(lock);
1154         return ret;
1155 }
1156 
1157 static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
1158                 struct shrink_control *sc)
1159 {
1160         struct lruvec *lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
1161         unsigned long shrink_ret, nr_protected, lru_size;
1162         bool encountered_page_in_swapcache = false;
1163 
1164         if (!zswap_shrinker_enabled ||
1165                         !mem_cgroup_zswap_writeback_enabled(sc->memcg)) {
1166                 sc->nr_scanned = 0;
1167                 return SHRINK_STOP;
1168         }
1169 
1170         nr_protected =
1171                 atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
1172         lru_size = list_lru_shrink_count(&zswap_list_lru, sc);
1173 
1174         /*
1175          * Abort if we are shrinking into the protected region.
1176          *
1177          * This short-circuiting is necessary because if we have too many
1178          * concurrent reclaimers getting the freeable zswap object counts at the
1179          * same time (before any of them made reasonable progress), the total
1180          * number of reclaimed objects might be more than the number of unprotected
1181          * objects (i.e the reclaimers will reclaim into the protected area of the
1182          * zswap LRU).
1183          */
1184         if (nr_protected >= lru_size - sc->nr_to_scan) {
1185                 sc->nr_scanned = 0;
1186                 return SHRINK_STOP;
1187         }
1188 
1189         shrink_ret = list_lru_shrink_walk(&zswap_list_lru, sc, &shrink_memcg_cb,
1190                 &encountered_page_in_swapcache);
1191 
1192         if (encountered_page_in_swapcache)
1193                 return SHRINK_STOP;
1194 
1195         return shrink_ret ? shrink_ret : SHRINK_STOP;
1196 }
1197 
1198 static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
1199                 struct shrink_control *sc)
1200 {
1201         struct mem_cgroup *memcg = sc->memcg;
1202         struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(sc->nid));
1203         unsigned long nr_backing, nr_stored, nr_freeable, nr_protected;
1204 
1205         if (!zswap_shrinker_enabled || !mem_cgroup_zswap_writeback_enabled(memcg))
1206                 return 0;
1207 
1208         /*
1209          * The shrinker resumes swap writeback, which will enter block
1210          * and may enter fs. XXX: Harmonize with vmscan.c __GFP_FS
1211          * rules (may_enter_fs()), which apply on a per-folio basis.
1212          */
1213         if (!gfp_has_io_fs(sc->gfp_mask))
1214                 return 0;
1215 
1216         /*
1217          * For memcg, use the cgroup-wide ZSWAP stats since we don't
1218          * have them per-node and thus per-lruvec. Careful if memcg is
1219          * runtime-disabled: we can get sc->memcg == NULL, which is ok
1220          * for the lruvec, but not for memcg_page_state().
1221          *
1222          * Without memcg, use the zswap pool-wide metrics.
1223          */
1224         if (!mem_cgroup_disabled()) {
1225                 mem_cgroup_flush_stats(memcg);
1226                 nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
1227                 nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
1228         } else {
1229                 nr_backing = zswap_total_pages();
1230                 nr_stored = atomic_read(&zswap_stored_pages);
1231         }
1232 
1233         if (!nr_stored)
1234                 return 0;
1235 
1236         nr_protected =
1237                 atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
1238         nr_freeable = list_lru_shrink_count(&zswap_list_lru, sc);
1239         /*
1240          * Subtract the lru size by an estimate of the number of pages
1241          * that should be protected.
1242          */
1243         nr_freeable = nr_freeable > nr_protected ? nr_freeable - nr_protected : 0;
1244 
1245         /*
1246          * Scale the number of freeable pages by the memory saving factor.
1247          * This ensures that the better zswap compresses memory, the fewer
1248          * pages we will evict to swap (as it will otherwise incur IO for
1249          * relatively small memory saving).
1250          *
1251          * The memory saving factor calculated here takes same-filled pages into
1252          * account, but those are not freeable since they occupy almost no
1253          * space. Hence, we may scale nr_freeable down a little bit more than we
1254          * should if we have a lot of same-filled pages.
1255          */
1256         return mult_frac(nr_freeable, nr_backing, nr_stored);
1257 }
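
/*
 * Worked example of the scaling above: with nr_freeable = 1000 entries,
 * nr_backing = 250 pages of compressed storage and nr_stored = 1000
 * stored pages (a 4:1 compression ratio), mult_frac(1000, 250, 1000)
 * reports 250 freeable objects, so the better zswap compresses, the less
 * pressure this shrinker advertises.
 */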
1258 
1259 static struct shrinker *zswap_alloc_shrinker(void)
1260 {
1261         struct shrinker *shrinker;
1262 
1263         shrinker =
1264                 shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE, "mm-zswap");
1265         if (!shrinker)
1266                 return NULL;
1267 
1268         shrinker->scan_objects = zswap_shrinker_scan;
1269         shrinker->count_objects = zswap_shrinker_count;
1270         shrinker->batch = 0;
1271         shrinker->seeks = DEFAULT_SEEKS;
1272         return shrinker;
1273 }
1274 
1275 static int shrink_memcg(struct mem_cgroup *memcg)
1276 {
1277         int nid, shrunk = 0;
1278 
1279         if (!mem_cgroup_zswap_writeback_enabled(memcg))
1280                 return -EINVAL;
1281 
1282         /*
1283          * Skip zombies because their LRUs are reparented and we would be
1284          * reclaiming from the parent instead of the dead memcg.
1285          */
1286         if (memcg && !mem_cgroup_online(memcg))
1287                 return -ENOENT;
1288 
1289         for_each_node_state(nid, N_NORMAL_MEMORY) {
1290                 unsigned long nr_to_walk = 1;
1291 
1292                 shrunk += list_lru_walk_one(&zswap_list_lru, nid, memcg,
1293                                             &shrink_memcg_cb, NULL, &nr_to_walk);
1294         }
1295         return shrunk ? 0 : -EAGAIN;
1296 }
1297 
1298 static void shrink_worker(struct work_struct *w)
1299 {
1300         struct mem_cgroup *memcg;
1301         int ret, failures = 0;
1302         unsigned long thr;
1303 
1304         /* Reclaim down to the accept threshold */
1305         thr = zswap_accept_thr_pages();
1306 
1307         /* global reclaim will select cgroup in a round-robin fashion. */
1308         do {
1309                 spin_lock(&zswap_shrink_lock);
1310                 zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
1311                 memcg = zswap_next_shrink;
1312 
1313                 /*
1314                  * We need to retry if we have gone through a full round trip, or if we
1315                  * got an offline memcg (or else we risk undoing the effect of the
1316                  * zswap memcg offlining cleanup callback). This is not catastrophic
1317                  * per se, but it will keep the now offlined memcg hostage for a while.
1318                  *
1319                  * Note that if we got an online memcg, we will keep the extra
1320                  * reference in case the original reference obtained by mem_cgroup_iter
1321                  * is dropped by the zswap memcg offlining callback, ensuring that the
1322                  * memcg is not killed when we are reclaiming.
1323                  */
1324                 if (!memcg) {
1325                         spin_unlock(&zswap_shrink_lock);
1326                         if (++failures == MAX_RECLAIM_RETRIES)
1327                                 break;
1328 
1329                         goto resched;
1330                 }
1331 
1332                 if (!mem_cgroup_tryget_online(memcg)) {
1333                         /* drop the reference from mem_cgroup_iter() */
1334                         mem_cgroup_iter_break(NULL, memcg);
1335                         zswap_next_shrink = NULL;
1336                         spin_unlock(&zswap_shrink_lock);
1337 
1338                         if (++failures == MAX_RECLAIM_RETRIES)
1339                                 break;
1340 
1341                         goto resched;
1342                 }
1343                 spin_unlock(&zswap_shrink_lock);
1344 
1345                 ret = shrink_memcg(memcg);
1346                 /* drop the extra reference */
1347                 mem_cgroup_put(memcg);
1348 
1349                 if (ret == -EINVAL)
1350                         break;
1351                 if (ret && ++failures == MAX_RECLAIM_RETRIES)
1352                         break;
1353 resched:
1354                 cond_resched();
1355         } while (zswap_total_pages() > thr);
1356 }
1357 
1358 /*********************************
1359 * same-filled functions
1360 **********************************/
1361 static bool zswap_is_folio_same_filled(struct folio *folio, unsigned long *value)
1362 {
1363         unsigned long *data;
1364         unsigned long val;
1365         unsigned int pos, last_pos = PAGE_SIZE / sizeof(*data) - 1;
1366         bool ret = false;
1367 
1368         data = kmap_local_folio(folio, 0);
1369         val = data[0];
1370 
1371         if (val != data[last_pos])
1372                 goto out;
1373 
1374         for (pos = 1; pos < last_pos; pos++) {
1375                 if (val != data[pos])
1376                         goto out;
1377         }
1378 
1379         *value = val;
1380         ret = true;
1381 out:
1382         kunmap_local(data);
1383         return ret;
1384 }
1385 
1386 static void zswap_fill_folio(struct folio *folio, unsigned long value)
1387 {
1388         unsigned long *data = kmap_local_folio(folio, 0);
1389 
1390         memset_l(data, value, PAGE_SIZE / sizeof(unsigned long));
1391         kunmap_local(data);
1392 }
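
/*
 * Example of the same-filled path: a page holding the byte pattern 0xff
 * throughout is detected by zswap_is_folio_same_filled() with *value =
 * 0xffffffffffffffff (on 64-bit), stored as a zswap_entry with length 0
 * and entry->value set, and can later be recreated by zswap_fill_folio()
 * without touching the compressor or the zpool.
 */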
1393 
1394 /*********************************
1395 * main API
1396 **********************************/
1397 bool zswap_store(struct folio *folio)
1398 {
1399         swp_entry_t swp = folio->swap;
1400         pgoff_t offset = swp_offset(swp);
1401         struct xarray *tree = swap_zswap_tree(swp);
1402         struct zswap_entry *entry, *old;
1403         struct obj_cgroup *objcg = NULL;
1404         struct mem_cgroup *memcg = NULL;
1405         unsigned long value;
1406 
1407         VM_WARN_ON_ONCE(!folio_test_locked(folio));
1408         VM_WARN_ON_ONCE(!folio_test_swapcache(folio));
1409 
1410         /* Large folios aren't supported */
1411         if (folio_test_large(folio))
1412                 return false;
1413 
1414         if (!zswap_enabled)
1415                 goto check_old;
1416 
1417         /* Check cgroup limits */
1418         objcg = get_obj_cgroup_from_folio(folio);
1419         if (objcg && !obj_cgroup_may_zswap(objcg)) {
1420                 memcg = get_mem_cgroup_from_objcg(objcg);
1421                 if (shrink_memcg(memcg)) {
1422                         mem_cgroup_put(memcg);
1423                         goto reject;
1424                 }
1425                 mem_cgroup_put(memcg);
1426         }
1427 
1428         if (zswap_check_limits())
1429                 goto reject;
1430 
1431         /* allocate entry */
1432         entry = zswap_entry_cache_alloc(GFP_KERNEL, folio_nid(folio));
1433         if (!entry) {
1434                 zswap_reject_kmemcache_fail++;
1435                 goto reject;
1436         }
1437 
1438         if (zswap_is_folio_same_filled(folio, &value)) {
1439                 entry->length = 0;
1440                 entry->value = value;
1441                 atomic_inc(&zswap_same_filled_pages);
1442                 goto store_entry;
1443         }
1444 
1445         /* if entry is successfully added, it keeps the reference */
1446         entry->pool = zswap_pool_current_get();
1447         if (!entry->pool)
1448                 goto freepage;
1449 
1450         if (objcg) {
1451                 memcg = get_mem_cgroup_from_objcg(objcg);
1452                 if (memcg_list_lru_alloc(memcg, &zswap_list_lru, GFP_KERNEL)) {
1453                         mem_cgroup_put(memcg);
1454                         goto put_pool;
1455                 }
1456                 mem_cgroup_put(memcg);
1457         }
1458 
1459         if (!zswap_compress(folio, entry))
1460                 goto put_pool;
1461 
1462 store_entry:
1463         entry->swpentry = swp;
1464         entry->objcg = objcg;
1465 
1466         old = xa_store(tree, offset, entry, GFP_KERNEL);
1467         if (xa_is_err(old)) {
1468                 int err = xa_err(old);
1469 
1470                 WARN_ONCE(err != -ENOMEM, "unexpected xarray error: %d\n", err);
1471                 zswap_reject_alloc_fail++;
1472                 goto store_failed;
1473         }
1474 
1475         /*
1476          * We may have had an existing entry that became stale when
1477          * the folio was redirtied and now the new version is being
1478          * swapped out. Get rid of the old.
1479          */
1480         if (old)
1481                 zswap_entry_free(old);
1482 
1483         if (objcg) {
1484                 obj_cgroup_charge_zswap(objcg, entry->length);
1485                 count_objcg_event(objcg, ZSWPOUT);
1486         }
1487 
1488         /*
1489          * We finish initializing the entry while it's already in xarray.
1490          * This is safe because:
1491          *
1492          * 1. Concurrent stores and invalidations are excluded by folio lock.
1493          *
1494          * 2. Writeback is excluded by the entry not being on the LRU yet.
1495          *    The publishing order matters to prevent writeback from seeing
1496          *    an incoherent entry.
1497          */
1498         if (entry->length) {
1499                 INIT_LIST_HEAD(&entry->lru);
1500                 zswap_lru_add(&zswap_list_lru, entry);
1501         }
1502 
1503         /* update stats */
1504         atomic_inc(&zswap_stored_pages);
1505         count_vm_event(ZSWPOUT);
1506 
1507         return true;
1508 
1509 store_failed:
1510         if (!entry->length)
1511                 atomic_dec(&zswap_same_filled_pages);
1512         else {
1513                 zpool_free(entry->pool->zpool, entry->handle);
1514 put_pool:
1515                 zswap_pool_put(entry->pool);
1516         }
1517 freepage:
1518         zswap_entry_cache_free(entry);
1519 reject:
1520         obj_cgroup_put(objcg);
1521         if (zswap_pool_reached_full)
1522                 queue_work(shrink_wq, &zswap_shrink_work);
1523 check_old:
1524         /*
1525          * If the zswap store fails or zswap is disabled, we must invalidate the
1526          * possibly stale entry which was previously stored at this offset.
1527          * Otherwise, writeback could overwrite the new data in the swapfile.
1528          */
1529         entry = xa_erase(tree, offset);
1530         if (entry)
1531                 zswap_entry_free(entry);
1532         return false;
1533 }
1534 
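     /*
      * Fill @folio from the entry stored for its swap slot, either by
      * decompressing or, for same-filled entries, by rewriting the
      * stored word. Returns true if zswap owned the data (the folio is
      * marked uptodate); false tells the caller to read from the swap
      * device instead. The expected caller is swap_read_folio() in
      * mm/page_io.c, though the call-site name may differ by version.
      */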
1535 bool zswap_load(struct folio *folio)
1536 {
1537         swp_entry_t swp = folio->swap;
1538         pgoff_t offset = swp_offset(swp);
1539         bool swapcache = folio_test_swapcache(folio);
1540         struct xarray *tree = swap_zswap_tree(swp);
1541         struct zswap_entry *entry;
1542 
1543         VM_WARN_ON_ONCE(!folio_test_locked(folio));
1544 
1545         if (zswap_never_enabled())
1546                 return false;
1547 
1548         /*
1549          * Large folios should not be swapped in while zswap is being used:
1550          * zswap cannot load them properly, and a large folio may only be
1551          * partially in zswap.
1552          *
1553          * Return true without marking the folio uptodate so that an IO error is
1554          * emitted (e.g. do_swap_page() will sigbus).
1555          */
1556         if (WARN_ON_ONCE(folio_test_large(folio)))
1557                 return true;
1558 
1559         /*
1560          * When reading into the swapcache, invalidate our entry. The
1561          * swapcache can be the authoritative owner of the page and
1562          * its mappings, and the pressure that results from having two
1563          * in-memory copies outweighs any benefits of caching the
1564          * compression work.
1565          *
1566          * (Most swapins go through the swapcache. The notable
1567          * exception is the singleton fault on SWP_SYNCHRONOUS_IO
1568          * files, which reads into a private page and may free it if
1569          * devices, which reads into a private page and may free it if
1570          */
1571         if (swapcache)
1572                 entry = xa_erase(tree, offset);
1573         else
1574                 entry = xa_load(tree, offset);
1575 
1576         if (!entry)
1577                 return false;
1578 
1579         if (entry->length)
1580                 zswap_decompress(entry, folio);
1581         else
1582                 zswap_fill_folio(folio, entry->value);
1583 
1584         count_vm_event(ZSWPIN);
1585         if (entry->objcg)
1586                 count_objcg_event(entry->objcg, ZSWPIN);
1587 
1588         if (swapcache) {
1589                 zswap_entry_free(entry);
1590                 folio_mark_dirty(folio);
1591         }
1592 
1593         folio_mark_uptodate(folio);
1594         return true;
1595 }
1596 
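     /*
      * Drop any compressed copy for a swap slot, typically as the slot
      * itself is freed. Harmless for offsets with no zswap entry:
      * xa_erase() simply returns NULL.
      */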
1597 void zswap_invalidate(swp_entry_t swp)
1598 {
1599         pgoff_t offset = swp_offset(swp);
1600         struct xarray *tree = swap_zswap_tree(swp);
1601         struct zswap_entry *entry;
1602 
1603         entry = xa_erase(tree, offset);
1604         if (entry)
1605                 zswap_entry_free(entry);
1606 }
1607 
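     /*
      * Allocate the lookup trees when a swap device of @type with
      * @nr_pages slots is enabled. One xarray covers a
      * SWAP_ADDRESS_SPACE_PAGES-sized chunk of the swapfile, mirroring
      * the swapcache split (usually 1 << 14 slots per tree, i.e. 64MB of
      * 4K pages, though that constant lives elsewhere and may vary), to
      * spread lock contention across trees.
      */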
1608 int zswap_swapon(int type, unsigned long nr_pages)
1609 {
1610         struct xarray *trees, *tree;
1611         unsigned int nr, i;
1612 
1613         nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
1614         trees = kvcalloc(nr, sizeof(*tree), GFP_KERNEL);
1615         if (!trees) {
1616                 pr_err("alloc failed, zswap disabled for swap type %d\n", type);
1617                 return -ENOMEM;
1618         }
1619 
1620         for (i = 0; i < nr; i++)
1621                 xa_init(trees + i);
1622 
1623         nr_zswap_trees[type] = nr;
1624         zswap_trees[type] = trees;
1625         return 0;
1626 }
1627 
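     /*
      * Tear down the trees on swapoff. try_to_unuse() has already
      * faulted everything back in, and each load/invalidate erased its
      * entry, so the trees should be empty by now; the WARN below flags
      * leaked entries instead of freeing them.
      */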
1628 void zswap_swapoff(int type)
1629 {
1630         struct xarray *trees = zswap_trees[type];
1631         unsigned int i;
1632 
1633         if (!trees)
1634                 return;
1635 
1636         /* try_to_unuse() invalidated all the entries already */
1637         for (i = 0; i < nr_zswap_trees[type]; i++)
1638                 WARN_ON_ONCE(!xa_empty(trees + i));
1639 
1640         kvfree(trees);
1641         nr_zswap_trees[type] = 0;
1642         zswap_trees[type] = NULL;
1643 }
1644 
1645 /*********************************
1646 * debugfs functions
1647 **********************************/
1648 #ifdef CONFIG_DEBUG_FS
1649 #include <linux/debugfs.h>
1650 
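     /*
      * The counters below appear under <debugfs>/zswap/, typically
      * mounted at /sys/kernel/debug/zswap/.
      */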
1651 static struct dentry *zswap_debugfs_root;
1652 
1653 static int debugfs_get_total_size(void *data, u64 *val)
1654 {
1655         *val = zswap_total_pages() * PAGE_SIZE;
1656         return 0;
1657 }
1658 DEFINE_DEBUGFS_ATTRIBUTE(total_size_fops, debugfs_get_total_size, NULL, "%llu\n");
1659 
1660 static int zswap_debugfs_init(void)
1661 {
1662         if (!debugfs_initialized())
1663                 return -ENODEV;
1664 
1665         zswap_debugfs_root = debugfs_create_dir("zswap", NULL);
1666 
1667         debugfs_create_u64("pool_limit_hit", 0444,
1668                            zswap_debugfs_root, &zswap_pool_limit_hit);
1669         debugfs_create_u64("reject_reclaim_fail", 0444,
1670                            zswap_debugfs_root, &zswap_reject_reclaim_fail);
1671         debugfs_create_u64("reject_alloc_fail", 0444,
1672                            zswap_debugfs_root, &zswap_reject_alloc_fail);
1673         debugfs_create_u64("reject_kmemcache_fail", 0444,
1674                            zswap_debugfs_root, &zswap_reject_kmemcache_fail);
1675         debugfs_create_u64("reject_compress_fail", 0444,
1676                            zswap_debugfs_root, &zswap_reject_compress_fail);
1677         debugfs_create_u64("reject_compress_poor", 0444,
1678                            zswap_debugfs_root, &zswap_reject_compress_poor);
1679         debugfs_create_u64("written_back_pages", 0444,
1680                            zswap_debugfs_root, &zswap_written_back_pages);
1681         debugfs_create_file("pool_total_size", 0444,
1682                             zswap_debugfs_root, NULL, &total_size_fops);
1683         debugfs_create_atomic_t("stored_pages", 0444,
1684                                 zswap_debugfs_root, &zswap_stored_pages);
1685         debugfs_create_atomic_t("same_filled_pages", 0444,
1686                                 zswap_debugfs_root, &zswap_same_filled_pages);
1687 
1688         return 0;
1689 }
1690 #else
1691 static int zswap_debugfs_init(void)
1692 {
1693         return 0;
1694 }
1695 #endif
1696 
1697 /*********************************
1698 * module init and exit
1699 **********************************/
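     /*
      * One-time initialization, reached via zswap_init() below once boot
      * is far enough along for crypto. Resources are acquired in
      * dependency order and, on failure, unwound through the labels at
      * the bottom in reverse.
      */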
1700 static int zswap_setup(void)
1701 {
1702         struct zswap_pool *pool;
1703         int ret;
1704 
1705         zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
1706         if (!zswap_entry_cache) {
1707                 pr_err("entry cache creation failed\n");
1708                 goto cache_fail;
1709         }
1710 
1711         ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
1712                                       "mm/zswap_pool:prepare",
1713                                       zswap_cpu_comp_prepare,
1714                                       zswap_cpu_comp_dead);
1715         if (ret)
1716                 goto hp_fail;
1717 
1718         shrink_wq = alloc_workqueue("zswap-shrink",
1719                         WQ_UNBOUND|WQ_MEM_RECLAIM, 1);
1720         if (!shrink_wq)
1721                 goto shrink_wq_fail;
1722 
1723         zswap_shrinker = zswap_alloc_shrinker();
1724         if (!zswap_shrinker)
1725                 goto shrinker_fail;
1726         if (list_lru_init_memcg(&zswap_list_lru, zswap_shrinker))
1727                 goto lru_fail;
1728         shrinker_register(zswap_shrinker);
1729 
1730         INIT_WORK(&zswap_shrink_work, shrink_worker);
1731 
1732         pool = __zswap_pool_create_fallback();
1733         if (pool) {
1734                 pr_info("loaded using pool %s/%s\n", pool->tfm_name,
1735                         zpool_get_type(pool->zpool));
1736                 list_add(&pool->list, &zswap_pools);
1737                 zswap_has_pool = true;
1738                 static_branch_enable(&zswap_ever_enabled);
1739         } else {
1740                 pr_err("pool creation failed\n");
1741                 zswap_enabled = false;
1742         }
1743 
1744         if (zswap_debugfs_init())
1745                 pr_warn("debugfs initialization failed\n");
1746         zswap_init_state = ZSWAP_INIT_SUCCEED;
1747         return 0;
1748 
1749 lru_fail:
1750         shrinker_free(zswap_shrinker);
1751 shrinker_fail:
1752         destroy_workqueue(shrink_wq);
1753 shrink_wq_fail:
1754         cpuhp_remove_multi_state(CPUHP_MM_ZSWP_POOL_PREPARE);
1755 hp_fail:
1756         kmem_cache_destroy(zswap_entry_cache);
1757 cache_fail:
1758         /* if built-in, we aren't unloaded on failure; don't allow use */
1759         zswap_init_state = ZSWAP_INIT_FAILED;
1760         zswap_enabled = false;
1761         return -ENOMEM;
1762 }
1763 
1764 static int __init zswap_init(void)
1765 {
1766         if (!zswap_enabled)
1767                 return 0;
1768         return zswap_setup();
1769 }
1770 /* must be late so crypto has time to come up */
1771 late_initcall(zswap_init);
1772 
1773 MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
1774 MODULE_DESCRIPTION("Compressed cache for swap pages");
1775 
