TOMOYO Linux Cross Reference
Linux/fs/erofs/zdata.c

  1 // SPDX-License-Identifier: GPL-2.0-only
  2 /*
  3  * Copyright (C) 2018 HUAWEI, Inc.
  4  *             https://www.huawei.com/
  5  * Copyright (C) 2022 Alibaba Cloud
  6  */
  7 #include "compress.h"
  8 #include <linux/psi.h>
  9 #include <linux/cpuhotplug.h>
 10 #include <trace/events/erofs.h>
 11 
 12 #define Z_EROFS_PCLUSTER_MAX_PAGES      (Z_EROFS_PCLUSTER_MAX_SIZE / PAGE_SIZE)
 13 #define Z_EROFS_INLINE_BVECS            2
 14 
 15 /*
 16  * let's keep a dedicated type here in case another
 17  * tagged pointer is introduced later.
 18  */
 19 typedef void *z_erofs_next_pcluster_t;
 20 
 21 struct z_erofs_bvec {
 22         struct page *page;
 23         int offset;
 24         unsigned int end;
 25 };
 26 
 27 #define __Z_EROFS_BVSET(name, total) \
 28 struct name { \
 29         /* point to the next page which contains the following bvecs */ \
 30         struct page *nextpage; \
 31         struct z_erofs_bvec bvec[total]; \
 32 }
 33 __Z_EROFS_BVSET(z_erofs_bvset,);
 34 __Z_EROFS_BVSET(z_erofs_bvset_inline, Z_EROFS_INLINE_BVECS);
 35 
 36 /*
 37  * Structure fields follow one of the following exclusion rules.
 38  *
 39  * I: Modifiable by initialization/destruction paths and read-only
 40  *    for everyone else;
 41  *
 42  * L: Field should be protected by the pcluster lock;
 43  *
 44  * A: Field should be accessed / updated atomically by parallelized code.
 45  */
 46 struct z_erofs_pcluster {
 47         struct erofs_workgroup obj;
 48         struct mutex lock;
 49 
 50         /* A: points to the next chained pcluster or TAIL */
 51         z_erofs_next_pcluster_t next;
 52 
 53         /* L: the maximum decompression size of this round */
 54         unsigned int length;
 55 
 56         /* L: total number of bvecs */
 57         unsigned int vcnt;
 58 
 59         /* I: pcluster size (compressed size) in bytes */
 60         unsigned int pclustersize;
 61 
 62         /* I: page offset of start position of decompression */
 63         unsigned short pageofs_out;
 64 
 65         /* I: page offset of inline compressed data */
 66         unsigned short pageofs_in;
 67 
 68         union {
 69                 /* L: inline a certain number of bvecs for bootstrap */
 70                 struct z_erofs_bvset_inline bvset;
 71 
 72                 /* I: can be used to free the pcluster by RCU. */
 73                 struct rcu_head rcu;
 74         };
 75 
 76         /* I: compression algorithm format */
 77         unsigned char algorithmformat;
 78 
 79         /* L: whether partial decompression or not */
 80         bool partial;
 81 
 82         /* L: whether there are multiple pageofs_out values */
 83         bool multibases;
 84 
 85         /* L: whether extra buffer allocations are best-effort */
 86         bool besteffort;
 87 
 88         /* A: compressed bvecs (can be cached or inplaced pages) */
 89         struct z_erofs_bvec compressed_bvecs[];
 90 };
 91 
 92 /* the end of a chain of pclusters */
 93 #define Z_EROFS_PCLUSTER_TAIL           ((void *) 0x700 + POISON_POINTER_DELTA)
 94 #define Z_EROFS_PCLUSTER_NIL            (NULL)
 95 
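/*
 * pcl->next therefore takes one of three values: Z_EROFS_PCLUSTER_NIL
 * (the pcluster isn't attached to any chain), Z_EROFS_PCLUSTER_TAIL
 * (it's the last pcluster of a chain), or a pointer to the `next` field
 * of another pcluster in the same chain (see
 * z_erofs_try_to_claim_pcluster() below).
 */
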
 96 struct z_erofs_decompressqueue {
 97         struct super_block *sb;
 98         atomic_t pending_bios;
 99         z_erofs_next_pcluster_t head;
100 
101         union {
102                 struct completion done;
103                 struct work_struct work;
104                 struct kthread_work kthread_work;
105         } u;
106         bool eio, sync;
107 };
108 
109 static inline bool z_erofs_is_inline_pcluster(struct z_erofs_pcluster *pcl)
110 {
111         return !pcl->obj.index;
112 }
113 
114 static inline unsigned int z_erofs_pclusterpages(struct z_erofs_pcluster *pcl)
115 {
116         return PAGE_ALIGN(pcl->pclustersize) >> PAGE_SHIFT;
117 }
118 
119 #define MNGD_MAPPING(sbi)       ((sbi)->managed_cache->i_mapping)
120 static bool erofs_folio_is_managed(struct erofs_sb_info *sbi, struct folio *fo)
121 {
122         return fo->mapping == MNGD_MAPPING(sbi);
123 }
124 
125 /*
126  * bit 30: I/O error occurred on this folio
127  * bit 0 - 29: remaining parts to complete this folio
128  */
129 #define Z_EROFS_FOLIO_EIO                       (1 << 30)
130 
131 static void z_erofs_onlinefolio_init(struct folio *folio)
132 {
133         union {
134                 atomic_t o;
135                 void *v;
136         } u = { .o = ATOMIC_INIT(1) };
137 
138         folio->private = u.v;   /* valid only if file-backed folio is locked */
139 }
140 
141 static void z_erofs_onlinefolio_split(struct folio *folio)
142 {
143         atomic_inc((atomic_t *)&folio->private);
144 }
145 
146 static void z_erofs_onlinefolio_end(struct folio *folio, int err)
147 {
148         int orig, v;
149 
150         do {
151                 orig = atomic_read((atomic_t *)&folio->private);
152                 v = (orig - 1) | (err ? Z_EROFS_FOLIO_EIO : 0);
153         } while (atomic_cmpxchg((atomic_t *)&folio->private, orig, v) != orig);
154 
155         if (v & ~Z_EROFS_FOLIO_EIO)
156                 return;
157         folio->private = 0;
158         folio_end_read(folio, !(v & Z_EROFS_FOLIO_EIO));
159 }
160 
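/*
 * A minimal usage sketch of the onlinefolio helpers above (a hypothetical
 * caller for illustration only; the in-tree users are z_erofs_scan_folio()
 * and the decompression paths below).  folio->private starts at 1, each
 * split adds one outstanding part, and each end drops one; the final drop
 * also folds in any I/O error and completes the read.
 */
#if 0	/* illustrative sketch, not compiled as part of zdata.c */
static void z_erofs_onlinefolio_usage_sketch(struct folio *folio)
{
	z_erofs_onlinefolio_init(folio);	/* private counter = 1 */
	z_erofs_onlinefolio_split(folio);	/* sub-extent A, counter = 2 */
	z_erofs_onlinefolio_split(folio);	/* sub-extent B, counter = 3 */

	z_erofs_onlinefolio_end(folio, 0);	/* A decoded, counter = 2 */
	z_erofs_onlinefolio_end(folio, -EIO);	/* B failed, EIO bit recorded */
	z_erofs_onlinefolio_end(folio, 0);	/* initial ref: completes the read */
}
#endif
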
161 #define Z_EROFS_ONSTACK_PAGES           32
162 
163 /*
164  * since pclustersize is variable for the big pcluster feature, introduce
165  * slab pools for different pcluster sizes.
166  */
167 struct z_erofs_pcluster_slab {
168         struct kmem_cache *slab;
169         unsigned int maxpages;
170         char name[48];
171 };
172 
173 #define _PCLP(n) { .maxpages = n }
174 
175 static struct z_erofs_pcluster_slab pcluster_pool[] __read_mostly = {
176         _PCLP(1), _PCLP(4), _PCLP(16), _PCLP(64), _PCLP(128),
177         _PCLP(Z_EROFS_PCLUSTER_MAX_PAGES)
178 };
179 
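/*
 * For example, a pcluster spanning five pages is served by the
 * "erofs_pcluster-16" cache below: z_erofs_alloc_pcluster() picks the first
 * slab whose maxpages is large enough, and z_erofs_free_pcluster() finds the
 * same bucket again from the rounded-up page count.
 */
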
180 struct z_erofs_bvec_iter {
181         struct page *bvpage;
182         struct z_erofs_bvset *bvset;
183         unsigned int nr, cur;
184 };
185 
186 static struct page *z_erofs_bvec_iter_end(struct z_erofs_bvec_iter *iter)
187 {
188         if (iter->bvpage)
189                 kunmap_local(iter->bvset);
190         return iter->bvpage;
191 }
192 
193 static struct page *z_erofs_bvset_flip(struct z_erofs_bvec_iter *iter)
194 {
195         unsigned long base = (unsigned long)((struct z_erofs_bvset *)0)->bvec;
196         /* have to access nextpage in advance, otherwise it will be unmapped */
197         struct page *nextpage = iter->bvset->nextpage;
198         struct page *oldpage;
199 
200         DBG_BUGON(!nextpage);
201         oldpage = z_erofs_bvec_iter_end(iter);
202         iter->bvpage = nextpage;
203         iter->bvset = kmap_local_page(nextpage);
204         iter->nr = (PAGE_SIZE - base) / sizeof(struct z_erofs_bvec);
205         iter->cur = 0;
206         return oldpage;
207 }
208 
209 static void z_erofs_bvec_iter_begin(struct z_erofs_bvec_iter *iter,
210                                     struct z_erofs_bvset_inline *bvset,
211                                     unsigned int bootstrap_nr,
212                                     unsigned int cur)
213 {
214         *iter = (struct z_erofs_bvec_iter) {
215                 .nr = bootstrap_nr,
216                 .bvset = (struct z_erofs_bvset *)bvset,
217         };
218 
219         while (cur > iter->nr) {
220                 cur -= iter->nr;
221                 z_erofs_bvset_flip(iter);
222         }
223         iter->cur = cur;
224 }
225 
226 static int z_erofs_bvec_enqueue(struct z_erofs_bvec_iter *iter,
227                                 struct z_erofs_bvec *bvec,
228                                 struct page **candidate_bvpage,
229                                 struct page **pagepool)
230 {
231         if (iter->cur >= iter->nr) {
232                 struct page *nextpage = *candidate_bvpage;
233 
234                 if (!nextpage) {
235                         nextpage = erofs_allocpage(pagepool, GFP_KERNEL);
236                         if (!nextpage)
237                                 return -ENOMEM;
238                         set_page_private(nextpage, Z_EROFS_SHORTLIVED_PAGE);
239                 }
240                 DBG_BUGON(iter->bvset->nextpage);
241                 iter->bvset->nextpage = nextpage;
242                 z_erofs_bvset_flip(iter);
243 
244                 iter->bvset->nextpage = NULL;
245                 *candidate_bvpage = NULL;
246         }
247         iter->bvset->bvec[iter->cur++] = *bvec;
248         return 0;
249 }
250 
251 static void z_erofs_bvec_dequeue(struct z_erofs_bvec_iter *iter,
252                                  struct z_erofs_bvec *bvec,
253                                  struct page **old_bvpage)
254 {
255         if (iter->cur == iter->nr)
256                 *old_bvpage = z_erofs_bvset_flip(iter);
257         else
258                 *old_bvpage = NULL;
259         *bvec = iter->bvset->bvec[iter->cur++];
260 }
261 
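/*
 * z_erofs_bvec_enqueue() and z_erofs_bvec_dequeue() walk the bvset chain in
 * the same order: the frontend appends one bvec per attached (sub)page,
 * spilling into extra short-lived pages once the inline slots fill up, and
 * the backend later replays the identical sequence in
 * z_erofs_parse_out_bvecs().
 */
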
262 static void z_erofs_destroy_pcluster_pool(void)
263 {
264         int i;
265 
266         for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
267                 if (!pcluster_pool[i].slab)
268                         continue;
269                 kmem_cache_destroy(pcluster_pool[i].slab);
270                 pcluster_pool[i].slab = NULL;
271         }
272 }
273 
274 static int z_erofs_create_pcluster_pool(void)
275 {
276         struct z_erofs_pcluster_slab *pcs;
277         struct z_erofs_pcluster *a;
278         unsigned int size;
279 
280         for (pcs = pcluster_pool;
281              pcs < pcluster_pool + ARRAY_SIZE(pcluster_pool); ++pcs) {
282                 size = struct_size(a, compressed_bvecs, pcs->maxpages);
283 
284                 sprintf(pcs->name, "erofs_pcluster-%u", pcs->maxpages);
285                 pcs->slab = kmem_cache_create(pcs->name, size, 0,
286                                               SLAB_RECLAIM_ACCOUNT, NULL);
287                 if (pcs->slab)
288                         continue;
289 
290                 z_erofs_destroy_pcluster_pool();
291                 return -ENOMEM;
292         }
293         return 0;
294 }
295 
296 static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int size)
297 {
298         unsigned int nrpages = PAGE_ALIGN(size) >> PAGE_SHIFT;
299         struct z_erofs_pcluster_slab *pcs = pcluster_pool;
300 
301         for (; pcs < pcluster_pool + ARRAY_SIZE(pcluster_pool); ++pcs) {
302                 struct z_erofs_pcluster *pcl;
303 
304                 if (nrpages > pcs->maxpages)
305                         continue;
306 
307                 pcl = kmem_cache_zalloc(pcs->slab, GFP_KERNEL);
308                 if (!pcl)
309                         return ERR_PTR(-ENOMEM);
310                 pcl->pclustersize = size;
311                 return pcl;
312         }
313         return ERR_PTR(-EINVAL);
314 }
315 
316 static void z_erofs_free_pcluster(struct z_erofs_pcluster *pcl)
317 {
318         unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
319         int i;
320 
321         for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
322                 struct z_erofs_pcluster_slab *pcs = pcluster_pool + i;
323 
324                 if (pclusterpages > pcs->maxpages)
325                         continue;
326 
327                 kmem_cache_free(pcs->slab, pcl);
328                 return;
329         }
330         DBG_BUGON(1);
331 }
332 
333 static struct workqueue_struct *z_erofs_workqueue __read_mostly;
334 
335 #ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
336 static struct kthread_worker __rcu **z_erofs_pcpu_workers;
337 
338 static void erofs_destroy_percpu_workers(void)
339 {
340         struct kthread_worker *worker;
341         unsigned int cpu;
342 
343         for_each_possible_cpu(cpu) {
344                 worker = rcu_dereference_protected(
345                                         z_erofs_pcpu_workers[cpu], 1);
346                 rcu_assign_pointer(z_erofs_pcpu_workers[cpu], NULL);
347                 if (worker)
348                         kthread_destroy_worker(worker);
349         }
350         kfree(z_erofs_pcpu_workers);
351 }
352 
353 static struct kthread_worker *erofs_init_percpu_worker(int cpu)
354 {
355         struct kthread_worker *worker =
356                 kthread_create_worker_on_cpu(cpu, 0, "erofs_worker/%u", cpu);
357 
358         if (IS_ERR(worker))
359                 return worker;
360         if (IS_ENABLED(CONFIG_EROFS_FS_PCPU_KTHREAD_HIPRI))
361                 sched_set_fifo_low(worker->task);
362         return worker;
363 }
364 
365 static int erofs_init_percpu_workers(void)
366 {
367         struct kthread_worker *worker;
368         unsigned int cpu;
369 
370         z_erofs_pcpu_workers = kcalloc(num_possible_cpus(),
371                         sizeof(struct kthread_worker *), GFP_ATOMIC);
372         if (!z_erofs_pcpu_workers)
373                 return -ENOMEM;
374 
375         for_each_online_cpu(cpu) {      /* could miss cpu{off,on}line? */
376                 worker = erofs_init_percpu_worker(cpu);
377                 if (!IS_ERR(worker))
378                         rcu_assign_pointer(z_erofs_pcpu_workers[cpu], worker);
379         }
380         return 0;
381 }
382 #else
383 static inline void erofs_destroy_percpu_workers(void) {}
384 static inline int erofs_init_percpu_workers(void) { return 0; }
385 #endif
386 
387 #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_EROFS_FS_PCPU_KTHREAD)
388 static DEFINE_SPINLOCK(z_erofs_pcpu_worker_lock);
389 static enum cpuhp_state erofs_cpuhp_state;
390 
391 static int erofs_cpu_online(unsigned int cpu)
392 {
393         struct kthread_worker *worker, *old;
394 
395         worker = erofs_init_percpu_worker(cpu);
396         if (IS_ERR(worker))
397                 return PTR_ERR(worker);
398 
399         spin_lock(&z_erofs_pcpu_worker_lock);
400         old = rcu_dereference_protected(z_erofs_pcpu_workers[cpu],
401                         lockdep_is_held(&z_erofs_pcpu_worker_lock));
402         if (!old)
403                 rcu_assign_pointer(z_erofs_pcpu_workers[cpu], worker);
404         spin_unlock(&z_erofs_pcpu_worker_lock);
405         if (old)
406                 kthread_destroy_worker(worker);
407         return 0;
408 }
409 
410 static int erofs_cpu_offline(unsigned int cpu)
411 {
412         struct kthread_worker *worker;
413 
414         spin_lock(&z_erofs_pcpu_worker_lock);
415         worker = rcu_dereference_protected(z_erofs_pcpu_workers[cpu],
416                         lockdep_is_held(&z_erofs_pcpu_worker_lock));
417         rcu_assign_pointer(z_erofs_pcpu_workers[cpu], NULL);
418         spin_unlock(&z_erofs_pcpu_worker_lock);
419 
420         synchronize_rcu();
421         if (worker)
422                 kthread_destroy_worker(worker);
423         return 0;
424 }
425 
426 static int erofs_cpu_hotplug_init(void)
427 {
428         int state;
429 
430         state = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
431                         "fs/erofs:online", erofs_cpu_online, erofs_cpu_offline);
432         if (state < 0)
433                 return state;
434 
435         erofs_cpuhp_state = state;
436         return 0;
437 }
438 
439 static void erofs_cpu_hotplug_destroy(void)
440 {
441         if (erofs_cpuhp_state)
442                 cpuhp_remove_state_nocalls(erofs_cpuhp_state);
443 }
444 #else /* !CONFIG_HOTPLUG_CPU || !CONFIG_EROFS_FS_PCPU_KTHREAD */
445 static inline int erofs_cpu_hotplug_init(void) { return 0; }
446 static inline void erofs_cpu_hotplug_destroy(void) {}
447 #endif
448 
449 void z_erofs_exit_subsystem(void)
450 {
451         erofs_cpu_hotplug_destroy();
452         erofs_destroy_percpu_workers();
453         destroy_workqueue(z_erofs_workqueue);
454         z_erofs_destroy_pcluster_pool();
455         z_erofs_exit_decompressor();
456 }
457 
458 int __init z_erofs_init_subsystem(void)
459 {
460         int err = z_erofs_init_decompressor();
461 
462         if (err)
463                 goto err_decompressor;
464 
465         err = z_erofs_create_pcluster_pool();
466         if (err)
467                 goto err_pcluster_pool;
468 
469         z_erofs_workqueue = alloc_workqueue("erofs_worker",
470                         WQ_UNBOUND | WQ_HIGHPRI, num_possible_cpus());
471         if (!z_erofs_workqueue) {
472                 err = -ENOMEM;
473                 goto err_workqueue_init;
474         }
475 
476         err = erofs_init_percpu_workers();
477         if (err)
478                 goto err_pcpu_worker;
479 
480         err = erofs_cpu_hotplug_init();
481         if (err < 0)
482                 goto err_cpuhp_init;
483         return err;
484 
485 err_cpuhp_init:
486         erofs_destroy_percpu_workers();
487 err_pcpu_worker:
488         destroy_workqueue(z_erofs_workqueue);
489 err_workqueue_init:
490         z_erofs_destroy_pcluster_pool();
491 err_pcluster_pool:
492         z_erofs_exit_decompressor();
493 err_decompressor:
494         return err;
495 }
496 
497 enum z_erofs_pclustermode {
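        /*
         * The pcluster already belongs to another ongoing decompression
         * chain (see z_erofs_try_to_claim_pcluster() below); its pages
         * cannot be reused by this request for inplace I/O or bvpage.
         */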
498         Z_EROFS_PCLUSTER_INFLIGHT,
499         /*
500          * A weak form of Z_EROFS_PCLUSTER_FOLLOWED; the difference is that it
501          * could be dispatched into the bypass queue later since its managed
502          * pages are already uptodate. Related online pages cannot be reused for
503          * inplace I/O (or bvpage) since it can be decoded without I/O submission.
504          */
505         Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE,
506         /*
507          * The pcluster was just linked to a decompression chain by us.  It can
508          * also be linked with the remaining pclusters, which means if the
509          * processing page is the tail page of a pcluster, this pcluster can
510          * safely use the whole page (since the previous pcluster is within the
511          * same chain) for in-place I/O, as illustrated below:
512          *  ___________________________________________________
513          * |  tail (partial) page  |    head (partial) page    |
514          * |  (of the current pcl) |   (of the previous pcl)   |
515          * |___PCLUSTER_FOLLOWED___|_____PCLUSTER_FOLLOWED_____|
516          *
517          * [  (*) the page above can be used as inplace I/O.   ]
518          */
519         Z_EROFS_PCLUSTER_FOLLOWED,
520 };
521 
522 struct z_erofs_decompress_frontend {
523         struct inode *const inode;
524         struct erofs_map_blocks map;
525         struct z_erofs_bvec_iter biter;
526 
527         struct page *pagepool;
528         struct page *candidate_bvpage;
529         struct z_erofs_pcluster *pcl;
530         z_erofs_next_pcluster_t owned_head;
531         enum z_erofs_pclustermode mode;
532 
533         erofs_off_t headoffset;
534 
535         /* an index cursor used to pick up inplace I/O pages */
536         unsigned int icur;
537 };
538 
539 #define DECOMPRESS_FRONTEND_INIT(__i) { \
540         .inode = __i, .owned_head = Z_EROFS_PCLUSTER_TAIL, \
541         .mode = Z_EROFS_PCLUSTER_FOLLOWED }
542 
543 static bool z_erofs_should_alloc_cache(struct z_erofs_decompress_frontend *fe)
544 {
545         unsigned int cachestrategy = EROFS_I_SB(fe->inode)->opt.cache_strategy;
546 
547         if (cachestrategy <= EROFS_ZIP_CACHE_DISABLED)
548                 return false;
549 
550         if (!(fe->map.m_flags & EROFS_MAP_FULL_MAPPED))
551                 return true;
552 
553         if (cachestrategy >= EROFS_ZIP_CACHE_READAROUND &&
554             fe->map.m_la < fe->headoffset)
555                 return true;
556 
557         return false;
558 }
559 
560 static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
561 {
562         struct address_space *mc = MNGD_MAPPING(EROFS_I_SB(fe->inode));
563         struct z_erofs_pcluster *pcl = fe->pcl;
564         unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
565         bool shouldalloc = z_erofs_should_alloc_cache(fe);
566         bool standalone = true;
567         /*
568          * optimistic allocation without direct reclaim, since inplace I/O
569          * can be used as a fallback under memory pressure.
570          */
571         gfp_t gfp = (mapping_gfp_mask(mc) & ~__GFP_DIRECT_RECLAIM) |
572                         __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
573         unsigned int i;
574 
575         if (i_blocksize(fe->inode) != PAGE_SIZE ||
576             fe->mode < Z_EROFS_PCLUSTER_FOLLOWED)
577                 return;
578 
579         for (i = 0; i < pclusterpages; ++i) {
580                 struct page *page, *newpage;
581 
582                 /* Inaccurate check w/o locking to avoid unneeded lookups */
583                 if (READ_ONCE(pcl->compressed_bvecs[i].page))
584                         continue;
585 
586                 page = find_get_page(mc, pcl->obj.index + i);
587                 if (!page) {
588                         /* I/O is needed; not possible to decompress directly */
589                         standalone = false;
590                         if (!shouldalloc)
591                                 continue;
592 
593                         /*
594                          * Try cached I/O if allocation succeeds; otherwise fall
595                          * back to in-place I/O to avoid any direct reclaim.
596                          */
597                         newpage = erofs_allocpage(&fe->pagepool, gfp);
598                         if (!newpage)
599                                 continue;
600                         set_page_private(newpage, Z_EROFS_PREALLOCATED_PAGE);
601                 }
602                 spin_lock(&pcl->obj.lockref.lock);
603                 if (!pcl->compressed_bvecs[i].page) {
604                         pcl->compressed_bvecs[i].page = page ? page : newpage;
605                         spin_unlock(&pcl->obj.lockref.lock);
606                         continue;
607                 }
608                 spin_unlock(&pcl->obj.lockref.lock);
609 
610                 if (page)
611                         put_page(page);
612                 else if (newpage)
613                         erofs_pagepool_add(&fe->pagepool, newpage);
614         }
615 
616         /*
617          * don't do inplace I/O if all compressed pages are available in
618          * managed cache since it can be moved to the bypass queue instead.
619          */
620         if (standalone)
621                 fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
622 }
623 
624 /* (erofs_shrinker) disconnect cached encoded data from pclusters */
625 int erofs_try_to_free_all_cached_folios(struct erofs_sb_info *sbi,
626                                         struct erofs_workgroup *grp)
627 {
628         struct z_erofs_pcluster *const pcl =
629                 container_of(grp, struct z_erofs_pcluster, obj);
630         unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
631         struct folio *folio;
632         int i;
633 
634         DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
635         /* Each cached folio contains one page unless bs > ps is supported */
636         for (i = 0; i < pclusterpages; ++i) {
637                 if (pcl->compressed_bvecs[i].page) {
638                         folio = page_folio(pcl->compressed_bvecs[i].page);
639                         /* Avoid reclaiming or migrating this folio */
640                         if (!folio_trylock(folio))
641                                 return -EBUSY;
642 
643                         if (!erofs_folio_is_managed(sbi, folio))
644                                 continue;
645                         pcl->compressed_bvecs[i].page = NULL;
646                         folio_detach_private(folio);
647                         folio_unlock(folio);
648                 }
649         }
650         return 0;
651 }
652 
653 static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp)
654 {
655         struct z_erofs_pcluster *pcl = folio_get_private(folio);
656         struct z_erofs_bvec *bvec = pcl->compressed_bvecs;
657         struct z_erofs_bvec *end = bvec + z_erofs_pclusterpages(pcl);
658         bool ret;
659 
660         if (!folio_test_private(folio))
661                 return true;
662 
663         ret = false;
664         spin_lock(&pcl->obj.lockref.lock);
665         if (pcl->obj.lockref.count <= 0) {
666                 DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
667                 for (; bvec < end; ++bvec) {
668                         if (bvec->page && page_folio(bvec->page) == folio) {
669                                 bvec->page = NULL;
670                                 folio_detach_private(folio);
671                                 ret = true;
672                                 break;
673                         }
674                 }
675         }
676         spin_unlock(&pcl->obj.lockref.lock);
677         return ret;
678 }
679 
680 /*
681  * This is called only on inode eviction. If there are still some decompression
682  * requests in progress, wait and reschedule for a bit here.
683  * An extra lock could be introduced instead but it seems unnecessary.
684  */
685 static void z_erofs_cache_invalidate_folio(struct folio *folio,
686                                            size_t offset, size_t length)
687 {
688         const size_t stop = length + offset;
689 
690         /* Check for potential overflow in debug mode */
691         DBG_BUGON(stop > folio_size(folio) || stop < length);
692 
693         if (offset == 0 && stop == folio_size(folio))
694                 while (!z_erofs_cache_release_folio(folio, 0))
695                         cond_resched();
696 }
697 
698 static const struct address_space_operations z_erofs_cache_aops = {
699         .release_folio = z_erofs_cache_release_folio,
700         .invalidate_folio = z_erofs_cache_invalidate_folio,
701 };
702 
703 int erofs_init_managed_cache(struct super_block *sb)
704 {
705         struct inode *const inode = new_inode(sb);
706 
707         if (!inode)
708                 return -ENOMEM;
709 
710         set_nlink(inode, 1);
711         inode->i_size = OFFSET_MAX;
712         inode->i_mapping->a_ops = &z_erofs_cache_aops;
713         mapping_set_gfp_mask(inode->i_mapping, GFP_KERNEL);
714         EROFS_SB(sb)->managed_cache = inode;
715         return 0;
716 }
717 
718 /* callers must hold the pcluster lock */
719 static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
720                                struct z_erofs_bvec *bvec, bool exclusive)
721 {
722         struct z_erofs_pcluster *pcl = fe->pcl;
723         int ret;
724 
725         if (exclusive) {
726                 /* give priority to inplace I/O, using file pages first */
727                 spin_lock(&pcl->obj.lockref.lock);
728                 while (fe->icur > 0) {
729                         if (pcl->compressed_bvecs[--fe->icur].page)
730                                 continue;
731                         pcl->compressed_bvecs[fe->icur] = *bvec;
732                         spin_unlock(&pcl->obj.lockref.lock);
733                         return 0;
734                 }
735                 spin_unlock(&pcl->obj.lockref.lock);
736 
737                 /* otherwise, check if it can be used as a bvpage */
738                 if (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED &&
739                     !fe->candidate_bvpage)
740                         fe->candidate_bvpage = bvec->page;
741         }
742         ret = z_erofs_bvec_enqueue(&fe->biter, bvec, &fe->candidate_bvpage,
743                                    &fe->pagepool);
744         fe->pcl->vcnt += (ret >= 0);
745         return ret;
746 }
747 
748 static void z_erofs_try_to_claim_pcluster(struct z_erofs_decompress_frontend *f)
749 {
750         struct z_erofs_pcluster *pcl = f->pcl;
751         z_erofs_next_pcluster_t *owned_head = &f->owned_head;
752 
753         /* type 1, nil pcluster (this pcluster doesn't belong to any chain.) */
754         if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_NIL,
755                     *owned_head) == Z_EROFS_PCLUSTER_NIL) {
756                 *owned_head = &pcl->next;
757                 /* so we can attach this pcluster to our submission chain. */
758                 f->mode = Z_EROFS_PCLUSTER_FOLLOWED;
759                 return;
760         }
761 
762         /* type 2, it belongs to an ongoing chain */
763         f->mode = Z_EROFS_PCLUSTER_INFLIGHT;
764 }
765 
766 static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
767 {
768         struct erofs_map_blocks *map = &fe->map;
769         struct super_block *sb = fe->inode->i_sb;
770         bool ztailpacking = map->m_flags & EROFS_MAP_META;
771         struct z_erofs_pcluster *pcl;
772         struct erofs_workgroup *grp;
773         int err;
774 
775         if (!(map->m_flags & EROFS_MAP_ENCODED) ||
776             (!ztailpacking && !erofs_blknr(sb, map->m_pa))) {
777                 DBG_BUGON(1);
778                 return -EFSCORRUPTED;
779         }
780 
781         /* no available pcluster, let's allocate one */
782         pcl = z_erofs_alloc_pcluster(map->m_plen);
783         if (IS_ERR(pcl))
784                 return PTR_ERR(pcl);
785 
786         spin_lock_init(&pcl->obj.lockref.lock);
787         pcl->obj.lockref.count = 1;     /* one ref for this request */
788         pcl->algorithmformat = map->m_algorithmformat;
789         pcl->length = 0;
790         pcl->partial = true;
791 
792         /* new pclusters should be claimed as type 1, primary and followed */
793         pcl->next = fe->owned_head;
794         pcl->pageofs_out = map->m_la & ~PAGE_MASK;
795         fe->mode = Z_EROFS_PCLUSTER_FOLLOWED;
796 
797         /*
798          * lock all primary followed works before making them visible to
799          * others; mutex_trylock *never* fails for a new pcluster.
800          */
801         mutex_init(&pcl->lock);
802         DBG_BUGON(!mutex_trylock(&pcl->lock));
803 
804         if (ztailpacking) {
805                 pcl->obj.index = 0;     /* which indicates ztailpacking */
806         } else {
807                 pcl->obj.index = erofs_blknr(sb, map->m_pa);
808 
809                 grp = erofs_insert_workgroup(fe->inode->i_sb, &pcl->obj);
810                 if (IS_ERR(grp)) {
811                         err = PTR_ERR(grp);
812                         goto err_out;
813                 }
814 
815                 if (grp != &pcl->obj) {
816                         fe->pcl = container_of(grp,
817                                         struct z_erofs_pcluster, obj);
818                         err = -EEXIST;
819                         goto err_out;
820                 }
821         }
822         fe->owned_head = &pcl->next;
823         fe->pcl = pcl;
824         return 0;
825 
826 err_out:
827         mutex_unlock(&pcl->lock);
828         z_erofs_free_pcluster(pcl);
829         return err;
830 }
831 
832 static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
833 {
834         struct erofs_map_blocks *map = &fe->map;
835         struct super_block *sb = fe->inode->i_sb;
836         erofs_blk_t blknr = erofs_blknr(sb, map->m_pa);
837         struct erofs_workgroup *grp = NULL;
838         int ret;
839 
840         DBG_BUGON(fe->pcl);
841 
842         /* must be Z_EROFS_PCLUSTER_TAIL or point to a previous pcluster */
843         DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_NIL);
844 
845         if (!(map->m_flags & EROFS_MAP_META)) {
846                 grp = erofs_find_workgroup(sb, blknr);
847         } else if ((map->m_pa & ~PAGE_MASK) + map->m_plen > PAGE_SIZE) {
848                 DBG_BUGON(1);
849                 return -EFSCORRUPTED;
850         }
851 
852         if (grp) {
853                 fe->pcl = container_of(grp, struct z_erofs_pcluster, obj);
854                 ret = -EEXIST;
855         } else {
856                 ret = z_erofs_register_pcluster(fe);
857         }
858 
859         if (ret == -EEXIST) {
860                 mutex_lock(&fe->pcl->lock);
861                 z_erofs_try_to_claim_pcluster(fe);
862         } else if (ret) {
863                 return ret;
864         }
865 
866         z_erofs_bvec_iter_begin(&fe->biter, &fe->pcl->bvset,
867                                 Z_EROFS_INLINE_BVECS, fe->pcl->vcnt);
868         if (!z_erofs_is_inline_pcluster(fe->pcl)) {
869                 /* bind cache first when cached decompression is preferred */
870                 z_erofs_bind_cache(fe);
871         } else {
872                 void *mptr;
873 
874                 mptr = erofs_read_metabuf(&map->buf, sb, map->m_pa, EROFS_NO_KMAP);
875                 if (IS_ERR(mptr)) {
876                         ret = PTR_ERR(mptr);
877                         erofs_err(sb, "failed to get inline data %d", ret);
878                         return ret;
879                 }
880                 get_page(map->buf.page);
881                 WRITE_ONCE(fe->pcl->compressed_bvecs[0].page, map->buf.page);
882                 fe->pcl->pageofs_in = map->m_pa & ~PAGE_MASK;
883                 fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
884         }
885         /* file-backed inplace I/O pages are traversed in reverse order */
886         fe->icur = z_erofs_pclusterpages(fe->pcl);
887         return 0;
888 }
889 
890 /*
891  * keep in mind that referenced pclusters will only be freed
892  * after an RCU grace period.
893  */
894 static void z_erofs_rcu_callback(struct rcu_head *head)
895 {
896         z_erofs_free_pcluster(container_of(head,
897                         struct z_erofs_pcluster, rcu));
898 }
899 
900 void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
901 {
902         struct z_erofs_pcluster *const pcl =
903                 container_of(grp, struct z_erofs_pcluster, obj);
904 
905         call_rcu(&pcl->rcu, z_erofs_rcu_callback);
906 }
907 
908 static void z_erofs_pcluster_end(struct z_erofs_decompress_frontend *fe)
909 {
910         struct z_erofs_pcluster *pcl = fe->pcl;
911 
912         if (!pcl)
913                 return;
914 
915         z_erofs_bvec_iter_end(&fe->biter);
916         mutex_unlock(&pcl->lock);
917 
918         if (fe->candidate_bvpage)
919                 fe->candidate_bvpage = NULL;
920 
921         /*
922          * once all pending pages are added, don't hold the pcluster's
923          * reference any longer if it isn't hosted by ourselves.
924          */
925         if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE)
926                 erofs_workgroup_put(&pcl->obj);
927 
928         fe->pcl = NULL;
929 }
930 
931 static int z_erofs_read_fragment(struct super_block *sb, struct folio *folio,
932                         unsigned int cur, unsigned int end, erofs_off_t pos)
933 {
934         struct inode *packed_inode = EROFS_SB(sb)->packed_inode;
935         struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
936         unsigned int cnt;
937         u8 *src;
938 
939         if (!packed_inode)
940                 return -EFSCORRUPTED;
941 
942         buf.mapping = packed_inode->i_mapping;
943         for (; cur < end; cur += cnt, pos += cnt) {
944                 cnt = min(end - cur, sb->s_blocksize - erofs_blkoff(sb, pos));
945                 src = erofs_bread(&buf, pos, EROFS_KMAP);
946                 if (IS_ERR(src)) {
947                         erofs_put_metabuf(&buf);
948                         return PTR_ERR(src);
949                 }
950                 memcpy_to_folio(folio, cur, src, cnt);
951         }
952         erofs_put_metabuf(&buf);
953         return 0;
954 }
955 
956 static int z_erofs_scan_folio(struct z_erofs_decompress_frontend *f,
957                               struct folio *folio, bool ra)
958 {
959         struct inode *const inode = f->inode;
960         struct erofs_map_blocks *const map = &f->map;
961         const loff_t offset = folio_pos(folio);
962         const unsigned int bs = i_blocksize(inode);
963         unsigned int end = folio_size(folio), split = 0, cur, pgs;
964         bool tight, excl;
965         int err = 0;
966 
967         tight = (bs == PAGE_SIZE);
968         z_erofs_onlinefolio_init(folio);
969         do {
970                 if (offset + end - 1 < map->m_la ||
971                     offset + end - 1 >= map->m_la + map->m_llen) {
972                         z_erofs_pcluster_end(f);
973                         map->m_la = offset + end - 1;
974                         map->m_llen = 0;
975                         err = z_erofs_map_blocks_iter(inode, map, 0);
976                         if (err)
977                                 break;
978                 }
979 
980                 cur = offset > map->m_la ? 0 : map->m_la - offset;
981                 pgs = round_down(cur, PAGE_SIZE);
982                 /* bump split parts first to avoid several separate cases */
983                 ++split;
984 
985                 if (!(map->m_flags & EROFS_MAP_MAPPED)) {
986                         folio_zero_segment(folio, cur, end);
987                         tight = false;
988                 } else if (map->m_flags & EROFS_MAP_FRAGMENT) {
989                         erofs_off_t fpos = offset + cur - map->m_la;
990 
991                         err = z_erofs_read_fragment(inode->i_sb, folio, cur,
992                                         cur + min(map->m_llen - fpos, end - cur),
993                                         EROFS_I(inode)->z_fragmentoff + fpos);
994                         if (err)
995                                 break;
996                         tight = false;
997                 } else {
998                         if (!f->pcl) {
999                                 err = z_erofs_pcluster_begin(f);
1000                                 if (err)
1001                                         break;
1002                                 f->pcl->besteffort |= !ra;
1003                         }
1004 
1005                         pgs = round_down(end - 1, PAGE_SIZE);
1006                         /*
1007                          * Ensure this partial page belongs to this submit chain
1008                          * rather than other concurrent submit chains or
1009                          * noio (bypass) chains, since those chains are handled
1010                          * asynchronously and thus the page cannot be used for inplace
1011                          * I/O or bvpage (both must be processed in strict order).
1012                          */
1013                         tight &= (f->mode >= Z_EROFS_PCLUSTER_FOLLOWED);
1014                         excl = false;
1015                         if (cur <= pgs) {
1016                                 excl = (split <= 1) || tight;
1017                                 cur = pgs;
1018                         }
1019 
1020                         err = z_erofs_attach_page(f, &((struct z_erofs_bvec) {
1021                                 .page = folio_page(folio, pgs >> PAGE_SHIFT),
1022                                 .offset = offset + pgs - map->m_la,
1023                                 .end = end - pgs, }), excl);
1024                         if (err)
1025                                 break;
1026 
1027                         z_erofs_onlinefolio_split(folio);
1028                         if (f->pcl->pageofs_out != (map->m_la & ~PAGE_MASK))
1029                                 f->pcl->multibases = true;
1030                         if (f->pcl->length < offset + end - map->m_la) {
1031                                 f->pcl->length = offset + end - map->m_la;
1032                                 f->pcl->pageofs_out = map->m_la & ~PAGE_MASK;
1033                         }
1034                         if ((map->m_flags & EROFS_MAP_FULL_MAPPED) &&
1035                             !(map->m_flags & EROFS_MAP_PARTIAL_REF) &&
1036                             f->pcl->length == map->m_llen)
1037                                 f->pcl->partial = false;
1038                 }
1039                 /* shorten the remaining extent to update progress */
1040                 map->m_llen = offset + cur - map->m_la;
1041                 map->m_flags &= ~EROFS_MAP_FULL_MAPPED;
1042                 if (cur <= pgs) {
1043                         split = cur < pgs;
1044                         tight = (bs == PAGE_SIZE);
1045                 }
1046         } while ((end = cur) > 0);
1047         z_erofs_onlinefolio_end(folio, err);
1048         return err;
1049 }
1050 
1051 static bool z_erofs_is_sync_decompress(struct erofs_sb_info *sbi,
1052                                        unsigned int readahead_pages)
1053 {
1054         /* auto: enable for read_folio, disable for readahead */
1055         if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO) &&
1056             !readahead_pages)
1057                 return true;
1058 
1059         if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_FORCE_ON) &&
1060             (readahead_pages <= sbi->opt.max_sync_decompress_pages))
1061                 return true;
1062 
1063         return false;
1064 }
1065 
1066 static bool z_erofs_page_is_invalidated(struct page *page)
1067 {
1068         return !page_folio(page)->mapping && !z_erofs_is_shortlived_page(page);
1069 }
1070 
1071 struct z_erofs_decompress_backend {
1072         struct page *onstack_pages[Z_EROFS_ONSTACK_PAGES];
1073         struct super_block *sb;
1074         struct z_erofs_pcluster *pcl;
1075 
1076         /* pages with the longest decompressed length for deduplication */
1077         struct page **decompressed_pages;
1078         /* pages to keep the compressed data */
1079         struct page **compressed_pages;
1080 
1081         struct list_head decompressed_secondary_bvecs;
1082         struct page **pagepool;
1083         unsigned int onstack_used, nr_pages;
1084 };
1085 
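/*
 * Whenever possible both page arrays share onstack_pages[]:
 * decompressed_pages points into it when nr_pages fits, and
 * compressed_pages takes the remaining onstack slots when the whole
 * pcluster also fits; otherwise either array falls back to kvcalloc()
 * (see z_erofs_decompress_pcluster() below).
 */
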
1086 struct z_erofs_bvec_item {
1087         struct z_erofs_bvec bvec;
1088         struct list_head list;
1089 };
1090 
1091 static void z_erofs_do_decompressed_bvec(struct z_erofs_decompress_backend *be,
1092                                          struct z_erofs_bvec *bvec)
1093 {
1094         struct z_erofs_bvec_item *item;
1095         unsigned int pgnr;
1096 
1097         if (!((bvec->offset + be->pcl->pageofs_out) & ~PAGE_MASK) &&
1098             (bvec->end == PAGE_SIZE ||
1099              bvec->offset + bvec->end == be->pcl->length)) {
1100                 pgnr = (bvec->offset + be->pcl->pageofs_out) >> PAGE_SHIFT;
1101                 DBG_BUGON(pgnr >= be->nr_pages);
1102                 if (!be->decompressed_pages[pgnr]) {
1103                         be->decompressed_pages[pgnr] = bvec->page;
1104                         return;
1105                 }
1106         }
1107 
1108         /* (cold path) one pcluster is requested multiple times */
1109         item = kmalloc(sizeof(*item), GFP_KERNEL | __GFP_NOFAIL);
1110         item->bvec = *bvec;
1111         list_add(&item->list, &be->decompressed_secondary_bvecs);
1112 }
1113 
1114 static void z_erofs_fill_other_copies(struct z_erofs_decompress_backend *be,
1115                                       int err)
1116 {
1117         unsigned int off0 = be->pcl->pageofs_out;
1118         struct list_head *p, *n;
1119 
1120         list_for_each_safe(p, n, &be->decompressed_secondary_bvecs) {
1121                 struct z_erofs_bvec_item *bvi;
1122                 unsigned int end, cur;
1123                 void *dst, *src;
1124 
1125                 bvi = container_of(p, struct z_erofs_bvec_item, list);
1126                 cur = bvi->bvec.offset < 0 ? -bvi->bvec.offset : 0;
1127                 end = min_t(unsigned int, be->pcl->length - bvi->bvec.offset,
1128                             bvi->bvec.end);
1129                 dst = kmap_local_page(bvi->bvec.page);
1130                 while (cur < end) {
1131                         unsigned int pgnr, scur, len;
1132 
1133                         pgnr = (bvi->bvec.offset + cur + off0) >> PAGE_SHIFT;
1134                         DBG_BUGON(pgnr >= be->nr_pages);
1135 
1136                         scur = bvi->bvec.offset + cur -
1137                                         ((pgnr << PAGE_SHIFT) - off0);
1138                         len = min_t(unsigned int, end - cur, PAGE_SIZE - scur);
1139                         if (!be->decompressed_pages[pgnr]) {
1140                                 err = -EFSCORRUPTED;
1141                                 cur += len;
1142                                 continue;
1143                         }
1144                         src = kmap_local_page(be->decompressed_pages[pgnr]);
1145                         memcpy(dst + cur, src + scur, len);
1146                         kunmap_local(src);
1147                         cur += len;
1148                 }
1149                 kunmap_local(dst);
1150                 z_erofs_onlinefolio_end(page_folio(bvi->bvec.page), err);
1151                 list_del(p);
1152                 kfree(bvi);
1153         }
1154 }
1155 
1156 static void z_erofs_parse_out_bvecs(struct z_erofs_decompress_backend *be)
1157 {
1158         struct z_erofs_pcluster *pcl = be->pcl;
1159         struct z_erofs_bvec_iter biter;
1160         struct page *old_bvpage;
1161         int i;
1162 
1163         z_erofs_bvec_iter_begin(&biter, &pcl->bvset, Z_EROFS_INLINE_BVECS, 0);
1164         for (i = 0; i < pcl->vcnt; ++i) {
1165                 struct z_erofs_bvec bvec;
1166 
1167                 z_erofs_bvec_dequeue(&biter, &bvec, &old_bvpage);
1168 
1169                 if (old_bvpage)
1170                         z_erofs_put_shortlivedpage(be->pagepool, old_bvpage);
1171 
1172                 DBG_BUGON(z_erofs_page_is_invalidated(bvec.page));
1173                 z_erofs_do_decompressed_bvec(be, &bvec);
1174         }
1175 
1176         old_bvpage = z_erofs_bvec_iter_end(&biter);
1177         if (old_bvpage)
1178                 z_erofs_put_shortlivedpage(be->pagepool, old_bvpage);
1179 }
1180 
1181 static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be,
1182                                   bool *overlapped)
1183 {
1184         struct z_erofs_pcluster *pcl = be->pcl;
1185         unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
1186         int i, err = 0;
1187 
1188         *overlapped = false;
1189         for (i = 0; i < pclusterpages; ++i) {
1190                 struct z_erofs_bvec *bvec = &pcl->compressed_bvecs[i];
1191                 struct page *page = bvec->page;
1192 
1193                 /* compressed data ought to be valid before decompressing */
1194                 if (!page) {
1195                         err = -EIO;
1196                         continue;
1197                 }
1198                 be->compressed_pages[i] = page;
1199 
1200                 if (z_erofs_is_inline_pcluster(pcl) ||
1201                     erofs_folio_is_managed(EROFS_SB(be->sb), page_folio(page))) {
1202                         if (!PageUptodate(page))
1203                                 err = -EIO;
1204                         continue;
1205                 }
1206 
1207                 DBG_BUGON(z_erofs_page_is_invalidated(page));
1208                 if (z_erofs_is_shortlived_page(page))
1209                         continue;
1210                 z_erofs_do_decompressed_bvec(be, bvec);
1211                 *overlapped = true;
1212         }
1213         return err;
1214 }
1215 
1216 static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
1217                                        int err)
1218 {
1219         struct erofs_sb_info *const sbi = EROFS_SB(be->sb);
1220         struct z_erofs_pcluster *pcl = be->pcl;
1221         unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
1222         const struct z_erofs_decompressor *decomp =
1223                                 z_erofs_decomp[pcl->algorithmformat];
1224         int i, j, jtop, err2;
1225         struct page *page;
1226         bool overlapped;
1227 
1228         mutex_lock(&pcl->lock);
1229         be->nr_pages = PAGE_ALIGN(pcl->length + pcl->pageofs_out) >> PAGE_SHIFT;
1230 
1231         /* allocate (de)compressed page arrays if they cannot be kept on stack */
1232         be->decompressed_pages = NULL;
1233         be->compressed_pages = NULL;
1234         be->onstack_used = 0;
1235         if (be->nr_pages <= Z_EROFS_ONSTACK_PAGES) {
1236                 be->decompressed_pages = be->onstack_pages;
1237                 be->onstack_used = be->nr_pages;
1238                 memset(be->decompressed_pages, 0,
1239                        sizeof(struct page *) * be->nr_pages);
1240         }
1241 
1242         if (pclusterpages + be->onstack_used <= Z_EROFS_ONSTACK_PAGES)
1243                 be->compressed_pages = be->onstack_pages + be->onstack_used;
1244 
1245         if (!be->decompressed_pages)
1246                 be->decompressed_pages =
1247                         kvcalloc(be->nr_pages, sizeof(struct page *),
1248                                  GFP_KERNEL | __GFP_NOFAIL);
1249         if (!be->compressed_pages)
1250                 be->compressed_pages =
1251                         kvcalloc(pclusterpages, sizeof(struct page *),
1252                                  GFP_KERNEL | __GFP_NOFAIL);
1253 
1254         z_erofs_parse_out_bvecs(be);
1255         err2 = z_erofs_parse_in_bvecs(be, &overlapped);
1256         if (err2)
1257                 err = err2;
1258         if (!err)
1259                 err = decomp->decompress(&(struct z_erofs_decompress_req) {
1260                                         .sb = be->sb,
1261                                         .in = be->compressed_pages,
1262                                         .out = be->decompressed_pages,
1263                                         .pageofs_in = pcl->pageofs_in,
1264                                         .pageofs_out = pcl->pageofs_out,
1265                                         .inputsize = pcl->pclustersize,
1266                                         .outputsize = pcl->length,
1267                                         .alg = pcl->algorithmformat,
1268                                         .inplace_io = overlapped,
1269                                         .partial_decoding = pcl->partial,
1270                                         .fillgaps = pcl->multibases,
1271                                         .gfp = pcl->besteffort ?
1272                                                 GFP_KERNEL | __GFP_NOFAIL :
1273                                                 GFP_NOWAIT | __GFP_NORETRY
1274                                  }, be->pagepool);
1275 
1276         /* must handle all compressed pages before actual file pages */
1277         if (z_erofs_is_inline_pcluster(pcl)) {
1278                 page = pcl->compressed_bvecs[0].page;
1279                 WRITE_ONCE(pcl->compressed_bvecs[0].page, NULL);
1280                 put_page(page);
1281         } else {
1282                 /* managed folios are still left in compressed_bvecs[] */
1283                 for (i = 0; i < pclusterpages; ++i) {
1284                         page = be->compressed_pages[i];
1285                         if (!page ||
1286                             erofs_folio_is_managed(sbi, page_folio(page)))
1287                                 continue;
1288                         (void)z_erofs_put_shortlivedpage(be->pagepool, page);
1289                         WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
1290                 }
1291         }
1292         if (be->compressed_pages < be->onstack_pages ||
1293             be->compressed_pages >= be->onstack_pages + Z_EROFS_ONSTACK_PAGES)
1294                 kvfree(be->compressed_pages);
1295 
1296         jtop = 0;
1297         z_erofs_fill_other_copies(be, err);
1298         for (i = 0; i < be->nr_pages; ++i) {
1299                 page = be->decompressed_pages[i];
1300                 if (!page)
1301                         continue;
1302 
1303                 DBG_BUGON(z_erofs_page_is_invalidated(page));
1304                 if (!z_erofs_is_shortlived_page(page)) {
1305                         z_erofs_onlinefolio_end(page_folio(page), err);
1306                         continue;
1307                 }
1308                 if (pcl->algorithmformat != Z_EROFS_COMPRESSION_LZ4) {
1309                         erofs_pagepool_add(be->pagepool, page);
1310                         continue;
1311                 }
1312                 for (j = 0; j < jtop && be->decompressed_pages[j] != page; ++j)
1313                         ;
1314                 if (j >= jtop)  /* this bounce page is newly detected */
1315                         be->decompressed_pages[jtop++] = page;
1316         }
1317         while (jtop)
1318                 erofs_pagepool_add(be->pagepool,
1319                                    be->decompressed_pages[--jtop]);
1320         if (be->decompressed_pages != be->onstack_pages)
1321                 kvfree(be->decompressed_pages);
1322 
1323         pcl->length = 0;
1324         pcl->partial = true;
1325         pcl->multibases = false;
1326         pcl->besteffort = false;
1327         pcl->bvset.nextpage = NULL;
1328         pcl->vcnt = 0;
1329 
1330         /* pcluster lock MUST be taken before the following line */
1331         WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_NIL);
1332         mutex_unlock(&pcl->lock);
1333         return err;
1334 }
1335 
1336 static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
1337                                      struct page **pagepool)
1338 {
1339         struct z_erofs_decompress_backend be = {
1340                 .sb = io->sb,
1341                 .pagepool = pagepool,
1342                 .decompressed_secondary_bvecs =
1343                         LIST_HEAD_INIT(be.decompressed_secondary_bvecs),
1344         };
1345         z_erofs_next_pcluster_t owned = io->head;
1346 
1347         while (owned != Z_EROFS_PCLUSTER_TAIL) {
1348                 DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL);
1349 
1350                 be.pcl = container_of(owned, struct z_erofs_pcluster, next);
1351                 owned = READ_ONCE(be.pcl->next);
1352 
1353                 z_erofs_decompress_pcluster(&be, io->eio ? -EIO : 0);
1354                 if (z_erofs_is_inline_pcluster(be.pcl))
1355                         z_erofs_free_pcluster(be.pcl);
1356                 else
1357                         erofs_workgroup_put(&be.pcl->obj);
1358         }
1359 }
1360 
1361 static void z_erofs_decompressqueue_work(struct work_struct *work)
1362 {
1363         struct z_erofs_decompressqueue *bgq =
1364                 container_of(work, struct z_erofs_decompressqueue, u.work);
1365         struct page *pagepool = NULL;
1366 
1367         DBG_BUGON(bgq->head == Z_EROFS_PCLUSTER_TAIL);
1368         z_erofs_decompress_queue(bgq, &pagepool);
1369         erofs_release_pages(&pagepool);
1370         kvfree(bgq);
1371 }
1372 
1373 #ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
1374 static void z_erofs_decompressqueue_kthread_work(struct kthread_work *work)
1375 {
1376         z_erofs_decompressqueue_work((struct work_struct *)work);
1377 }
1378 #endif
1379 
1380 static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
1381                                        int bios)
1382 {
1383         struct erofs_sb_info *const sbi = EROFS_SB(io->sb);
1384 
1385         /* wake up the caller thread for sync decompression */
1386         if (io->sync) {
1387                 if (!atomic_add_return(bios, &io->pending_bios))
1388                         complete(&io->u.done);
1389                 return;
1390         }
1391 
1392         if (atomic_add_return(bios, &io->pending_bios))
1393                 return;
1394         /* Use (kthread_)work and sync decompression for atomic contexts only */
1395         if (!in_task() || irqs_disabled() || rcu_read_lock_any_held()) {
1396 #ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
1397                 struct kthread_worker *worker;
1398 
1399                 rcu_read_lock();
1400                 worker = rcu_dereference(
1401                                 z_erofs_pcpu_workers[raw_smp_processor_id()]);
1402                 if (!worker) {
1403                         INIT_WORK(&io->u.work, z_erofs_decompressqueue_work);
1404                         queue_work(z_erofs_workqueue, &io->u.work);
1405                 } else {
1406                         kthread_queue_work(worker, &io->u.kthread_work);
1407                 }
1408                 rcu_read_unlock();
1409 #else
1410                 queue_work(z_erofs_workqueue, &io->u.work);
1411 #endif
1412                 /* enable sync decompression for readahead */
1413                 if (sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO)
1414                         sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_FORCE_ON;
1415                 return;
1416         }
1417         z_erofs_decompressqueue_work(&io->u.work);
1418 }
1419 
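/*
 * Fill @bvec with the folio backing the @nr'th compressed slot of @pcl:
 * preallocated, inplace file-backed and still-valid managed-cache folios are
 * reused directly; otherwise a new page is allocated and, when possible,
 * inserted into the managed cache @mc as well.
 */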
1420 static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
1421                                  struct z_erofs_decompress_frontend *f,
1422                                  struct z_erofs_pcluster *pcl,
1423                                  unsigned int nr,
1424                                  struct address_space *mc)
1425 {
1426         gfp_t gfp = mapping_gfp_mask(mc);
1427         bool tocache = false;
1428         struct z_erofs_bvec zbv;
1429         struct address_space *mapping;
1430         struct folio *folio;
1431         int bs = i_blocksize(f->inode);
1432 
1433         /* Except for inplace folios, the entire folio can be used for I/Os */
1434         bvec->bv_offset = 0;
1435         bvec->bv_len = PAGE_SIZE;
1436 repeat:
1437         spin_lock(&pcl->obj.lockref.lock);
1438         zbv = pcl->compressed_bvecs[nr];
1439         spin_unlock(&pcl->obj.lockref.lock);
1440         if (!zbv.page)
1441                 goto out_allocfolio;
1442 
1443         bvec->bv_page = zbv.page;
1444         DBG_BUGON(z_erofs_is_shortlived_page(bvec->bv_page));
1445 
1446         folio = page_folio(zbv.page);
1447         /*
1448          * Handle preallocated cached folios.  We tried to allocate such folios
1449          * without triggering direct reclaim.  If allocation failed, inplace
1450          * file-backed folios will be used instead.
1451          */
1452         if (folio->private == (void *)Z_EROFS_PREALLOCATED_PAGE) {
1453                 folio->private = 0;
1454                 tocache = true;
1455                 goto out_tocache;
1456         }
1457 
1458         mapping = READ_ONCE(folio->mapping);
1459         /*
1460          * File-backed folios used for inplace I/O stay locked the whole time,
1461          * therefore `mapping` can never be NULL here.
1462          */
1463         if (mapping && mapping != mc) {
1464                 if (zbv.offset < 0)
1465                         bvec->bv_offset = round_up(-zbv.offset, bs);
1466                 bvec->bv_len = round_up(zbv.end, bs) - bvec->bv_offset;
1467                 return;
1468         }
1469 
1470         folio_lock(folio);
1471         if (folio->mapping == mc) {
1472                 /*
1473                  * The cached folio is still in managed cache but without
1474                  * a valid `->private` pcluster hint.  Let's reconnect them.
1475                  */
1476                 if (!folio_test_private(folio)) {
1477                         folio_attach_private(folio, pcl);
1478                         /* compressed_bvecs[] already holds a reference from before */
1479                         folio_put(folio);
1480                 }
1481 
1482                 /* no need to submit if it is already up-to-date */
1483                 if (folio_test_uptodate(folio)) {
1484                         folio_unlock(folio);
1485                         bvec->bv_page = NULL;
1486                 }
1487                 return;
1488         }
1489 
1490         /*
1491          * It has been truncated, so it's unsafe to reuse this one. Let's
1492          * allocate a new page for compressed data.
1493          */
1494         DBG_BUGON(folio->mapping);
1495         tocache = true;
1496         folio_unlock(folio);
1497         folio_put(folio);
1498 out_allocfolio:
1499         zbv.page = erofs_allocpage(&f->pagepool, gfp | __GFP_NOFAIL);
1500         spin_lock(&pcl->obj.lockref.lock);
1501         if (pcl->compressed_bvecs[nr].page) {
1502                 erofs_pagepool_add(&f->pagepool, zbv.page);
1503                 spin_unlock(&pcl->obj.lockref.lock);
1504                 cond_resched();
1505                 goto repeat;
1506         }
1507         bvec->bv_page = pcl->compressed_bvecs[nr].page = zbv.page;
1508         folio = page_folio(zbv.page);
1509         /* first mark it as a temporary shortlived folio (now 1 ref) */
1510         folio->private = (void *)Z_EROFS_SHORTLIVED_PAGE;
1511         spin_unlock(&pcl->obj.lockref.lock);
1512 out_tocache:
1513         if (!tocache || bs != PAGE_SIZE ||
1514             filemap_add_folio(mc, folio, pcl->obj.index + nr, gfp))
1515                 return;
1516         folio_attach_private(folio, pcl);
1517         /* drop a refcount added by allocpage (then 2 refs in total here) */
1518         folio_put(folio);
1519 }
1520 
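/*
 * Set up a decompression queue: background queues are allocated dynamically
 * and wired up to a (kthread_)work item, while foreground queues reuse the
 * caller-provided @fgq together with a completion for synchronous waiting.
 */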
1521 static struct z_erofs_decompressqueue *jobqueue_init(struct super_block *sb,
1522                               struct z_erofs_decompressqueue *fgq, bool *fg)
1523 {
1524         struct z_erofs_decompressqueue *q;
1525 
1526         if (fg && !*fg) {
1527                 q = kvzalloc(sizeof(*q), GFP_KERNEL | __GFP_NOWARN);
1528                 if (!q) {
1529                         *fg = true;
1530                         goto fg_out;
1531                 }
1532 #ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
1533                 kthread_init_work(&q->u.kthread_work,
1534                                   z_erofs_decompressqueue_kthread_work);
1535 #else
1536                 INIT_WORK(&q->u.work, z_erofs_decompressqueue_work);
1537 #endif
1538         } else {
1539 fg_out:
1540                 q = fgq;
1541                 init_completion(&fgq->u.done);
1542                 atomic_set(&fgq->pending_bios, 0);
1543                 q->eio = false;
1544                 q->sync = true;
1545         }
1546         q->sb = sb;
1547         q->head = Z_EROFS_PCLUSTER_TAIL;
1548         return q;
1549 }
1550 
1551 /* define decompression jobqueue types */
1552 enum {
1553         JQ_BYPASS,
1554         JQ_SUBMIT,
1555         NR_JOBQUEUES,
1556 };
1557 
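/*
 * Detach a pcluster which needs no device I/O from the submission chain and
 * append it to the bypass chain instead.
 */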
1558 static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
1559                                     z_erofs_next_pcluster_t qtail[],
1560                                     z_erofs_next_pcluster_t owned_head)
1561 {
1562         z_erofs_next_pcluster_t *const submit_qtail = qtail[JQ_SUBMIT];
1563         z_erofs_next_pcluster_t *const bypass_qtail = qtail[JQ_BYPASS];
1564 
1565         WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL);
1566 
1567         WRITE_ONCE(*submit_qtail, owned_head);
1568         WRITE_ONCE(*bypass_qtail, &pcl->next);
1569 
1570         qtail[JQ_BYPASS] = &pcl->next;
1571 }
1572 
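/*
 * Read bio completion handler: mark managed-cache folios uptodate on success,
 * record I/O errors for the whole queue, and report the finished bio to
 * z_erofs_decompress_kickoff().
 */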
1573 static void z_erofs_endio(struct bio *bio)
1574 {
1575         struct z_erofs_decompressqueue *q = bio->bi_private;
1576         blk_status_t err = bio->bi_status;
1577         struct folio_iter fi;
1578 
1579         bio_for_each_folio_all(fi, bio) {
1580                 struct folio *folio = fi.folio;
1581 
1582                 DBG_BUGON(folio_test_uptodate(folio));
1583                 DBG_BUGON(z_erofs_page_is_invalidated(&folio->page));
1584                 if (!erofs_folio_is_managed(EROFS_SB(q->sb), folio))
1585                         continue;
1586 
1587                 if (!err)
1588                         folio_mark_uptodate(folio);
1589                 folio_unlock(folio);
1590         }
1591         if (err)
1592                 q->eio = true;
1593         z_erofs_decompress_kickoff(q, -1);
1594         if (bio->bi_bdev)
1595                 bio_put(bio);
1596 }
1597 
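/*
 * Walk the chain of pclusters owned by this frontend: build and submit read
 * bios for pclusters which need device I/O (merging physically contiguous
 * ones), divert the rest to the bypass queue, and finally report the number
 * of submitted bios to the submit queue.
 */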
1598 static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
1599                                  struct z_erofs_decompressqueue *fgq,
1600                                  bool *force_fg, bool readahead)
1601 {
1602         struct super_block *sb = f->inode->i_sb;
1603         struct address_space *mc = MNGD_MAPPING(EROFS_SB(sb));
1604         z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
1605         struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
1606         z_erofs_next_pcluster_t owned_head = f->owned_head;
1607         /* bio is NULL initially, so no need to initialize last_pa */
1608         erofs_off_t last_pa;
1609         unsigned int nr_bios = 0;
1610         struct bio *bio = NULL;
1611         unsigned long pflags;
1612         int memstall = 0;
1613 
1614         /* No need to read from device for pclusters in the bypass queue. */
1615         q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, NULL);
1616         q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, force_fg);
1617 
1618         qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
1619         qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;
1620 
1621         /* by default, all pclusters need I/O submission */
1622         q[JQ_SUBMIT]->head = owned_head;
1623 
1624         do {
1625                 struct erofs_map_dev mdev;
1626                 struct z_erofs_pcluster *pcl;
1627                 erofs_off_t cur, end;
1628                 struct bio_vec bvec;
1629                 unsigned int i = 0;
1630                 bool bypass = true;
1631 
1632                 DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_NIL);
1633                 pcl = container_of(owned_head, struct z_erofs_pcluster, next);
1634                 owned_head = READ_ONCE(pcl->next);
1635 
1636                 if (z_erofs_is_inline_pcluster(pcl)) {
1637                         move_to_bypass_jobqueue(pcl, qtail, owned_head);
1638                         continue;
1639                 }
1640 
1641                 /* no device id here, thus it will always succeed */
1642                 mdev = (struct erofs_map_dev) {
1643                         .m_pa = erofs_pos(sb, pcl->obj.index),
1644                 };
1645                 (void)erofs_map_dev(sb, &mdev);
1646 
1647                 cur = mdev.m_pa;
1648                 end = cur + pcl->pclustersize;
1649                 do {
1650                         z_erofs_fill_bio_vec(&bvec, f, pcl, i++, mc);
1651                         if (!bvec.bv_page)
1652                                 continue;
1653 
1654                         if (bio && (cur != last_pa ||
1655                                     bio->bi_bdev != mdev.m_bdev)) {
1656 io_retry:
1657                                 if (!erofs_is_fscache_mode(sb))
1658                                         submit_bio(bio);
1659                                 else
1660                                         erofs_fscache_submit_bio(bio);
1661 
1662                                 if (memstall) {
1663                                         psi_memstall_leave(&pflags);
1664                                         memstall = 0;
1665                                 }
1666                                 bio = NULL;
1667                         }
1668 
1669                         if (unlikely(PageWorkingset(bvec.bv_page)) &&
1670                             !memstall) {
1671                                 psi_memstall_enter(&pflags);
1672                                 memstall = 1;
1673                         }
1674 
1675                         if (!bio) {
1676                                 bio = erofs_is_fscache_mode(sb) ?
1677                                         erofs_fscache_bio_alloc(&mdev) :
1678                                         bio_alloc(mdev.m_bdev, BIO_MAX_VECS,
1679                                                   REQ_OP_READ, GFP_NOIO);
1680                                 bio->bi_end_io = z_erofs_endio;
1681                                 bio->bi_iter.bi_sector = cur >> 9;
1682                                 bio->bi_private = q[JQ_SUBMIT];
1683                                 if (readahead)
1684                                         bio->bi_opf |= REQ_RAHEAD;
1685                                 ++nr_bios;
1686                         }
1687 
1688                         if (cur + bvec.bv_len > end)
1689                                 bvec.bv_len = end - cur;
1690                         DBG_BUGON(bvec.bv_len < sb->s_blocksize);
1691                         if (!bio_add_page(bio, bvec.bv_page, bvec.bv_len,
1692                                           bvec.bv_offset))
1693                                 goto io_retry;
1694 
1695                         last_pa = cur + bvec.bv_len;
1696                         bypass = false;
1697                 } while ((cur += bvec.bv_len) < end);
1698 
1699                 if (!bypass)
1700                         qtail[JQ_SUBMIT] = &pcl->next;
1701                 else
1702                         move_to_bypass_jobqueue(pcl, qtail, owned_head);
1703         } while (owned_head != Z_EROFS_PCLUSTER_TAIL);
1704 
1705         if (bio) {
1706                 if (!erofs_is_fscache_mode(sb))
1707                         submit_bio(bio);
1708                 else
1709                         erofs_fscache_submit_bio(bio);
1710                 if (memstall)
1711                         psi_memstall_leave(&pflags);
1712         }
1713 
1714         /*
1715          * Although background decompression is preferred, nothing is pending
1716          * for submission here, so don't kick off decompression; free the queue.
1717          */
1718         if (!*force_fg && !nr_bios) {
1719                 kvfree(q[JQ_SUBMIT]);
1720                 return;
1721         }
1722         z_erofs_decompress_kickoff(q[JQ_SUBMIT], nr_bios);
1723 }
1724 
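/*
 * Submit all pclusters gathered by the frontend.  Pclusters needing no I/O
 * are decompressed immediately; in the forced-foreground case, wait for the
 * read bios to complete and then decompress the remainder in the caller
 * context.
 */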
1725 static void z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
1726                              bool force_fg, bool ra)
1727 {
1728         struct z_erofs_decompressqueue io[NR_JOBQUEUES];
1729 
1730         if (f->owned_head == Z_EROFS_PCLUSTER_TAIL)
1731                 return;
1732         z_erofs_submit_queue(f, io, &force_fg, ra);
1733 
1734         /* handle bypass queue (no i/o pclusters) immediately */
1735         z_erofs_decompress_queue(&io[JQ_BYPASS], &f->pagepool);
1736 
1737         if (!force_fg)
1738                 return;
1739 
1740         /* wait until all bios are completed */
1741         wait_for_completion_io(&io[JQ_SUBMIT].u.done);
1742 
1743         /* handle synchronous decompress queue in the caller context */
1744         z_erofs_decompress_queue(&io[JQ_SUBMIT], &f->pagepool);
1745 }
1746 
1747 /*
1748  * Since partial uptodate is still unimplemented, we have to use an
1749  * approximate readmore strategy as a start.
1750  */
1751 static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
1752                 struct readahead_control *rac, bool backmost)
1753 {
1754         struct inode *inode = f->inode;
1755         struct erofs_map_blocks *map = &f->map;
1756         erofs_off_t cur, end, headoffset = f->headoffset;
1757         int err;
1758 
1759         if (backmost) {
1760                 if (rac)
1761                         end = headoffset + readahead_length(rac) - 1;
1762                 else
1763                         end = headoffset + PAGE_SIZE - 1;
1764                 map->m_la = end;
1765                 err = z_erofs_map_blocks_iter(inode, map,
1766                                               EROFS_GET_BLOCKS_READMORE);
1767                 if (err)
1768                         return;
1769 
1770                 /* expand ra for the trailing edge if readahead */
1771                 if (rac) {
1772                         cur = round_up(map->m_la + map->m_llen, PAGE_SIZE);
1773                         readahead_expand(rac, headoffset, cur - headoffset);
1774                         return;
1775                 }
1776                 end = round_up(end, PAGE_SIZE);
1777         } else {
1778                 end = round_up(map->m_la, PAGE_SIZE);
1779                 if (!map->m_llen)
1780                         return;
1781         }
1782 
1783         cur = map->m_la + map->m_llen - 1;
1784         while ((cur >= end) && (cur < i_size_read(inode))) {
1785                 pgoff_t index = cur >> PAGE_SHIFT;
1786                 struct folio *folio;
1787 
1788                 folio = erofs_grab_folio_nowait(inode->i_mapping, index);
1789                 if (!IS_ERR_OR_NULL(folio)) {
1790                         if (folio_test_uptodate(folio))
1791                                 folio_unlock(folio);
1792                         else
1793                                 z_erofs_scan_folio(f, folio, !!rac);
1794                         folio_put(folio);
1795                 }
1796 
1797                 if (cur < PAGE_SIZE)
1798                         break;
1799                 cur = (index << PAGE_SHIFT) - 1;
1800         }
1801 }
1802 
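/*
 * ->read_folio() entry: scan the target folio (plus a readmore window around
 * it), then run the resulting decompression queue.
 */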
1803 static int z_erofs_read_folio(struct file *file, struct folio *folio)
1804 {
1805         struct inode *const inode = folio->mapping->host;
1806         struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
1807         struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
1808         int err;
1809 
1810         trace_erofs_read_folio(folio, false);
1811         f.headoffset = (erofs_off_t)folio->index << PAGE_SHIFT;
1812 
1813         z_erofs_pcluster_readmore(&f, NULL, true);
1814         err = z_erofs_scan_folio(&f, folio, false);
1815         z_erofs_pcluster_readmore(&f, NULL, false);
1816         z_erofs_pcluster_end(&f);
1817 
1818         /* if any compressed clusters are ready, they still need to be submitted */
1819         z_erofs_runqueue(&f, z_erofs_is_sync_decompress(sbi, 0), false);
1820 
1821         if (err && err != -EINTR)
1822                 erofs_err(inode->i_sb, "read error %d @ %lu of nid %llu",
1823                           err, folio->index, EROFS_I(inode)->nid);
1824 
1825         erofs_put_metabuf(&f.map.buf);
1826         erofs_release_pages(&f.pagepool);
1827         return err;
1828 }
1829 
1830 static void z_erofs_readahead(struct readahead_control *rac)
1831 {
1832         struct inode *const inode = rac->mapping->host;
1833         struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
1834         struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
1835         struct folio *head = NULL, *folio;
1836         unsigned int nr_folios;
1837         int err;
1838 
1839         f.headoffset = readahead_pos(rac);
1840 
1841         z_erofs_pcluster_readmore(&f, rac, true);
1842         nr_folios = readahead_count(rac);
1843         trace_erofs_readpages(inode, readahead_index(rac), nr_folios, false);
1844 
1845         while ((folio = readahead_folio(rac))) {
1846                 folio->private = head;
1847                 head = folio;
1848         }
1849 
1850         /* traverse in reverse order for best metadata I/O performance */
1851         while (head) {
1852                 folio = head;
1853                 head = folio_get_private(folio);
1854 
1855                 err = z_erofs_scan_folio(&f, folio, true);
1856                 if (err && err != -EINTR)
1857                         erofs_err(inode->i_sb, "readahead error at folio %lu @ nid %llu",
1858                                   folio->index, EROFS_I(inode)->nid);
1859         }
1860         z_erofs_pcluster_readmore(&f, rac, false);
1861         z_erofs_pcluster_end(&f);
1862 
1863         z_erofs_runqueue(&f, z_erofs_is_sync_decompress(sbi, nr_folios), true);
1864         erofs_put_metabuf(&f.map.buf);
1865         erofs_release_pages(&f.pagepool);
1866 }
1867 
1868 const struct address_space_operations z_erofs_aops = {
1869         .read_folio = z_erofs_read_folio,
1870         .readahead = z_erofs_readahead,
1871 };
1872 
