~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/fs/btrfs/compression.c

Version: ~ [ linux-6.11.5 ] ~ [ linux-6.10.14 ] ~ [ linux-6.9.12 ] ~ [ linux-6.8.12 ] ~ [ linux-6.7.12 ] ~ [ linux-6.6.58 ] ~ [ linux-6.5.13 ] ~ [ linux-6.4.16 ] ~ [ linux-6.3.13 ] ~ [ linux-6.2.16 ] ~ [ linux-6.1.114 ] ~ [ linux-6.0.19 ] ~ [ linux-5.19.17 ] ~ [ linux-5.18.19 ] ~ [ linux-5.17.15 ] ~ [ linux-5.16.20 ] ~ [ linux-5.15.169 ] ~ [ linux-5.14.21 ] ~ [ linux-5.13.19 ] ~ [ linux-5.12.19 ] ~ [ linux-5.11.22 ] ~ [ linux-5.10.228 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.284 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.322 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.336 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.337 ] ~ [ linux-4.4.302 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.9 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 // SPDX-License-Identifier: GPL-2.0
  2 /*
  3  * Copyright (C) 2008 Oracle.  All rights reserved.
  4  */
  5 
  6 #include <linux/kernel.h>
  7 #include <linux/bio.h>
  8 #include <linux/file.h>
  9 #include <linux/fs.h>
 10 #include <linux/pagemap.h>
 11 #include <linux/pagevec.h>
 12 #include <linux/highmem.h>
 13 #include <linux/kthread.h>
 14 #include <linux/time.h>
 15 #include <linux/init.h>
 16 #include <linux/string.h>
 17 #include <linux/backing-dev.h>
 18 #include <linux/writeback.h>
 19 #include <linux/psi.h>
 20 #include <linux/slab.h>
 21 #include <linux/sched/mm.h>
 22 #include <linux/log2.h>
 23 #include <linux/shrinker.h>
 24 #include <crypto/hash.h>
 25 #include "misc.h"
 26 #include "ctree.h"
 27 #include "fs.h"
 28 #include "btrfs_inode.h"
 29 #include "bio.h"
 30 #include "ordered-data.h"
 31 #include "compression.h"
 32 #include "extent_io.h"
 33 #include "extent_map.h"
 34 #include "subpage.h"
 35 #include "messages.h"
 36 #include "super.h"
 37 
 38 static struct bio_set btrfs_compressed_bioset;
 39 
 40 static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };
 41 
 42 const char* btrfs_compress_type2str(enum btrfs_compression_type type)
 43 {
 44         switch (type) {
 45         case BTRFS_COMPRESS_ZLIB:
 46         case BTRFS_COMPRESS_LZO:
 47         case BTRFS_COMPRESS_ZSTD:
 48         case BTRFS_COMPRESS_NONE:
 49                 return btrfs_compress_types[type];
 50         default:
 51                 break;
 52         }
 53 
 54         return NULL;
 55 }
 56 
 57 static inline struct compressed_bio *to_compressed_bio(struct btrfs_bio *bbio)
 58 {
 59         return container_of(bbio, struct compressed_bio, bbio);
 60 }
 61 
 62 static struct compressed_bio *alloc_compressed_bio(struct btrfs_inode *inode,
 63                                                    u64 start, blk_opf_t op,
 64                                                    btrfs_bio_end_io_t end_io)
 65 {
 66         struct btrfs_bio *bbio;
 67 
 68         bbio = btrfs_bio(bio_alloc_bioset(NULL, BTRFS_MAX_COMPRESSED_PAGES, op,
 69                                           GFP_NOFS, &btrfs_compressed_bioset));
 70         btrfs_bio_init(bbio, inode->root->fs_info, end_io, NULL);
 71         bbio->inode = inode;
 72         bbio->file_offset = start;
 73         return to_compressed_bio(bbio);
 74 }
 75 
 76 bool btrfs_compress_is_valid_type(const char *str, size_t len)
 77 {
 78         int i;
 79 
 80         for (i = 1; i < ARRAY_SIZE(btrfs_compress_types); i++) {
 81                 size_t comp_len = strlen(btrfs_compress_types[i]);
 82 
 83                 if (len < comp_len)
 84                         continue;
 85 
 86                 if (!strncmp(btrfs_compress_types[i], str, comp_len))
 87                         return true;
 88         }
 89         return false;
 90 }
 91 
 92 static int compression_compress_pages(int type, struct list_head *ws,
 93                                       struct address_space *mapping, u64 start,
 94                                       struct folio **folios, unsigned long *out_folios,
 95                                       unsigned long *total_in, unsigned long *total_out)
 96 {
 97         switch (type) {
 98         case BTRFS_COMPRESS_ZLIB:
 99                 return zlib_compress_folios(ws, mapping, start, folios,
100                                             out_folios, total_in, total_out);
101         case BTRFS_COMPRESS_LZO:
102                 return lzo_compress_folios(ws, mapping, start, folios,
103                                            out_folios, total_in, total_out);
104         case BTRFS_COMPRESS_ZSTD:
105                 return zstd_compress_folios(ws, mapping, start, folios,
106                                             out_folios, total_in, total_out);
107         case BTRFS_COMPRESS_NONE:
108         default:
109                 /*
110                  * This can happen when compression races with remount setting
111                  * it to 'no compress', while caller doesn't call
112                  * inode_need_compress() to check if we really need to
113                  * compress.
114                  *
115                  * Not a big deal, just need to inform caller that we
116                  * haven't allocated any pages yet.
117                  */
118                 *out_folios = 0;
119                 return -E2BIG;
120         }
121 }
122 
123 static int compression_decompress_bio(struct list_head *ws,
124                                       struct compressed_bio *cb)
125 {
126         switch (cb->compress_type) {
127         case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb);
128         case BTRFS_COMPRESS_LZO:  return lzo_decompress_bio(ws, cb);
129         case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb);
130         case BTRFS_COMPRESS_NONE:
131         default:
132                 /*
133                  * This can't happen, the type is validated several times
134                  * before we get here.
135                  */
136                 BUG();
137         }
138 }
139 
140 static int compression_decompress(int type, struct list_head *ws,
141                 const u8 *data_in, struct page *dest_page,
142                 unsigned long dest_pgoff, size_t srclen, size_t destlen)
143 {
144         switch (type) {
145         case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page,
146                                                 dest_pgoff, srclen, destlen);
147         case BTRFS_COMPRESS_LZO:  return lzo_decompress(ws, data_in, dest_page,
148                                                 dest_pgoff, srclen, destlen);
149         case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page,
150                                                 dest_pgoff, srclen, destlen);
151         case BTRFS_COMPRESS_NONE:
152         default:
153                 /*
154                  * This can't happen, the type is validated several times
155                  * before we get here.
156                  */
157                 BUG();
158         }
159 }
160 
161 static void btrfs_free_compressed_folios(struct compressed_bio *cb)
162 {
163         for (unsigned int i = 0; i < cb->nr_folios; i++)
164                 btrfs_free_compr_folio(cb->compressed_folios[i]);
165         kfree(cb->compressed_folios);
166 }
167 
168 static int btrfs_decompress_bio(struct compressed_bio *cb);
169 
170 /*
171  * Global cache of last unused pages for compression/decompression.
172  */
173 static struct btrfs_compr_pool {
174         struct shrinker *shrinker;
175         spinlock_t lock;
176         struct list_head list;
177         int count;
178         int thresh;
179 } compr_pool;
180 
181 static unsigned long btrfs_compr_pool_count(struct shrinker *sh, struct shrink_control *sc)
182 {
183         int ret;
184 
185         /*
186          * We must not read the values more than once if 'ret' gets expanded in
187          * the return statement so we don't accidentally return a negative
188          * number, even if the first condition finds it positive.
189          */
190         ret = READ_ONCE(compr_pool.count) - READ_ONCE(compr_pool.thresh);
191 
192         return ret > 0 ? ret : 0;
193 }
194 
195 static unsigned long btrfs_compr_pool_scan(struct shrinker *sh, struct shrink_control *sc)
196 {
197         struct list_head remove;
198         struct list_head *tmp, *next;
199         int freed;
200 
201         if (compr_pool.count == 0)
202                 return SHRINK_STOP;
203 
204         INIT_LIST_HEAD(&remove);
205 
206         /* For now, just simply drain the whole list. */
207         spin_lock(&compr_pool.lock);
208         list_splice_init(&compr_pool.list, &remove);
209         freed = compr_pool.count;
210         compr_pool.count = 0;
211         spin_unlock(&compr_pool.lock);
212 
213         list_for_each_safe(tmp, next, &remove) {
214                 struct page *page = list_entry(tmp, struct page, lru);
215 
216                 ASSERT(page_ref_count(page) == 1);
217                 put_page(page);
218         }
219 
220         return freed;
221 }
222 
223 /*
224  * Common wrappers for page allocation from compression wrappers
225  */
226 struct folio *btrfs_alloc_compr_folio(void)
227 {
228         struct folio *folio = NULL;
229 
230         spin_lock(&compr_pool.lock);
231         if (compr_pool.count > 0) {
232                 folio = list_first_entry(&compr_pool.list, struct folio, lru);
233                 list_del_init(&folio->lru);
234                 compr_pool.count--;
235         }
236         spin_unlock(&compr_pool.lock);
237 
238         if (folio)
239                 return folio;
240 
241         return folio_alloc(GFP_NOFS, 0);
242 }
243 
244 void btrfs_free_compr_folio(struct folio *folio)
245 {
246         bool do_free = false;
247 
248         spin_lock(&compr_pool.lock);
249         if (compr_pool.count > compr_pool.thresh) {
250                 do_free = true;
251         } else {
252                 list_add(&folio->lru, &compr_pool.list);
253                 compr_pool.count++;
254         }
255         spin_unlock(&compr_pool.lock);
256 
257         if (!do_free)
258                 return;
259 
260         ASSERT(folio_ref_count(folio) == 1);
261         folio_put(folio);
262 }
263 
264 static void end_bbio_compressed_read(struct btrfs_bio *bbio)
265 {
266         struct compressed_bio *cb = to_compressed_bio(bbio);
267         blk_status_t status = bbio->bio.bi_status;
268 
269         if (!status)
270                 status = errno_to_blk_status(btrfs_decompress_bio(cb));
271 
272         btrfs_free_compressed_folios(cb);
273         btrfs_bio_end_io(cb->orig_bbio, status);
274         bio_put(&bbio->bio);
275 }
276 
277 /*
278  * Clear the writeback bits on all of the file
279  * pages for a compressed write
280  */
281 static noinline void end_compressed_writeback(const struct compressed_bio *cb)
282 {
283         struct inode *inode = &cb->bbio.inode->vfs_inode;
284         struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
285         unsigned long index = cb->start >> PAGE_SHIFT;
286         unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
287         struct folio_batch fbatch;
288         const int error = blk_status_to_errno(cb->bbio.bio.bi_status);
289         int i;
290         int ret;
291 
292         if (error)
293                 mapping_set_error(inode->i_mapping, error);
294 
295         folio_batch_init(&fbatch);
296         while (index <= end_index) {
297                 ret = filemap_get_folios(inode->i_mapping, &index, end_index,
298                                 &fbatch);
299 
300                 if (ret == 0)
301                         return;
302 
303                 for (i = 0; i < ret; i++) {
304                         struct folio *folio = fbatch.folios[i];
305 
306                         btrfs_folio_clamp_clear_writeback(fs_info, folio,
307                                                           cb->start, cb->len);
308                 }
309                 folio_batch_release(&fbatch);
310         }
311         /* the inode may be gone now */
312 }
313 
314 static void btrfs_finish_compressed_write_work(struct work_struct *work)
315 {
316         struct compressed_bio *cb =
317                 container_of(work, struct compressed_bio, write_end_work);
318 
319         btrfs_finish_ordered_extent(cb->bbio.ordered, NULL, cb->start, cb->len,
320                                     cb->bbio.bio.bi_status == BLK_STS_OK);
321 
322         if (cb->writeback)
323                 end_compressed_writeback(cb);
324         /* Note, our inode could be gone now */
325 
326         btrfs_free_compressed_folios(cb);
327         bio_put(&cb->bbio.bio);
328 }
329 
330 /*
331  * Do the cleanup once all the compressed pages hit the disk.  This will clear
332  * writeback on the file pages and free the compressed pages.
333  *
334  * This also calls the writeback end hooks for the file pages so that metadata
335  * and checksums can be updated in the file.
336  */
337 static void end_bbio_compressed_write(struct btrfs_bio *bbio)
338 {
339         struct compressed_bio *cb = to_compressed_bio(bbio);
340         struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;
341 
342         queue_work(fs_info->compressed_write_workers, &cb->write_end_work);
343 }
344 
345 static void btrfs_add_compressed_bio_folios(struct compressed_bio *cb)
346 {
347         struct bio *bio = &cb->bbio.bio;
348         u32 offset = 0;
349 
350         while (offset < cb->compressed_len) {
351                 int ret;
352                 u32 len = min_t(u32, cb->compressed_len - offset, PAGE_SIZE);
353 
354                 /* Maximum compressed extent is smaller than bio size limit. */
355                 ret = bio_add_folio(bio, cb->compressed_folios[offset >> PAGE_SHIFT],
356                                     len, 0);
357                 ASSERT(ret);
358                 offset += len;
359         }
360 }
361 
362 /*
363  * worker function to build and submit bios for previously compressed pages.
364  * The corresponding pages in the inode should be marked for writeback
365  * and the compressed pages should have a reference on them for dropping
366  * when the IO is complete.
367  *
368  * This also checksums the file bytes and gets things ready for
369  * the end io hooks.
370  */
371 void btrfs_submit_compressed_write(struct btrfs_ordered_extent *ordered,
372                                    struct folio **compressed_folios,
373                                    unsigned int nr_folios,
374                                    blk_opf_t write_flags,
375                                    bool writeback)
376 {
377         struct btrfs_inode *inode = ordered->inode;
378         struct btrfs_fs_info *fs_info = inode->root->fs_info;
379         struct compressed_bio *cb;
380 
381         ASSERT(IS_ALIGNED(ordered->file_offset, fs_info->sectorsize));
382         ASSERT(IS_ALIGNED(ordered->num_bytes, fs_info->sectorsize));
383 
384         cb = alloc_compressed_bio(inode, ordered->file_offset,
385                                   REQ_OP_WRITE | write_flags,
386                                   end_bbio_compressed_write);
387         cb->start = ordered->file_offset;
388         cb->len = ordered->num_bytes;
389         cb->compressed_folios = compressed_folios;
390         cb->compressed_len = ordered->disk_num_bytes;
391         cb->writeback = writeback;
392         INIT_WORK(&cb->write_end_work, btrfs_finish_compressed_write_work);
393         cb->nr_folios = nr_folios;
394         cb->bbio.bio.bi_iter.bi_sector = ordered->disk_bytenr >> SECTOR_SHIFT;
395         cb->bbio.ordered = ordered;
396         btrfs_add_compressed_bio_folios(cb);
397 
398         btrfs_submit_bio(&cb->bbio, 0);
399 }
400 
401 /*
402  * Add extra pages in the same compressed file extent so that we don't need to
403  * re-read the same extent again and again.
404  *
405  * NOTE: this won't work well for subpage, as for subpage read, we lock the
406  * full page then submit bio for each compressed/regular extents.
407  *
408  * This means, if we have several sectors in the same page points to the same
409  * on-disk compressed data, we will re-read the same extent many times and
410  * this function can only help for the next page.
411  */
412 static noinline int add_ra_bio_pages(struct inode *inode,
413                                      u64 compressed_end,
414                                      struct compressed_bio *cb,
415                                      int *memstall, unsigned long *pflags)
416 {
417         struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
418         unsigned long end_index;
419         struct bio *orig_bio = &cb->orig_bbio->bio;
420         u64 cur = cb->orig_bbio->file_offset + orig_bio->bi_iter.bi_size;
421         u64 isize = i_size_read(inode);
422         int ret;
423         struct page *page;
424         struct extent_map *em;
425         struct address_space *mapping = inode->i_mapping;
426         struct extent_map_tree *em_tree;
427         struct extent_io_tree *tree;
428         int sectors_missed = 0;
429 
430         em_tree = &BTRFS_I(inode)->extent_tree;
431         tree = &BTRFS_I(inode)->io_tree;
432 
433         if (isize == 0)
434                 return 0;
435 
436         /*
437          * For current subpage support, we only support 64K page size,
438          * which means maximum compressed extent size (128K) is just 2x page
439          * size.
440          * This makes readahead less effective, so here disable readahead for
441          * subpage for now, until full compressed write is supported.
442          */
443         if (fs_info->sectorsize < PAGE_SIZE)
444                 return 0;
445 
446         end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
447 
448         while (cur < compressed_end) {
449                 u64 page_end;
450                 u64 pg_index = cur >> PAGE_SHIFT;
451                 u32 add_size;
452 
453                 if (pg_index > end_index)
454                         break;
455 
456                 page = xa_load(&mapping->i_pages, pg_index);
457                 if (page && !xa_is_value(page)) {
458                         sectors_missed += (PAGE_SIZE - offset_in_page(cur)) >>
459                                           fs_info->sectorsize_bits;
460 
461                         /* Beyond threshold, no need to continue */
462                         if (sectors_missed > 4)
463                                 break;
464 
465                         /*
466                          * Jump to next page start as we already have page for
467                          * current offset.
468                          */
469                         cur = (pg_index << PAGE_SHIFT) + PAGE_SIZE;
470                         continue;
471                 }
472 
473                 page = __page_cache_alloc(mapping_gfp_constraint(mapping,
474                                                                  ~__GFP_FS));
475                 if (!page)
476                         break;
477 
478                 if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
479                         put_page(page);
480                         /* There is already a page, skip to page end */
481                         cur = (pg_index << PAGE_SHIFT) + PAGE_SIZE;
482                         continue;
483                 }
484 
485                 if (!*memstall && PageWorkingset(page)) {
486                         psi_memstall_enter(pflags);
487                         *memstall = 1;
488                 }
489 
490                 ret = set_page_extent_mapped(page);
491                 if (ret < 0) {
492                         unlock_page(page);
493                         put_page(page);
494                         break;
495                 }
496 
497                 page_end = (pg_index << PAGE_SHIFT) + PAGE_SIZE - 1;
498                 lock_extent(tree, cur, page_end, NULL);
499                 read_lock(&em_tree->lock);
500                 em = lookup_extent_mapping(em_tree, cur, page_end + 1 - cur);
501                 read_unlock(&em_tree->lock);
502 
503                 /*
504                  * At this point, we have a locked page in the page cache for
505                  * these bytes in the file.  But, we have to make sure they map
506                  * to this compressed extent on disk.
507                  */
508                 if (!em || cur < em->start ||
509                     (cur + fs_info->sectorsize > extent_map_end(em)) ||
510                     (extent_map_block_start(em) >> SECTOR_SHIFT) !=
511                     orig_bio->bi_iter.bi_sector) {
512                         free_extent_map(em);
513                         unlock_extent(tree, cur, page_end, NULL);
514                         unlock_page(page);
515                         put_page(page);
516                         break;
517                 }
518                 add_size = min(em->start + em->len, page_end + 1) - cur;
519                 free_extent_map(em);
520 
521                 if (page->index == end_index) {
522                         size_t zero_offset = offset_in_page(isize);
523 
524                         if (zero_offset) {
525                                 int zeros;
526                                 zeros = PAGE_SIZE - zero_offset;
527                                 memzero_page(page, zero_offset, zeros);
528                         }
529                 }
530 
531                 ret = bio_add_page(orig_bio, page, add_size, offset_in_page(cur));
532                 if (ret != add_size) {
533                         unlock_extent(tree, cur, page_end, NULL);
534                         unlock_page(page);
535                         put_page(page);
536                         break;
537                 }
538                 /*
539                  * If it's subpage, we also need to increase its
540                  * subpage::readers number, as at endio we will decrease
541                  * subpage::readers and to unlock the page.
542                  */
543                 if (fs_info->sectorsize < PAGE_SIZE)
544                         btrfs_subpage_start_reader(fs_info, page_folio(page),
545                                                    cur, add_size);
546                 put_page(page);
547                 cur += add_size;
548         }
549         return 0;
550 }
551 
552 /*
553  * for a compressed read, the bio we get passed has all the inode pages
554  * in it.  We don't actually do IO on those pages but allocate new ones
555  * to hold the compressed pages on disk.
556  *
557  * bio->bi_iter.bi_sector points to the compressed extent on disk
558  * bio->bi_io_vec points to all of the inode pages
559  *
560  * After the compressed pages are read, we copy the bytes into the
561  * bio we were passed and then call the bio end_io calls
562  */
563 void btrfs_submit_compressed_read(struct btrfs_bio *bbio)
564 {
565         struct btrfs_inode *inode = bbio->inode;
566         struct btrfs_fs_info *fs_info = inode->root->fs_info;
567         struct extent_map_tree *em_tree = &inode->extent_tree;
568         struct compressed_bio *cb;
569         unsigned int compressed_len;
570         u64 file_offset = bbio->file_offset;
571         u64 em_len;
572         u64 em_start;
573         struct extent_map *em;
574         unsigned long pflags;
575         int memstall = 0;
576         blk_status_t ret;
577         int ret2;
578 
579         /* we need the actual starting offset of this extent in the file */
580         read_lock(&em_tree->lock);
581         em = lookup_extent_mapping(em_tree, file_offset, fs_info->sectorsize);
582         read_unlock(&em_tree->lock);
583         if (!em) {
584                 ret = BLK_STS_IOERR;
585                 goto out;
586         }
587 
588         ASSERT(extent_map_is_compressed(em));
589         compressed_len = em->disk_num_bytes;
590 
591         cb = alloc_compressed_bio(inode, file_offset, REQ_OP_READ,
592                                   end_bbio_compressed_read);
593 
594         cb->start = em->start - em->offset;
595         em_len = em->len;
596         em_start = em->start;
597 
598         cb->len = bbio->bio.bi_iter.bi_size;
599         cb->compressed_len = compressed_len;
600         cb->compress_type = extent_map_compression(em);
601         cb->orig_bbio = bbio;
602 
603         free_extent_map(em);
604 
605         cb->nr_folios = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
606         cb->compressed_folios = kcalloc(cb->nr_folios, sizeof(struct page *), GFP_NOFS);
607         if (!cb->compressed_folios) {
608                 ret = BLK_STS_RESOURCE;
609                 goto out_free_bio;
610         }
611 
612         ret2 = btrfs_alloc_folio_array(cb->nr_folios, cb->compressed_folios);
613         if (ret2) {
614                 ret = BLK_STS_RESOURCE;
615                 goto out_free_compressed_pages;
616         }
617 
618         add_ra_bio_pages(&inode->vfs_inode, em_start + em_len, cb, &memstall,
619                          &pflags);
620 
621         /* include any pages we added in add_ra-bio_pages */
622         cb->len = bbio->bio.bi_iter.bi_size;
623         cb->bbio.bio.bi_iter.bi_sector = bbio->bio.bi_iter.bi_sector;
624         btrfs_add_compressed_bio_folios(cb);
625 
626         if (memstall)
627                 psi_memstall_leave(&pflags);
628 
629         btrfs_submit_bio(&cb->bbio, 0);
630         return;
631 
632 out_free_compressed_pages:
633         kfree(cb->compressed_folios);
634 out_free_bio:
635         bio_put(&cb->bbio.bio);
636 out:
637         btrfs_bio_end_io(bbio, ret);
638 }
639 
640 /*
641  * Heuristic uses systematic sampling to collect data from the input data
642  * range, the logic can be tuned by the following constants:
643  *
644  * @SAMPLING_READ_SIZE - how many bytes will be copied from for each sample
645  * @SAMPLING_INTERVAL  - range from which the sampled data can be collected
646  */
647 #define SAMPLING_READ_SIZE      (16)
648 #define SAMPLING_INTERVAL       (256)
649 
650 /*
651  * For statistical analysis of the input data we consider bytes that form a
652  * Galois Field of 256 objects. Each object has an attribute count, ie. how
653  * many times the object appeared in the sample.
654  */
655 #define BUCKET_SIZE             (256)
656 
657 /*
658  * The size of the sample is based on a statistical sampling rule of thumb.
659  * The common way is to perform sampling tests as long as the number of
660  * elements in each cell is at least 5.
661  *
662  * Instead of 5, we choose 32 to obtain more accurate results.
663  * If the data contain the maximum number of symbols, which is 256, we obtain a
664  * sample size bound by 8192.
665  *
666  * For a sample of at most 8KB of data per data range: 16 consecutive bytes
667  * from up to 512 locations.
668  */
669 #define MAX_SAMPLE_SIZE         (BTRFS_MAX_UNCOMPRESSED *               \
670                                  SAMPLING_READ_SIZE / SAMPLING_INTERVAL)
671 
672 struct bucket_item {
673         u32 count;
674 };
675 
676 struct heuristic_ws {
677         /* Partial copy of input data */
678         u8 *sample;
679         u32 sample_size;
680         /* Buckets store counters for each byte value */
681         struct bucket_item *bucket;
682         /* Sorting buffer */
683         struct bucket_item *bucket_b;
684         struct list_head list;
685 };
686 
687 static struct workspace_manager heuristic_wsm;
688 
689 static void free_heuristic_ws(struct list_head *ws)
690 {
691         struct heuristic_ws *workspace;
692 
693         workspace = list_entry(ws, struct heuristic_ws, list);
694 
695         kvfree(workspace->sample);
696         kfree(workspace->bucket);
697         kfree(workspace->bucket_b);
698         kfree(workspace);
699 }
700 
701 static struct list_head *alloc_heuristic_ws(unsigned int level)
702 {
703         struct heuristic_ws *ws;
704 
705         ws = kzalloc(sizeof(*ws), GFP_KERNEL);
706         if (!ws)
707                 return ERR_PTR(-ENOMEM);
708 
709         ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
710         if (!ws->sample)
711                 goto fail;
712 
713         ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
714         if (!ws->bucket)
715                 goto fail;
716 
717         ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
718         if (!ws->bucket_b)
719                 goto fail;
720 
721         INIT_LIST_HEAD(&ws->list);
722         return &ws->list;
723 fail:
724         free_heuristic_ws(&ws->list);
725         return ERR_PTR(-ENOMEM);
726 }
727 
728 const struct btrfs_compress_op btrfs_heuristic_compress = {
729         .workspace_manager = &heuristic_wsm,
730 };
731 
732 static const struct btrfs_compress_op * const btrfs_compress_op[] = {
733         /* The heuristic is represented as compression type 0 */
734         &btrfs_heuristic_compress,
735         &btrfs_zlib_compress,
736         &btrfs_lzo_compress,
737         &btrfs_zstd_compress,
738 };
739 
740 static struct list_head *alloc_workspace(int type, unsigned int level)
741 {
742         switch (type) {
743         case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(level);
744         case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(level);
745         case BTRFS_COMPRESS_LZO:  return lzo_alloc_workspace(level);
746         case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(level);
747         default:
748                 /*
749                  * This can't happen, the type is validated several times
750                  * before we get here.
751                  */
752                 BUG();
753         }
754 }
755 
756 static void free_workspace(int type, struct list_head *ws)
757 {
758         switch (type) {
759         case BTRFS_COMPRESS_NONE: return free_heuristic_ws(ws);
760         case BTRFS_COMPRESS_ZLIB: return zlib_free_workspace(ws);
761         case BTRFS_COMPRESS_LZO:  return lzo_free_workspace(ws);
762         case BTRFS_COMPRESS_ZSTD: return zstd_free_workspace(ws);
763         default:
764                 /*
765                  * This can't happen, the type is validated several times
766                  * before we get here.
767                  */
768                 BUG();
769         }
770 }
771 
772 static void btrfs_init_workspace_manager(int type)
773 {
774         struct workspace_manager *wsm;
775         struct list_head *workspace;
776 
777         wsm = btrfs_compress_op[type]->workspace_manager;
778         INIT_LIST_HEAD(&wsm->idle_ws);
779         spin_lock_init(&wsm->ws_lock);
780         atomic_set(&wsm->total_ws, 0);
781         init_waitqueue_head(&wsm->ws_wait);
782 
783         /*
784          * Preallocate one workspace for each compression type so we can
785          * guarantee forward progress in the worst case
786          */
787         workspace = alloc_workspace(type, 0);
788         if (IS_ERR(workspace)) {
789                 pr_warn(
790         "BTRFS: cannot preallocate compression workspace, will try later\n");
791         } else {
792                 atomic_set(&wsm->total_ws, 1);
793                 wsm->free_ws = 1;
794                 list_add(workspace, &wsm->idle_ws);
795         }
796 }
797 
798 static void btrfs_cleanup_workspace_manager(int type)
799 {
800         struct workspace_manager *wsman;
801         struct list_head *ws;
802 
803         wsman = btrfs_compress_op[type]->workspace_manager;
804         while (!list_empty(&wsman->idle_ws)) {
805                 ws = wsman->idle_ws.next;
806                 list_del(ws);
807                 free_workspace(type, ws);
808                 atomic_dec(&wsman->total_ws);
809         }
810 }
811 
812 /*
813  * This finds an available workspace or allocates a new one.
814  * If it's not possible to allocate a new one, waits until there's one.
815  * Preallocation makes a forward progress guarantees and we do not return
816  * errors.
817  */
818 struct list_head *btrfs_get_workspace(int type, unsigned int level)
819 {
820         struct workspace_manager *wsm;
821         struct list_head *workspace;
822         int cpus = num_online_cpus();
823         unsigned nofs_flag;
824         struct list_head *idle_ws;
825         spinlock_t *ws_lock;
826         atomic_t *total_ws;
827         wait_queue_head_t *ws_wait;
828         int *free_ws;
829 
830         wsm = btrfs_compress_op[type]->workspace_manager;
831         idle_ws  = &wsm->idle_ws;
832         ws_lock  = &wsm->ws_lock;
833         total_ws = &wsm->total_ws;
834         ws_wait  = &wsm->ws_wait;
835         free_ws  = &wsm->free_ws;
836 
837 again:
838         spin_lock(ws_lock);
839         if (!list_empty(idle_ws)) {
840                 workspace = idle_ws->next;
841                 list_del(workspace);
842                 (*free_ws)--;
843                 spin_unlock(ws_lock);
844                 return workspace;
845 
846         }
847         if (atomic_read(total_ws) > cpus) {
848                 DEFINE_WAIT(wait);
849 
850                 spin_unlock(ws_lock);
851                 prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
852                 if (atomic_read(total_ws) > cpus && !*free_ws)
853                         schedule();
854                 finish_wait(ws_wait, &wait);
855                 goto again;
856         }
857         atomic_inc(total_ws);
858         spin_unlock(ws_lock);
859 
860         /*
861          * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
862          * to turn it off here because we might get called from the restricted
863          * context of btrfs_compress_bio/btrfs_compress_pages
864          */
865         nofs_flag = memalloc_nofs_save();
866         workspace = alloc_workspace(type, level);
867         memalloc_nofs_restore(nofs_flag);
868 
869         if (IS_ERR(workspace)) {
870                 atomic_dec(total_ws);
871                 wake_up(ws_wait);
872 
873                 /*
874                  * Do not return the error but go back to waiting. There's a
875                  * workspace preallocated for each type and the compression
876                  * time is bounded so we get to a workspace eventually. This
877                  * makes our caller's life easier.
878                  *
879                  * To prevent silent and low-probability deadlocks (when the
880                  * initial preallocation fails), check if there are any
881                  * workspaces at all.
882                  */
883                 if (atomic_read(total_ws) == 0) {
884                         static DEFINE_RATELIMIT_STATE(_rs,
885                                         /* once per minute */ 60 * HZ,
886                                         /* no burst */ 1);
887 
888                         if (__ratelimit(&_rs)) {
889                                 pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
890                         }
891                 }
892                 goto again;
893         }
894         return workspace;
895 }
896 
897 static struct list_head *get_workspace(int type, int level)
898 {
899         switch (type) {
900         case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(type, level);
901         case BTRFS_COMPRESS_ZLIB: return zlib_get_workspace(level);
902         case BTRFS_COMPRESS_LZO:  return btrfs_get_workspace(type, level);
903         case BTRFS_COMPRESS_ZSTD: return zstd_get_workspace(level);
904         default:
905                 /*
906                  * This can't happen, the type is validated several times
907                  * before we get here.
908                  */
909                 BUG();
910         }
911 }
912 
913 /*
914  * put a workspace struct back on the list or free it if we have enough
915  * idle ones sitting around
916  */
917 void btrfs_put_workspace(int type, struct list_head *ws)
918 {
919         struct workspace_manager *wsm;
920         struct list_head *idle_ws;
921         spinlock_t *ws_lock;
922         atomic_t *total_ws;
923         wait_queue_head_t *ws_wait;
924         int *free_ws;
925 
926         wsm = btrfs_compress_op[type]->workspace_manager;
927         idle_ws  = &wsm->idle_ws;
928         ws_lock  = &wsm->ws_lock;
929         total_ws = &wsm->total_ws;
930         ws_wait  = &wsm->ws_wait;
931         free_ws  = &wsm->free_ws;
932 
933         spin_lock(ws_lock);
934         if (*free_ws <= num_online_cpus()) {
935                 list_add(ws, idle_ws);
936                 (*free_ws)++;
937                 spin_unlock(ws_lock);
938                 goto wake;
939         }
940         spin_unlock(ws_lock);
941 
942         free_workspace(type, ws);
943         atomic_dec(total_ws);
944 wake:
945         cond_wake_up(ws_wait);
946 }
947 
948 static void put_workspace(int type, struct list_head *ws)
949 {
950         switch (type) {
951         case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(type, ws);
952         case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(type, ws);
953         case BTRFS_COMPRESS_LZO:  return btrfs_put_workspace(type, ws);
954         case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(ws);
955         default:
956                 /*
957                  * This can't happen, the type is validated several times
958                  * before we get here.
959                  */
960                 BUG();
961         }
962 }
963 
964 /*
965  * Adjust @level according to the limits of the compression algorithm or
966  * fallback to default
967  */
968 static unsigned int btrfs_compress_set_level(int type, unsigned level)
969 {
970         const struct btrfs_compress_op *ops = btrfs_compress_op[type];
971 
972         if (level == 0)
973                 level = ops->default_level;
974         else
975                 level = min(level, ops->max_level);
976 
977         return level;
978 }
979 
980 /* Wrapper around find_get_page(), with extra error message. */
981 int btrfs_compress_filemap_get_folio(struct address_space *mapping, u64 start,
982                                      struct folio **in_folio_ret)
983 {
984         struct folio *in_folio;
985 
986         /*
987          * The compressed write path should have the folio locked already, thus
988          * we only need to grab one reference.
989          */
990         in_folio = filemap_get_folio(mapping, start >> PAGE_SHIFT);
991         if (IS_ERR(in_folio)) {
992                 struct btrfs_inode *inode = BTRFS_I(mapping->host);
993 
994                 btrfs_crit(inode->root->fs_info,
995                 "failed to get page cache, root %lld ino %llu file offset %llu",
996                            btrfs_root_id(inode->root), btrfs_ino(inode), start);
997                 return -ENOENT;
998         }
999         *in_folio_ret = in_folio;
1000         return 0;
1001 }
1002 
1003 /*
1004  * Given an address space and start and length, compress the bytes into @pages
1005  * that are allocated on demand.
1006  *
1007  * @type_level is encoded algorithm and level, where level 0 means whatever
1008  * default the algorithm chooses and is opaque here;
1009  * - compression algo are 0-3
1010  * - the level are bits 4-7
1011  *
1012  * @out_pages is an in/out parameter, holds maximum number of pages to allocate
1013  * and returns number of actually allocated pages
1014  *
1015  * @total_in is used to return the number of bytes actually read.  It
1016  * may be smaller than the input length if we had to exit early because we
1017  * ran out of room in the pages array or because we cross the
1018  * max_out threshold.
1019  *
1020  * @total_out is an in/out parameter, must be set to the input length and will
1021  * be also used to return the total number of compressed bytes
1022  */
1023 int btrfs_compress_folios(unsigned int type_level, struct address_space *mapping,
1024                          u64 start, struct folio **folios, unsigned long *out_folios,
1025                          unsigned long *total_in, unsigned long *total_out)
1026 {
1027         int type = btrfs_compress_type(type_level);
1028         int level = btrfs_compress_level(type_level);
1029         struct list_head *workspace;
1030         int ret;
1031 
1032         level = btrfs_compress_set_level(type, level);
1033         workspace = get_workspace(type, level);
1034         ret = compression_compress_pages(type, workspace, mapping, start, folios,
1035                                          out_folios, total_in, total_out);
1036         put_workspace(type, workspace);
1037         return ret;
1038 }
1039 
1040 static int btrfs_decompress_bio(struct compressed_bio *cb)
1041 {
1042         struct list_head *workspace;
1043         int ret;
1044         int type = cb->compress_type;
1045 
1046         workspace = get_workspace(type, 0);
1047         ret = compression_decompress_bio(workspace, cb);
1048         put_workspace(type, workspace);
1049 
1050         if (!ret)
1051                 zero_fill_bio(&cb->orig_bbio->bio);
1052         return ret;
1053 }
1054 
1055 /*
1056  * a less complex decompression routine.  Our compressed data fits in a
1057  * single page, and we want to read a single page out of it.
1058  * start_byte tells us the offset into the compressed data we're interested in
1059  */
1060 int btrfs_decompress(int type, const u8 *data_in, struct page *dest_page,
1061                      unsigned long dest_pgoff, size_t srclen, size_t destlen)
1062 {
1063         struct btrfs_fs_info *fs_info = page_to_fs_info(dest_page);
1064         struct list_head *workspace;
1065         const u32 sectorsize = fs_info->sectorsize;
1066         int ret;
1067 
1068         /*
1069          * The full destination page range should not exceed the page size.
1070          * And the @destlen should not exceed sectorsize, as this is only called for
1071          * inline file extents, which should not exceed sectorsize.
1072          */
1073         ASSERT(dest_pgoff + destlen <= PAGE_SIZE && destlen <= sectorsize);
1074 
1075         workspace = get_workspace(type, 0);
1076         ret = compression_decompress(type, workspace, data_in, dest_page,
1077                                      dest_pgoff, srclen, destlen);
1078         put_workspace(type, workspace);
1079 
1080         return ret;
1081 }
1082 
1083 int __init btrfs_init_compress(void)
1084 {
1085         if (bioset_init(&btrfs_compressed_bioset, BIO_POOL_SIZE,
1086                         offsetof(struct compressed_bio, bbio.bio),
1087                         BIOSET_NEED_BVECS))
1088                 return -ENOMEM;
1089 
1090         compr_pool.shrinker = shrinker_alloc(SHRINKER_NONSLAB, "btrfs-compr-pages");
1091         if (!compr_pool.shrinker)
1092                 return -ENOMEM;
1093 
1094         btrfs_init_workspace_manager(BTRFS_COMPRESS_NONE);
1095         btrfs_init_workspace_manager(BTRFS_COMPRESS_ZLIB);
1096         btrfs_init_workspace_manager(BTRFS_COMPRESS_LZO);
1097         zstd_init_workspace_manager();
1098 
1099         spin_lock_init(&compr_pool.lock);
1100         INIT_LIST_HEAD(&compr_pool.list);
1101         compr_pool.count = 0;
1102         /* 128K / 4K = 32, for 8 threads is 256 pages. */
1103         compr_pool.thresh = BTRFS_MAX_COMPRESSED / PAGE_SIZE * 8;
1104         compr_pool.shrinker->count_objects = btrfs_compr_pool_count;
1105         compr_pool.shrinker->scan_objects = btrfs_compr_pool_scan;
1106         compr_pool.shrinker->batch = 32;
1107         compr_pool.shrinker->seeks = DEFAULT_SEEKS;
1108         shrinker_register(compr_pool.shrinker);
1109 
1110         return 0;
1111 }
1112 
1113 void __cold btrfs_exit_compress(void)
1114 {
1115         /* For now scan drains all pages and does not touch the parameters. */
1116         btrfs_compr_pool_scan(NULL, NULL);
1117         shrinker_free(compr_pool.shrinker);
1118 
1119         btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_NONE);
1120         btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_ZLIB);
1121         btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_LZO);
1122         zstd_cleanup_workspace_manager();
1123         bioset_exit(&btrfs_compressed_bioset);
1124 }
1125 
1126 /*
1127  * Copy decompressed data from working buffer to pages.
1128  *
1129  * @buf:                The decompressed data buffer
1130  * @buf_len:            The decompressed data length
1131  * @decompressed:       Number of bytes that are already decompressed inside the
1132  *                      compressed extent
1133  * @cb:                 The compressed extent descriptor
1134  * @orig_bio:           The original bio that the caller wants to read for
1135  *
1136  * An easier to understand graph is like below:
1137  *
1138  *              |<- orig_bio ->|     |<- orig_bio->|
1139  *      |<-------      full decompressed extent      ----->|
1140  *      |<-----------    @cb range   ---->|
1141  *      |                       |<-- @buf_len -->|
1142  *      |<--- @decompressed --->|
1143  *
1144  * Note that, @cb can be a subpage of the full decompressed extent, but
1145  * @cb->start always has the same as the orig_file_offset value of the full
1146  * decompressed extent.
1147  *
1148  * When reading compressed extent, we have to read the full compressed extent,
1149  * while @orig_bio may only want part of the range.
1150  * Thus this function will ensure only data covered by @orig_bio will be copied
1151  * to.
1152  *
1153  * Return 0 if we have copied all needed contents for @orig_bio.
1154  * Return >0 if we need continue decompress.
1155  */
1156 int btrfs_decompress_buf2page(const char *buf, u32 buf_len,
1157                               struct compressed_bio *cb, u32 decompressed)
1158 {
1159         struct bio *orig_bio = &cb->orig_bbio->bio;
1160         /* Offset inside the full decompressed extent */
1161         u32 cur_offset;
1162 
1163         cur_offset = decompressed;
1164         /* The main loop to do the copy */
1165         while (cur_offset < decompressed + buf_len) {
1166                 struct bio_vec bvec;
1167                 size_t copy_len;
1168                 u32 copy_start;
1169                 /* Offset inside the full decompressed extent */
1170                 u32 bvec_offset;
1171 
1172                 bvec = bio_iter_iovec(orig_bio, orig_bio->bi_iter);
1173                 /*
1174                  * cb->start may underflow, but subtracting that value can still
1175                  * give us correct offset inside the full decompressed extent.
1176                  */
1177                 bvec_offset = page_offset(bvec.bv_page) + bvec.bv_offset - cb->start;
1178 
1179                 /* Haven't reached the bvec range, exit */
1180                 if (decompressed + buf_len <= bvec_offset)
1181                         return 1;
1182 
1183                 copy_start = max(cur_offset, bvec_offset);
1184                 copy_len = min(bvec_offset + bvec.bv_len,
1185                                decompressed + buf_len) - copy_start;
1186                 ASSERT(copy_len);
1187 
1188                 /*
1189                  * Extra range check to ensure we didn't go beyond
1190                  * @buf + @buf_len.
1191                  */
1192                 ASSERT(copy_start - decompressed < buf_len);
1193                 memcpy_to_page(bvec.bv_page, bvec.bv_offset,
1194                                buf + copy_start - decompressed, copy_len);
1195                 cur_offset += copy_len;
1196 
1197                 bio_advance(orig_bio, copy_len);
1198                 /* Finished the bio */
1199                 if (!orig_bio->bi_iter.bi_size)
1200                         return 0;
1201         }
1202         return 1;
1203 }
1204 
1205 /*
1206  * Shannon Entropy calculation
1207  *
1208  * Pure byte distribution analysis fails to determine compressibility of data.
1209  * Try calculating entropy to estimate the average minimum number of bits
1210  * needed to encode the sampled data.
1211  *
1212  * For convenience, return the percentage of needed bits, instead of amount of
1213  * bits directly.
1214  *
1215  * @ENTROPY_LVL_ACEPTABLE - below that threshold, sample has low byte entropy
1216  *                          and can be compressible with high probability
1217  *
1218  * @ENTROPY_LVL_HIGH - data are not compressible with high probability
1219  *
1220  * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate.
1221  */
1222 #define ENTROPY_LVL_ACEPTABLE           (65)
1223 #define ENTROPY_LVL_HIGH                (80)
1224 
1225 /*
1226  * For increasead precision in shannon_entropy calculation,
1227  * let's do pow(n, M) to save more digits after comma:
1228  *
1229  * - maximum int bit length is 64
1230  * - ilog2(MAX_SAMPLE_SIZE)     -> 13
1231  * - 13 * 4 = 52 < 64           -> M = 4
1232  *
1233  * So use pow(n, 4).
1234  */
1235 static inline u32 ilog2_w(u64 n)
1236 {
1237         return ilog2(n * n * n * n);
1238 }
1239 
1240 static u32 shannon_entropy(struct heuristic_ws *ws)
1241 {
1242         const u32 entropy_max = 8 * ilog2_w(2);
1243         u32 entropy_sum = 0;
1244         u32 p, p_base, sz_base;
1245         u32 i;
1246 
1247         sz_base = ilog2_w(ws->sample_size);
1248         for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
1249                 p = ws->bucket[i].count;
1250                 p_base = ilog2_w(p);
1251                 entropy_sum += p * (sz_base - p_base);
1252         }
1253 
1254         entropy_sum /= ws->sample_size;
1255         return entropy_sum * 100 / entropy_max;
1256 }
1257 
1258 #define RADIX_BASE              4U
1259 #define COUNTERS_SIZE           (1U << RADIX_BASE)
1260 
1261 static u8 get4bits(u64 num, int shift) {
1262         u8 low4bits;
1263 
1264         num >>= shift;
1265         /* Reverse order */
1266         low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
1267         return low4bits;
1268 }
1269 
1270 /*
1271  * Use 4 bits as radix base
1272  * Use 16 u32 counters for calculating new position in buf array
1273  *
1274  * @array     - array that will be sorted
1275  * @array_buf - buffer array to store sorting results
1276  *              must be equal in size to @array
1277  * @num       - array size
1278  */
1279 static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
1280                        int num)
1281 {
1282         u64 max_num;
1283         u64 buf_num;
1284         u32 counters[COUNTERS_SIZE];
1285         u32 new_addr;
1286         u32 addr;
1287         int bitlen;
1288         int shift;
1289         int i;
1290 
1291         /*
1292          * Try avoid useless loop iterations for small numbers stored in big
1293          * counters.  Example: 48 33 4 ... in 64bit array
1294          */
1295         max_num = array[0].count;
1296         for (i = 1; i < num; i++) {
1297                 buf_num = array[i].count;
1298                 if (buf_num > max_num)
1299                         max_num = buf_num;
1300         }
1301 
1302         buf_num = ilog2(max_num);
1303         bitlen = ALIGN(buf_num, RADIX_BASE * 2);
1304 
1305         shift = 0;
1306         while (shift < bitlen) {
1307                 memset(counters, 0, sizeof(counters));
1308 
1309                 for (i = 0; i < num; i++) {
1310                         buf_num = array[i].count;
1311                         addr = get4bits(buf_num, shift);
1312                         counters[addr]++;
1313                 }
1314 
1315                 for (i = 1; i < COUNTERS_SIZE; i++)
1316                         counters[i] += counters[i - 1];
1317 
1318                 for (i = num - 1; i >= 0; i--) {
1319                         buf_num = array[i].count;
1320                         addr = get4bits(buf_num, shift);
1321                         counters[addr]--;
1322                         new_addr = counters[addr];
1323                         array_buf[new_addr] = array[i];
1324                 }
1325 
1326                 shift += RADIX_BASE;
1327 
1328                 /*
1329                  * Normal radix expects to move data from a temporary array, to
1330                  * the main one.  But that requires some CPU time. Avoid that
1331                  * by doing another sort iteration to original array instead of
1332                  * memcpy()
1333                  */
1334                 memset(counters, 0, sizeof(counters));
1335 
1336                 for (i = 0; i < num; i ++) {
1337                         buf_num = array_buf[i].count;
1338                         addr = get4bits(buf_num, shift);
1339                         counters[addr]++;
1340                 }
1341 
1342                 for (i = 1; i < COUNTERS_SIZE; i++)
1343                         counters[i] += counters[i - 1];
1344 
1345                 for (i = num - 1; i >= 0; i--) {
1346                         buf_num = array_buf[i].count;
1347                         addr = get4bits(buf_num, shift);
1348                         counters[addr]--;
1349                         new_addr = counters[addr];
1350                         array[new_addr] = array_buf[i];
1351                 }
1352 
1353                 shift += RADIX_BASE;
1354         }
1355 }
1356 
1357 /*
1358  * Size of the core byte set - how many bytes cover 90% of the sample
1359  *
1360  * There are several types of structured binary data that use nearly all byte
1361  * values. The distribution can be uniform and counts in all buckets will be
1362  * nearly the same (eg. encrypted data). Unlikely to be compressible.
1363  *
1364  * Other possibility is normal (Gaussian) distribution, where the data could
1365  * be potentially compressible, but we have to take a few more steps to decide
1366  * how much.
1367  *
1368  * @BYTE_CORE_SET_LOW  - main part of byte values repeated frequently,
1369  *                       compression algo can easy fix that
1370  * @BYTE_CORE_SET_HIGH - data have uniform distribution and with high
1371  *                       probability is not compressible
1372  */
1373 #define BYTE_CORE_SET_LOW               (64)
1374 #define BYTE_CORE_SET_HIGH              (200)
1375 
1376 static int byte_core_set_size(struct heuristic_ws *ws)
1377 {
1378         u32 i;
1379         u32 coreset_sum = 0;
1380         const u32 core_set_threshold = ws->sample_size * 90 / 100;
1381         struct bucket_item *bucket = ws->bucket;
1382 
1383         /* Sort in reverse order */
1384         radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);
1385 
1386         for (i = 0; i < BYTE_CORE_SET_LOW; i++)
1387                 coreset_sum += bucket[i].count;
1388 
1389         if (coreset_sum > core_set_threshold)
1390                 return i;
1391 
1392         for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
1393                 coreset_sum += bucket[i].count;
1394                 if (coreset_sum > core_set_threshold)
1395                         break;
1396         }
1397 
1398         return i;
1399 }
1400 
1401 /*
1402  * Count byte values in buckets.
1403  * This heuristic can detect textual data (configs, xml, json, html, etc).
1404  * Because in most text-like data byte set is restricted to limited number of
1405  * possible characters, and that restriction in most cases makes data easy to
1406  * compress.
1407  *
1408  * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
1409  *      less - compressible
1410  *      more - need additional analysis
1411  */
1412 #define BYTE_SET_THRESHOLD              (64)
1413 
1414 static u32 byte_set_size(const struct heuristic_ws *ws)
1415 {
1416         u32 i;
1417         u32 byte_set_size = 0;
1418 
1419         for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
1420                 if (ws->bucket[i].count > 0)
1421                         byte_set_size++;
1422         }
1423 
1424         /*
1425          * Continue collecting count of byte values in buckets.  If the byte
1426          * set size is bigger then the threshold, it's pointless to continue,
1427          * the detection technique would fail for this type of data.
1428          */
1429         for (; i < BUCKET_SIZE; i++) {
1430                 if (ws->bucket[i].count > 0) {
1431                         byte_set_size++;
1432                         if (byte_set_size > BYTE_SET_THRESHOLD)
1433                                 return byte_set_size;
1434                 }
1435         }
1436 
1437         return byte_set_size;
1438 }
1439 
1440 static bool sample_repeated_patterns(struct heuristic_ws *ws)
1441 {
1442         const u32 half_of_sample = ws->sample_size / 2;
1443         const u8 *data = ws->sample;
1444 
1445         return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
1446 }
1447 
1448 static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
1449                                      struct heuristic_ws *ws)
1450 {
1451         struct page *page;
1452         u64 index, index_end;
1453         u32 i, curr_sample_pos;
1454         u8 *in_data;
1455 
1456         /*
1457          * Compression handles the input data by chunks of 128KiB
1458          * (defined by BTRFS_MAX_UNCOMPRESSED)
1459          *
1460          * We do the same for the heuristic and loop over the whole range.
1461          *
1462          * MAX_SAMPLE_SIZE - calculated under assumption that heuristic will
1463          * process no more than BTRFS_MAX_UNCOMPRESSED at a time.
1464          */
1465         if (end - start > BTRFS_MAX_UNCOMPRESSED)
1466                 end = start + BTRFS_MAX_UNCOMPRESSED;
1467 
1468         index = start >> PAGE_SHIFT;
1469         index_end = end >> PAGE_SHIFT;
1470 
1471         /* Don't miss unaligned end */
1472         if (!PAGE_ALIGNED(end))
1473                 index_end++;
1474 
1475         curr_sample_pos = 0;
1476         while (index < index_end) {
1477                 page = find_get_page(inode->i_mapping, index);
1478                 in_data = kmap_local_page(page);
1479                 /* Handle case where the start is not aligned to PAGE_SIZE */
1480                 i = start % PAGE_SIZE;
1481                 while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
1482                         /* Don't sample any garbage from the last page */
1483                         if (start > end - SAMPLING_READ_SIZE)
1484                                 break;
1485                         memcpy(&ws->sample[curr_sample_pos], &in_data[i],
1486                                         SAMPLING_READ_SIZE);
1487                         i += SAMPLING_INTERVAL;
1488                         start += SAMPLING_INTERVAL;
1489                         curr_sample_pos += SAMPLING_READ_SIZE;
1490                 }
1491                 kunmap_local(in_data);
1492                 put_page(page);
1493 
1494                 index++;
1495         }
1496 
1497         ws->sample_size = curr_sample_pos;
1498 }
1499 
1500 /*
1501  * Compression heuristic.
1502  *
1503  * The following types of analysis can be performed:
1504  * - detect mostly zero data
1505  * - detect data with low "byte set" size (text, etc)
1506  * - detect data with low/high "core byte" set
1507  *
1508  * Return non-zero if the compression should be done, 0 otherwise.
1509  */
1510 int btrfs_compress_heuristic(struct btrfs_inode *inode, u64 start, u64 end)
1511 {
1512         struct list_head *ws_list = get_workspace(0, 0);
1513         struct heuristic_ws *ws;
1514         u32 i;
1515         u8 byte;
1516         int ret = 0;
1517 
1518         ws = list_entry(ws_list, struct heuristic_ws, list);
1519 
1520         heuristic_collect_sample(&inode->vfs_inode, start, end, ws);
1521 
1522         if (sample_repeated_patterns(ws)) {
1523                 ret = 1;
1524                 goto out;
1525         }
1526 
1527         memset(ws->bucket, 0, sizeof(*ws->bucket)*BUCKET_SIZE);
1528 
1529         for (i = 0; i < ws->sample_size; i++) {
1530                 byte = ws->sample[i];
1531                 ws->bucket[byte].count++;
1532         }
1533 
1534         i = byte_set_size(ws);
1535         if (i < BYTE_SET_THRESHOLD) {
1536                 ret = 2;
1537                 goto out;
1538         }
1539 
1540         i = byte_core_set_size(ws);
1541         if (i <= BYTE_CORE_SET_LOW) {
1542                 ret = 3;
1543                 goto out;
1544         }
1545 
1546         if (i >= BYTE_CORE_SET_HIGH) {
1547                 ret = 0;
1548                 goto out;
1549         }
1550 
1551         i = shannon_entropy(ws);
1552         if (i <= ENTROPY_LVL_ACEPTABLE) {
1553                 ret = 4;
1554                 goto out;
1555         }
1556 
1557         /*
1558          * For the levels below ENTROPY_LVL_HIGH, additional analysis would be
1559          * needed to give green light to compression.
1560          *
1561          * For now just assume that compression at that level is not worth the
1562          * resources because:
1563          *
1564          * 1. it is possible to defrag the data later
1565          *
1566          * 2. the data would turn out to be hardly compressible, eg. 150 byte
1567          * values, every bucket has counter at level ~54. The heuristic would
1568          * be confused. This can happen when data have some internal repeated
1569          * patterns like "abbacbbc...". This can be detected by analyzing
1570          * pairs of bytes, which is too costly.
1571          */
1572         if (i < ENTROPY_LVL_HIGH) {
1573                 ret = 5;
1574                 goto out;
1575         } else {
1576                 ret = 0;
1577                 goto out;
1578         }
1579 
1580 out:
1581         put_workspace(0, ws_list);
1582         return ret;
1583 }
1584 
1585 /*
1586  * Convert the compression suffix (eg. after "zlib" starting with ":") to
1587  * level, unrecognized string will set the default level
1588  */
1589 unsigned int btrfs_compress_str2level(unsigned int type, const char *str)
1590 {
1591         unsigned int level = 0;
1592         int ret;
1593 
1594         if (!type)
1595                 return 0;
1596 
1597         if (str[0] == ':') {
1598                 ret = kstrtouint(str + 1, 10, &level);
1599                 if (ret)
1600                         level = 0;
1601         }
1602 
1603         level = btrfs_compress_set_level(type, level);
1604 
1605         return level;
1606 }
1607 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

sflogo.php