// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Squashfs - a compressed read only filesystem for Linux
 *
 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
 * Phillip Lougher <phillip@squashfs.org.uk>
 *
 * block.c
 */

/*
 * This file implements the low-level routines to read and decompress
 * datablocks and metadata blocks.
 */

#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/string.h>
#include <linux/bio.h>

#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs.h"
#include "decompressor.h"
#include "page_actor.h"

/*
 * Returns the number of bytes copied to the page actor.
 */
static int copy_bio_to_actor(struct bio *bio,
			     struct squashfs_page_actor *actor,
			     int offset, int req_length)
{
	void *actor_addr;
	struct bvec_iter_all iter_all = {};
	struct bio_vec *bvec = bvec_init_iter_all(&iter_all);
	int copied_bytes = 0;
	int actor_offset = 0;

	squashfs_actor_nobuff(actor);
	actor_addr = squashfs_first_page(actor);

	if (WARN_ON_ONCE(!bio_next_segment(bio, &iter_all)))
		return 0;

	while (copied_bytes < req_length) {
		int bytes_to_copy = min_t(int, bvec->bv_len - offset,
					  PAGE_SIZE - actor_offset);

		bytes_to_copy = min_t(int, bytes_to_copy,
				      req_length - copied_bytes);
		if (!IS_ERR(actor_addr))
			memcpy(actor_addr + actor_offset, bvec_virt(bvec) +
					offset, bytes_to_copy);

		actor_offset += bytes_to_copy;
		copied_bytes += bytes_to_copy;
		offset += bytes_to_copy;

		if (actor_offset >= PAGE_SIZE) {
			actor_addr = squashfs_next_page(actor);
			if (!actor_addr)
				break;
			actor_offset = 0;
		}
		if (offset >= bvec->bv_len) {
			if (!bio_next_segment(bio, &iter_all))
				break;
			offset = 0;
		}
	}
	squashfs_finish_page(actor);
	return copied_bytes;
}
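
/*
 * Illustrative note (not part of the original file): each iteration of the
 * loop above copies the largest run that fits in the current bio segment,
 * the current actor page and the remaining request at the same time.  For
 * example, assuming page-sized bio segments (bv_len == PAGE_SIZE == 4096),
 * a starting offset of 3072 and req_length == 2048: the first iteration
 * copies 1024 bytes (the rest of the first segment) and advances to the next
 * segment, and the second iteration copies the remaining 1024 bytes into the
 * same actor page.
 */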

static int squashfs_bio_read_cached(struct bio *fullbio,
		struct address_space *cache_mapping, u64 index, int length,
		u64 read_start, u64 read_end, int page_count)
{
	struct page *head_to_cache = NULL, *tail_to_cache = NULL;
	struct block_device *bdev = fullbio->bi_bdev;
	int start_idx = 0, end_idx = 0;
	struct bvec_iter_all iter_all;
	struct bio *bio = NULL;
	struct bio_vec *bv;
	int idx = 0;
	int err = 0;

	bio_for_each_segment_all(bv, fullbio, iter_all) {
		struct page *page = bv->bv_page;

		if (page->mapping == cache_mapping) {
			idx++;
			continue;
		}

		/*
		 * We only use this when the device block size is the same as
		 * the page size, so read_start and read_end cover full pages.
		 *
		 * Compare these to the original required index and length to
		 * only cache pages which were requested partially, since these
		 * are the ones which are likely to be needed when reading
		 * adjacent blocks.
		 */
		if (idx == 0 && index != read_start)
			head_to_cache = page;
		else if (idx == page_count - 1 && index + length != read_end)
			tail_to_cache = page;

		if (!bio || idx != end_idx) {
			struct bio *new = bio_alloc_clone(bdev, fullbio,
							  GFP_NOIO, &fs_bio_set);

			if (bio) {
				bio_trim(bio, start_idx * PAGE_SECTORS,
					 (end_idx - start_idx) * PAGE_SECTORS);
				bio_chain(bio, new);
				submit_bio(bio);
			}

			bio = new;
			start_idx = idx;
		}

		idx++;
		end_idx = idx;
	}

	if (bio) {
		bio_trim(bio, start_idx * PAGE_SECTORS,
			 (end_idx - start_idx) * PAGE_SECTORS);
		err = submit_bio_wait(bio);
		bio_put(bio);
	}

	if (err)
		return err;

	if (head_to_cache) {
		int ret = add_to_page_cache_lru(head_to_cache, cache_mapping,
						read_start >> PAGE_SHIFT,
						GFP_NOIO);

		if (!ret) {
			SetPageUptodate(head_to_cache);
			unlock_page(head_to_cache);
		}
	}

	if (tail_to_cache) {
		int ret = add_to_page_cache_lru(tail_to_cache, cache_mapping,
						(read_end >> PAGE_SHIFT) - 1,
						GFP_NOIO);

		if (!ret) {
			SetPageUptodate(tail_to_cache);
			unlock_page(tail_to_cache);
		}
	}

	return 0;
}
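
/*
 * Illustrative note (not part of the original file): the head/tail caching
 * above only matters when a read is not block aligned.  For example, assuming
 * PAGE_SIZE == devblksize == 4096, a request with index == 0x1200 and
 * length == 0x1800 is expanded to read_start == 0x1000 and read_end == 0x3000
 * and covers two pages: the first page is only needed from 0x1200 onwards, so
 * it becomes head_to_cache, and the last page is only needed up to 0x2a00, so
 * it becomes tail_to_cache; both may satisfy later reads of adjacent blocks.
 */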

static struct page *squashfs_get_cache_page(struct address_space *mapping,
					    pgoff_t index)
{
	struct page *page;

	if (!mapping)
		return NULL;

	page = find_get_page(mapping, index);
	if (!page)
		return NULL;

	if (!PageUptodate(page)) {
		put_page(page);
		return NULL;
	}

	return page;
}

static int squashfs_bio_read(struct super_block *sb, u64 index, int length,
			     struct bio **biop, int *block_offset)
{
	struct squashfs_sb_info *msblk = sb->s_fs_info;
	struct address_space *cache_mapping = msblk->cache_mapping;
	const u64 read_start = round_down(index, msblk->devblksize);
	const sector_t block = read_start >> msblk->devblksize_log2;
	const u64 read_end = round_up(index + length, msblk->devblksize);
	const sector_t block_end = read_end >> msblk->devblksize_log2;
	int offset = read_start - round_down(index, PAGE_SIZE);
	int total_len = (block_end - block) << msblk->devblksize_log2;
	const int page_count = DIV_ROUND_UP(total_len + offset, PAGE_SIZE);
	int error, i;
	struct bio *bio;

	bio = bio_kmalloc(page_count, GFP_NOIO);
	if (!bio)
		return -ENOMEM;
	bio_init(bio, sb->s_bdev, bio->bi_inline_vecs, page_count, REQ_OP_READ);
	bio->bi_iter.bi_sector = block * (msblk->devblksize >> SECTOR_SHIFT);

	for (i = 0; i < page_count; ++i) {
		unsigned int len =
			min_t(unsigned int, PAGE_SIZE - offset, total_len);
		pgoff_t index = (read_start >> PAGE_SHIFT) + i;
		struct page *page;

		page = squashfs_get_cache_page(cache_mapping, index);
		if (!page)
			page = alloc_page(GFP_NOIO);

		if (!page) {
			error = -ENOMEM;
			goto out_free_bio;
		}

		/*
		 * Use the __ version to avoid merging since we need each page
		 * to be separate when we check for and avoid cached pages.
		 */
		__bio_add_page(bio, page, len, offset);
		offset = 0;
		total_len -= len;
	}

	if (cache_mapping)
		error = squashfs_bio_read_cached(bio, cache_mapping, index,
						 length, read_start, read_end,
						 page_count);
	else
		error = submit_bio_wait(bio);
	if (error)
		goto out_free_bio;

	*biop = bio;
	*block_offset = index & ((1 << msblk->devblksize_log2) - 1);
	return 0;

out_free_bio:
	bio_free_pages(bio);
	bio_uninit(bio);
	kfree(bio);
	return error;
}
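
/*
 * Illustrative note (not part of the original file): the geometry above
 * expands [index, index + length) to whole device blocks and then to whole
 * pages.  For example, assuming PAGE_SIZE == 4096, devblksize == 1024,
 * index == 0x1c34 and length == 300: read_start == 0x1c00,
 * read_end == 0x2000, total_len == 1024, offset == 0x1c00 - 0x1000 == 0xc00,
 * page_count == 1, and the returned *block_offset == 0x1c34 & 0x3ff == 0x234.
 */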

/*
 * Read and decompress a metadata block or datablock.  Length is non-zero
 * if a datablock is being read (the size is stored elsewhere in the
 * filesystem), otherwise the length is obtained from the first two bytes of
 * the metadata block.  A bit in the length field indicates if the block
 * is stored uncompressed in the filesystem (usually because compression
 * generated a larger block - this does occasionally happen with compression
 * algorithms).
 */
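/*
 * Illustrative note (not part of the original file): for a metadata block the
 * two-byte little-endian header read below encodes both values.  Assuming the
 * usual squashfs layout, where bit 15 (SQUASHFS_COMPRESSED_BIT) is set for
 * blocks stored uncompressed and the low bits give the on-disk size, a header
 * of 0x0123 describes a compressed block of 0x123 bytes, while 0x8123
 * describes the same number of bytes stored uncompressed.
 */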
int squashfs_read_data(struct super_block *sb, u64 index, int length,
		u64 *next_index, struct squashfs_page_actor *output)
{
	struct squashfs_sb_info *msblk = sb->s_fs_info;
	struct bio *bio = NULL;
	int compressed;
	int res;
	int offset;

	if (length) {
		/*
		 * Datablock.
		 */
		compressed = SQUASHFS_COMPRESSED_BLOCK(length);
		length = SQUASHFS_COMPRESSED_SIZE_BLOCK(length);
		TRACE("Block @ 0x%llx, %scompressed size %d, src size %d\n",
			index, compressed ? "" : "un", length, output->length);
	} else {
		/*
		 * Metadata block.
		 */
		const u8 *data;
		struct bvec_iter_all iter_all = {};
		struct bio_vec *bvec = bvec_init_iter_all(&iter_all);

		if (index + 2 > msblk->bytes_used) {
			res = -EIO;
			goto out;
		}
		res = squashfs_bio_read(sb, index, 2, &bio, &offset);
		if (res)
			goto out;

		if (WARN_ON_ONCE(!bio_next_segment(bio, &iter_all))) {
			res = -EIO;
			goto out_free_bio;
		}
		/* Extract the length of the metadata block */
		data = bvec_virt(bvec);
		length = data[offset];
		if (offset < bvec->bv_len - 1) {
			length |= data[offset + 1] << 8;
		} else {
			if (WARN_ON_ONCE(!bio_next_segment(bio, &iter_all))) {
				res = -EIO;
				goto out_free_bio;
			}
			data = bvec_virt(bvec);
			length |= data[0] << 8;
		}
		bio_free_pages(bio);
		bio_uninit(bio);
		kfree(bio);

		compressed = SQUASHFS_COMPRESSED(length);
		length = SQUASHFS_COMPRESSED_SIZE(length);
		index += 2;

		TRACE("Block @ 0x%llx, %scompressed size %d\n", index - 2,
		      compressed ? "" : "un", length);
	}
	if (length <= 0 || length > output->length ||
			(index + length) > msblk->bytes_used) {
		res = -EIO;
		goto out;
	}

	if (next_index)
		*next_index = index + length;

	res = squashfs_bio_read(sb, index, length, &bio, &offset);
	if (res)
		goto out;

	if (compressed) {
		if (!msblk->stream) {
			res = -EIO;
			goto out_free_bio;
		}
		res = msblk->thread_ops->decompress(msblk, bio, offset, length, output);
	} else {
		res = copy_bio_to_actor(bio, output, offset, length);
	}

out_free_bio:
	bio_free_pages(bio);
	bio_uninit(bio);
	kfree(bio);
out:
	if (res < 0) {
		ERROR("Failed to read block 0x%llx: %d\n", index, res);
		if (msblk->panic_on_errors)
			panic("squashfs read failed");
	}

	return res;
}
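
/*
 * Illustrative note (not part of the original file): a minimal sketch of how
 * a caller might chain metadata reads, assuming "start" points at the first
 * two-byte metadata header and "actor" wraps the destination pages:
 *
 *	u64 next = start;
 *	int bytes = squashfs_read_data(sb, next, 0, &next, actor);
 *
 * On success, "bytes" is the number of bytes placed in the actor and "next"
 * has been advanced past the header and the on-disk block, ready for the
 * following squashfs_read_data() call.
 */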