// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#include "internal.h"
#include <linux/sched/mm.h>
#include <trace/events/erofs.h>

void erofs_unmap_metabuf(struct erofs_buf *buf)
{
	if (buf->kmap_type == EROFS_KMAP)
		kunmap_local(buf->base);
	buf->base = NULL;
	buf->kmap_type = EROFS_NO_KMAP;
}

void erofs_put_metabuf(struct erofs_buf *buf)
{
	if (!buf->page)
		return;
	erofs_unmap_metabuf(buf);
	folio_put(page_folio(buf->page));
	buf->page = NULL;
}

/*
 * Read the metadata page covering @offset and return a pointer to the byte
 * at @offset (or NULL if EROFS_NO_KMAP was requested).
 */
void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset,
		  enum erofs_kmap_type type)
{
	pgoff_t index = offset >> PAGE_SHIFT;
	struct folio *folio = NULL;

	if (buf->page) {
		folio = page_folio(buf->page);
		if (folio_file_page(folio, index) != buf->page)
			erofs_unmap_metabuf(buf);
	}
	if (!folio || !folio_contains(folio, index)) {
		erofs_put_metabuf(buf);
		folio = read_mapping_folio(buf->mapping, index, NULL);
		if (IS_ERR(folio))
			return folio;
	}
	buf->page = folio_file_page(folio, index);

	if (buf->kmap_type == EROFS_NO_KMAP) {
		if (type == EROFS_KMAP)
			buf->base = kmap_local_page(buf->page);
		buf->kmap_type = type;
	} else if (buf->kmap_type != type) {
		DBG_BUGON(1);
		return ERR_PTR(-EFAULT);
	}
	if (type == EROFS_NO_KMAP)
		return NULL;
	return buf->base + (offset & ~PAGE_MASK);
}

void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);

	if (erofs_is_fileio_mode(sbi))
		buf->mapping = file_inode(sbi->fdev)->i_mapping;
	else if (erofs_is_fscache_mode(sb))
		buf->mapping = sbi->s_fscache->inode->i_mapping;
	else
		buf->mapping = sb->s_bdev->bd_mapping;
}

void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb,
			 erofs_off_t offset, enum erofs_kmap_type type)
{
	erofs_init_metabuf(buf, sb);
	return erofs_bread(buf, offset, type);
}

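/*
 * Typical metadata buffer usage (an illustrative sketch based on the callers
 * below, e.g. erofs_map_blocks()):
 *
 *	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
 *	void *ptr = erofs_read_metabuf(&buf, sb, pos, EROFS_KMAP);
 *
 *	if (!IS_ERR(ptr)) {
 *		// use the metadata byte(s) starting at *ptr
 *	}
 *	erofs_put_metabuf(&buf);
 */
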
static int erofs_map_blocks_flatmode(struct inode *inode,
				     struct erofs_map_blocks *map)
{
	struct erofs_inode *vi = EROFS_I(inode);
	struct super_block *sb = inode->i_sb;
	bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);
	erofs_blk_t lastblk = erofs_iblks(inode) - tailendpacking;

	map->m_flags = EROFS_MAP_MAPPED;	/* no hole in flat inodes */
	if (map->m_la < erofs_pos(sb, lastblk)) {
		map->m_pa = erofs_pos(sb, vi->raw_blkaddr) + map->m_la;
		map->m_plen = erofs_pos(sb, lastblk) - map->m_la;
	} else {
		DBG_BUGON(!tailendpacking);
		map->m_pa = erofs_iloc(inode) + vi->inode_isize +
			vi->xattr_isize + erofs_blkoff(sb, map->m_la);
		map->m_plen = inode->i_size - map->m_la;

		/* inline data should be located in the same meta block */
		if (erofs_blkoff(sb, map->m_pa) + map->m_plen > sb->s_blocksize) {
			erofs_err(sb, "inline data across blocks @ nid %llu", vi->nid);
			DBG_BUGON(1);
			return -EFSCORRUPTED;
		}
		map->m_flags |= EROFS_MAP_META;
	}
	return 0;
}

int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map)
{
	struct super_block *sb = inode->i_sb;
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_inode_chunk_index *idx;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	u64 chunknr;
	unsigned int unit;
	erofs_off_t pos;
	void *kaddr;
	int err = 0;

	trace_erofs_map_blocks_enter(inode, map, 0);
	map->m_deviceid = 0;
	if (map->m_la >= inode->i_size) {
		/* leave out-of-bound access unmapped */
		map->m_flags = 0;
		map->m_plen = map->m_llen;
		goto out;
	}

	if (vi->datalayout != EROFS_INODE_CHUNK_BASED) {
		err = erofs_map_blocks_flatmode(inode, map);
		goto out;
	}

	if (vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
		unit = sizeof(*idx);			/* chunk index */
	else
		unit = EROFS_BLOCK_MAP_ENTRY_SIZE;	/* block map */

	chunknr = map->m_la >> vi->chunkbits;
	pos = ALIGN(erofs_iloc(inode) + vi->inode_isize +
		    vi->xattr_isize, unit) + unit * chunknr;

	kaddr = erofs_read_metabuf(&buf, sb, pos, EROFS_KMAP);
	if (IS_ERR(kaddr)) {
		err = PTR_ERR(kaddr);
		goto out;
	}
	map->m_la = chunknr << vi->chunkbits;
	map->m_plen = min_t(erofs_off_t, 1UL << vi->chunkbits,
			    round_up(inode->i_size - map->m_la, sb->s_blocksize));

	/* handle block map */
	if (!(vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)) {
		__le32 *blkaddr = kaddr;

		if (le32_to_cpu(*blkaddr) == EROFS_NULL_ADDR) {
			map->m_flags = 0;
		} else {
			map->m_pa = erofs_pos(sb, le32_to_cpu(*blkaddr));
			map->m_flags = EROFS_MAP_MAPPED;
		}
		goto out_unlock;
	}
	/* parse chunk indexes */
	idx = kaddr;
	switch (le32_to_cpu(idx->blkaddr)) {
	case EROFS_NULL_ADDR:
		map->m_flags = 0;
		break;
	default:
		map->m_deviceid = le16_to_cpu(idx->device_id) &
			EROFS_SB(sb)->device_id_mask;
		map->m_pa = erofs_pos(sb, le32_to_cpu(idx->blkaddr));
		map->m_flags = EROFS_MAP_MAPPED;
		break;
	}
out_unlock:
	erofs_put_metabuf(&buf);
out:
	if (!err)
		map->m_llen = map->m_plen;
	trace_erofs_map_blocks_exit(inode, map, 0, err);
	return err;
}

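/*
 * Multi-device handling: a nonzero map->m_deviceid selects an extra device
 * registered in the device idr; otherwise, for non-flat multi-device images,
 * the physical address is matched against each device's mapped block range
 * and rebased to a device-relative offset.
 */
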
static void erofs_fill_from_devinfo(struct erofs_map_dev *map,
				    struct erofs_device_info *dif)
{
	map->m_bdev = NULL;
	map->m_fp = NULL;
	if (dif->file) {
		if (S_ISBLK(file_inode(dif->file)->i_mode))
			map->m_bdev = file_bdev(dif->file);
		else
			map->m_fp = dif->file;
	}
	map->m_daxdev = dif->dax_dev;
	map->m_dax_part_off = dif->dax_part_off;
	map->m_fscache = dif->fscache;
}

int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
{
	struct erofs_dev_context *devs = EROFS_SB(sb)->devs;
	struct erofs_device_info *dif;
	erofs_off_t startoff, length;
	int id;

	/* primary device by default */
	map->m_bdev = sb->s_bdev;
	map->m_daxdev = EROFS_SB(sb)->dax_dev;
	map->m_dax_part_off = EROFS_SB(sb)->dax_part_off;
	map->m_fscache = EROFS_SB(sb)->s_fscache;
	map->m_fp = EROFS_SB(sb)->fdev;

	if (map->m_deviceid) {
		down_read(&devs->rwsem);
		dif = idr_find(&devs->tree, map->m_deviceid - 1);
		if (!dif) {
			up_read(&devs->rwsem);
			return -ENODEV;
		}
		if (devs->flatdev) {
			map->m_pa += erofs_pos(sb, dif->mapped_blkaddr);
			up_read(&devs->rwsem);
			return 0;
		}
		erofs_fill_from_devinfo(map, dif);
		up_read(&devs->rwsem);
	} else if (devs->extra_devices && !devs->flatdev) {
		down_read(&devs->rwsem);
		idr_for_each_entry(&devs->tree, dif, id) {
			if (!dif->mapped_blkaddr)
				continue;

			startoff = erofs_pos(sb, dif->mapped_blkaddr);
			length = erofs_pos(sb, dif->blocks);
			if (map->m_pa >= startoff &&
			    map->m_pa < startoff + length) {
				map->m_pa -= startoff;
				erofs_fill_from_devinfo(map, dif);
				break;
			}
		}
		up_read(&devs->rwsem);
	}
	return 0;
}

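/*
 * Sub-request accounting for folios under read: erofs_onlinefolio_init()
 * takes the initial reference, erofs_onlinefolio_split() adds one per
 * outstanding part, and erofs_onlinefolio_end() drops one while recording
 * any I/O error; the folio read is completed once the last part finishes.
 */
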
/*
 * bit 30: I/O error occurred on this folio
 * bit 0 - 29: remaining parts to complete this folio
 */
#define EROFS_ONLINEFOLIO_EIO			(1 << 30)

void erofs_onlinefolio_init(struct folio *folio)
{
	union {
		atomic_t o;
		void *v;
	} u = { .o = ATOMIC_INIT(1) };

	folio->private = u.v;	/* valid only if file-backed folio is locked */
}

void erofs_onlinefolio_split(struct folio *folio)
{
	atomic_inc((atomic_t *)&folio->private);
}

void erofs_onlinefolio_end(struct folio *folio, int err)
{
	int orig, v;

	do {
		orig = atomic_read((atomic_t *)&folio->private);
		v = (orig - 1) | (err ? EROFS_ONLINEFOLIO_EIO : 0);
	} while (atomic_cmpxchg((atomic_t *)&folio->private, orig, v) != orig);

	if (v & ~EROFS_ONLINEFOLIO_EIO)
		return;
	folio->private = 0;
	folio_end_read(folio, !(v & EROFS_ONLINEFOLIO_EIO));
}

static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
	int ret;
	struct super_block *sb = inode->i_sb;
	struct erofs_map_blocks map;
	struct erofs_map_dev mdev;

	map.m_la = offset;
	map.m_llen = length;

	ret = erofs_map_blocks(inode, &map);
	if (ret < 0)
		return ret;

	mdev = (struct erofs_map_dev) {
		.m_deviceid = map.m_deviceid,
		.m_pa = map.m_pa,
	};
	ret = erofs_map_dev(sb, &mdev);
	if (ret)
		return ret;

	iomap->offset = map.m_la;
	if (flags & IOMAP_DAX)
		iomap->dax_dev = mdev.m_daxdev;
	else
		iomap->bdev = mdev.m_bdev;
	iomap->length = map.m_llen;
	iomap->flags = 0;
	iomap->private = NULL;

	if (!(map.m_flags & EROFS_MAP_MAPPED)) {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		if (!iomap->length)
			iomap->length = length;
		return 0;
	}

	if (map.m_flags & EROFS_MAP_META) {
		void *ptr;
		struct erofs_buf buf = __EROFS_BUF_INITIALIZER;

		iomap->type = IOMAP_INLINE;
		ptr = erofs_read_metabuf(&buf, sb, mdev.m_pa, EROFS_KMAP);
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);
		iomap->inline_data = ptr;
		iomap->private = buf.base;
	} else {
		iomap->type = IOMAP_MAPPED;
		iomap->addr = mdev.m_pa;
		if (flags & IOMAP_DAX)
			iomap->addr += mdev.m_dax_part_off;
	}
	return 0;
}

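/*
 * For IOMAP_INLINE extents, erofs_iomap_begin() leaves the metadata buffer
 * mapped and stashes its kernel address in iomap->private, so that
 * erofs_iomap_end() can rebuild the erofs_buf and release it afterwards.
 */
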
static int erofs_iomap_end(struct inode *inode, loff_t pos, loff_t length,
		ssize_t written, unsigned int flags, struct iomap *iomap)
{
	void *ptr = iomap->private;

	if (ptr) {
		struct erofs_buf buf = {
			.page = kmap_to_page(ptr),
			.base = ptr,
			.kmap_type = EROFS_KMAP,
		};

		DBG_BUGON(iomap->type != IOMAP_INLINE);
		erofs_put_metabuf(&buf);
	} else {
		DBG_BUGON(iomap->type == IOMAP_INLINE);
	}
	return written;
}

static const struct iomap_ops erofs_iomap_ops = {
	.iomap_begin = erofs_iomap_begin,
	.iomap_end = erofs_iomap_end,
};

int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 u64 start, u64 len)
{
	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) {
#ifdef CONFIG_EROFS_FS_ZIP
		return iomap_fiemap(inode, fieinfo, start, len,
				    &z_erofs_iomap_report_ops);
#else
		return -EOPNOTSUPP;
#endif
	}
	return iomap_fiemap(inode, fieinfo, start, len, &erofs_iomap_ops);
}

/*
 * Since there are no write or truncate flows, no inode locking needs to be
 * held at the moment.
 */
static int erofs_read_folio(struct file *file, struct folio *folio)
{
	return iomap_read_folio(folio, &erofs_iomap_ops);
}

static void erofs_readahead(struct readahead_control *rac)
{
	return iomap_readahead(rac, &erofs_iomap_ops);
}

static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
{
	return iomap_bmap(mapping, block, &erofs_iomap_ops);
}

static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	/* no need to take the (shared) inode lock since it's a ro filesystem */
	if (!iov_iter_count(to))
		return 0;

#ifdef CONFIG_FS_DAX
	if (IS_DAX(inode))
		return dax_iomap_rw(iocb, to, &erofs_iomap_ops);
#endif
	if (iocb->ki_flags & IOCB_DIRECT) {
		struct block_device *bdev = inode->i_sb->s_bdev;
		unsigned int blksize_mask;

		if (bdev)
			blksize_mask = bdev_logical_block_size(bdev) - 1;
		else
			blksize_mask = i_blocksize(inode) - 1;

		if ((iocb->ki_pos | iov_iter_count(to) |
		     iov_iter_alignment(to)) & blksize_mask)
			return -EINVAL;

		return iomap_dio_rw(iocb, to, &erofs_iomap_ops,
				    NULL, 0, NULL, 0);
	}
	return filemap_read(iocb, to, 0);
}

/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_aops = {
	.read_folio = erofs_read_folio,
	.readahead = erofs_readahead,
	.bmap = erofs_bmap,
	.direct_IO = noop_direct_IO,
	.release_folio = iomap_release_folio,
	.invalidate_folio = iomap_invalidate_folio,
};

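/*
 * DAX mappings below stay read-only: erofs_file_mmap() rejects shared
 * mappings that could become writable (VM_SHARED with VM_MAYWRITE), since
 * the filesystem is read-only.
 */
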
#ifdef CONFIG_FS_DAX
static vm_fault_t erofs_dax_huge_fault(struct vm_fault *vmf,
		unsigned int order)
{
	return dax_iomap_fault(vmf, order, NULL, NULL, &erofs_iomap_ops);
}

static vm_fault_t erofs_dax_fault(struct vm_fault *vmf)
{
	return erofs_dax_huge_fault(vmf, 0);
}

static const struct vm_operations_struct erofs_dax_vm_ops = {
	.fault		= erofs_dax_fault,
	.huge_fault	= erofs_dax_huge_fault,
};

static int erofs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!IS_DAX(file_inode(file)))
		return generic_file_readonly_mmap(file, vma);

	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;

	vma->vm_ops = &erofs_dax_vm_ops;
	vm_flags_set(vma, VM_HUGEPAGE);
	return 0;
}
#else
#define erofs_file_mmap generic_file_readonly_mmap
#endif

const struct file_operations erofs_file_fops = {
	.llseek		= generic_file_llseek,
	.read_iter	= erofs_file_read_iter,
	.mmap		= erofs_file_mmap,
	.get_unmapped_area = thp_get_unmapped_area,
	.splice_read	= filemap_splice_read,
};