TOMOYO Linux Cross Reference
Linux/fs/erofs/zmap.c

  1 // SPDX-License-Identifier: GPL-2.0-only
  2 /*
  3  * Copyright (C) 2018-2019 HUAWEI, Inc.
  4  *             https://www.huawei.com/
  5  */
  6 #include "internal.h"
  7 #include <asm/unaligned.h>
  8 #include <trace/events/erofs.h>
  9 
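    /*
     * Per-mapping cursor: holds the fields decoded from the lcluster index
     * currently under inspection (type, cluster offset, deltas, physical
     * block address) while the index area is being walked.
     */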
 10 struct z_erofs_maprecorder {
 11         struct inode *inode;
 12         struct erofs_map_blocks *map;
 13         void *kaddr;
 14 
 15         unsigned long lcn;
 16         /* compression extent information gathered */
 17         u8  type, headtype;
 18         u16 clusterofs;
 19         u16 delta[2];
 20         erofs_blk_t pblk, compressedblks;
 21         erofs_off_t nextpackoff;
 22         bool partialref;
 23 };
 24 
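    /*
     * Load the lcn-th index from the non-compact ("full") index area, which
     * stores one fixed-size struct z_erofs_lcluster_index per lcluster, and
     * record the decoded fields in *m.
     */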
 25 static int z_erofs_load_full_lcluster(struct z_erofs_maprecorder *m,
 26                                       unsigned long lcn)
 27 {
 28         struct inode *const inode = m->inode;
 29         struct erofs_inode *const vi = EROFS_I(inode);
 30         const erofs_off_t pos = Z_EROFS_FULL_INDEX_ALIGN(erofs_iloc(inode) +
 31                         vi->inode_isize + vi->xattr_isize) +
 32                         lcn * sizeof(struct z_erofs_lcluster_index);
 33         struct z_erofs_lcluster_index *di;
 34         unsigned int advise;
 35 
 36         m->kaddr = erofs_read_metabuf(&m->map->buf, inode->i_sb,
 37                                       pos, EROFS_KMAP);
 38         if (IS_ERR(m->kaddr))
 39                 return PTR_ERR(m->kaddr);
 40 
 41         m->nextpackoff = pos + sizeof(struct z_erofs_lcluster_index);
 42         m->lcn = lcn;
 43         di = m->kaddr;
 44 
 45         advise = le16_to_cpu(di->di_advise);
 46         m->type = advise & Z_EROFS_LI_LCLUSTER_TYPE_MASK;
 47         if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
 48                 m->clusterofs = 1 << vi->z_logical_clusterbits;
 49                 m->delta[0] = le16_to_cpu(di->di_u.delta[0]);
 50                 if (m->delta[0] & Z_EROFS_LI_D0_CBLKCNT) {
 51                         if (!(vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 |
 52                                         Z_EROFS_ADVISE_BIG_PCLUSTER_2))) {
 53                                 DBG_BUGON(1);
 54                                 return -EFSCORRUPTED;
 55                         }
 56                         m->compressedblks = m->delta[0] &
 57                                 ~Z_EROFS_LI_D0_CBLKCNT;
 58                         m->delta[0] = 1;
 59                 }
 60                 m->delta[1] = le16_to_cpu(di->di_u.delta[1]);
 61         } else {
 62                 m->partialref = !!(advise & Z_EROFS_LI_PARTIAL_REF);
 63                 m->clusterofs = le16_to_cpu(di->di_clusterofs);
 64                 if (m->clusterofs >= 1 << vi->z_logical_clusterbits) {
 65                         DBG_BUGON(1);
 66                         return -EFSCORRUPTED;
 67                 }
 68                 m->pblk = le32_to_cpu(di->di_u.blkaddr);
 69         }
 70         return 0;
 71 }
 72 
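    /*
     * Pick one encoded field out of a compacted index pack: starting at bit
     * offset @pos, the low @lobits bits form the type-dependent "lo" value
     * and the following two bits hold the lcluster type.
     */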
 73 static unsigned int decode_compactedbits(unsigned int lobits,
 74                                          u8 *in, unsigned int pos, u8 *type)
 75 {
 76         const unsigned int v = get_unaligned_le32(in + pos / 8) >> (pos & 7);
 77         const unsigned int lo = v & ((1 << lobits) - 1);
 78 
 79         *type = (v >> lobits) & 3;
 80         return lo;
 81 }
 82 
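    /*
     * Count the lookahead distance (delta[1]) of the i-th lcluster within a
     * pack: the number of NONHEAD lclusters up to the next HEAD one. If the
     * scan reaches the end of the pack, the delta[1] stored in the last
     * lcluster is folded in unless it is a CBLKCNT marker.
     */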
 83 static int get_compacted_la_distance(unsigned int lobits,
 84                                      unsigned int encodebits,
 85                                      unsigned int vcnt, u8 *in, int i)
 86 {
 87         unsigned int lo, d1 = 0;
 88         u8 type;
 89 
 90         DBG_BUGON(i >= vcnt);
 91 
 92         do {
 93                 lo = decode_compactedbits(lobits, in, encodebits * i, &type);
 94 
 95                 if (type != Z_EROFS_LCLUSTER_TYPE_NONHEAD)
 96                         return d1;
 97                 ++d1;
 98         } while (++i < vcnt);
 99 
100         /* the last lcluster (vcnt - 1) is NONHEAD: its lo stores delta[1] */
101         if (!(lo & Z_EROFS_LI_D0_CBLKCNT))
102                 d1 += lo - 1;
103         return d1;
104 }
105 
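    /*
     * Decode one compacted index pack of @vcnt lclusters. Each pack stores
     * fixed-width (encodebits-bit) per-lcluster fields followed by a trailing
     * __le32 base blkaddr; the pblk of a HEAD lcluster is that base plus the
     * number of pclusters starting earlier in the same pack.
     */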
106 static int unpack_compacted_index(struct z_erofs_maprecorder *m,
107                                   unsigned int amortizedshift,
108                                   erofs_off_t pos, bool lookahead)
109 {
110         struct erofs_inode *const vi = EROFS_I(m->inode);
111         const unsigned int lclusterbits = vi->z_logical_clusterbits;
112         unsigned int vcnt, lo, lobits, encodebits, nblk, bytes;
113         int i;
114         u8 *in, type;
115         bool big_pcluster;
116 
117         if (1 << amortizedshift == 4 && lclusterbits <= 14)
118                 vcnt = 2;
119         else if (1 << amortizedshift == 2 && lclusterbits <= 12)
120                 vcnt = 16;
121         else
122                 return -EOPNOTSUPP;
123 
124         /* note that this is not equal to round_up(..) */
125         m->nextpackoff = round_down(pos, vcnt << amortizedshift) +
126                          (vcnt << amortizedshift);
127         big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
128         lobits = max(lclusterbits, ilog2(Z_EROFS_LI_D0_CBLKCNT) + 1U);
129         encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt;
130         bytes = pos & ((vcnt << amortizedshift) - 1);
131 
132         in = m->kaddr - bytes;
133 
134         i = bytes >> amortizedshift;
135 
136         lo = decode_compactedbits(lobits, in, encodebits * i, &type);
137         m->type = type;
138         if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
139                 m->clusterofs = 1 << lclusterbits;
140 
141                 /* figure out lookahead_distance: delta[1] if needed */
142                 if (lookahead)
143                         m->delta[1] = get_compacted_la_distance(lobits,
144                                                 encodebits, vcnt, in, i);
145                 if (lo & Z_EROFS_LI_D0_CBLKCNT) {
146                         if (!big_pcluster) {
147                                 DBG_BUGON(1);
148                                 return -EFSCORRUPTED;
149                         }
150                         m->compressedblks = lo & ~Z_EROFS_LI_D0_CBLKCNT;
151                         m->delta[0] = 1;
152                         return 0;
153                 } else if (i + 1 != (int)vcnt) {
154                         m->delta[0] = lo;
155                         return 0;
156                 }
157                 /*
158                  * The last lcluster in the pack is special: its lo stores
159                  * delta[1] rather than delta[0]. Hence, derive delta[0]
160                  * indirectly from the previous lcluster.
161                  */
162                 lo = decode_compactedbits(lobits, in,
163                                           encodebits * (i - 1), &type);
164                 if (type != Z_EROFS_LCLUSTER_TYPE_NONHEAD)
165                         lo = 0;
166                 else if (lo & Z_EROFS_LI_D0_CBLKCNT)
167                         lo = 1;
168                 m->delta[0] = lo + 1;
169                 return 0;
170         }
171         m->clusterofs = lo;
172         m->delta[0] = 0;
173         /* figure out blkaddr (pblk) for HEAD lclusters */
174         if (!big_pcluster) {
175                 nblk = 1;
176                 while (i > 0) {
177                         --i;
178                         lo = decode_compactedbits(lobits, in,
179                                                   encodebits * i, &type);
180                         if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD)
181                                 i -= lo;
182 
183                         if (i >= 0)
184                                 ++nblk;
185                 }
186         } else {
187                 nblk = 0;
188                 while (i > 0) {
189                         --i;
190                         lo = decode_compactedbits(lobits, in,
191                                                   encodebits * i, &type);
192                         if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
193                                 if (lo & Z_EROFS_LI_D0_CBLKCNT) {
194                                         --i;
195                                         nblk += lo & ~Z_EROFS_LI_D0_CBLKCNT;
196                                         continue;
197                                 }
198                                 /* bigpcluster shouldn't have plain d0 == 1 */
199                                 if (lo <= 1) {
200                                         DBG_BUGON(1);
201                                         return -EFSCORRUPTED;
202                                 }
203                                 i -= lo - 2;
204                                 continue;
205                         }
206                         ++nblk;
207                 }
208         }
209         in += (vcnt << amortizedshift) - sizeof(__le32);
210         m->pblk = le32_to_cpu(*(__le32 *)in) + nblk;
211         return 0;
212 }
213 
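    /*
     * Compacted index layout: up to 7 leading 4-byte lclusters to reach
     * 32-byte alignment, then (with ADVISE_COMPACTED_2B) a multiple-of-16
     * run of 2-byte lclusters, then trailing 4-byte lclusters. Locate the
     * pack containing @lcn and unpack it.
     */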
214 static int z_erofs_load_compact_lcluster(struct z_erofs_maprecorder *m,
215                                          unsigned long lcn, bool lookahead)
216 {
217         struct inode *const inode = m->inode;
218         struct erofs_inode *const vi = EROFS_I(inode);
219         const erofs_off_t ebase = sizeof(struct z_erofs_map_header) +
220                 ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8);
221         unsigned int totalidx = erofs_iblks(inode);
222         unsigned int compacted_4b_initial, compacted_2b;
223         unsigned int amortizedshift;
224         erofs_off_t pos;
225 
226         if (lcn >= totalidx)
227                 return -EINVAL;
228 
229         m->lcn = lcn;
230         /* 4-byte indexes needed to reach 32-byte (compacted_2b) alignment */
231         compacted_4b_initial = (32 - ebase % 32) / 4;
232         if (compacted_4b_initial == 32 / 4)
233                 compacted_4b_initial = 0;
234 
235         if ((vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) &&
236             compacted_4b_initial < totalidx)
237                 compacted_2b = rounddown(totalidx - compacted_4b_initial, 16);
238         else
239                 compacted_2b = 0;
240 
241         pos = ebase;
242         if (lcn < compacted_4b_initial) {
243                 amortizedshift = 2;
244                 goto out;
245         }
246         pos += compacted_4b_initial * 4;
247         lcn -= compacted_4b_initial;
248 
249         if (lcn < compacted_2b) {
250                 amortizedshift = 1;
251                 goto out;
252         }
253         pos += compacted_2b * 2;
254         lcn -= compacted_2b;
255         amortizedshift = 2;
256 out:
257         pos += lcn * (1 << amortizedshift);
258         m->kaddr = erofs_read_metabuf(&m->map->buf, inode->i_sb,
259                                       pos, EROFS_KMAP);
260         if (IS_ERR(m->kaddr))
261                 return PTR_ERR(m->kaddr);
262         return unpack_compacted_index(m, amortizedshift, pos, lookahead);
263 }
264 
265 static int z_erofs_load_lcluster_from_disk(struct z_erofs_maprecorder *m,
266                                            unsigned int lcn, bool lookahead)
267 {
268         switch (EROFS_I(m->inode)->datalayout) {
269         case EROFS_INODE_COMPRESSED_FULL:
270                 return z_erofs_load_full_lcluster(m, lcn);
271         case EROFS_INODE_COMPRESSED_COMPACT:
272                 return z_erofs_load_compact_lcluster(m, lcn, lookahead);
273         default:
274                 return -EINVAL;
275         }
276 }
277 
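    /*
     * Step backwards from a NONHEAD lcluster by delta[0] hops until the
     * PLAIN/HEAD lcluster that starts the extent is reached, then record the
     * logical start of the extent in map->m_la.
     */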
278 static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
279                                    unsigned int lookback_distance)
280 {
281         struct super_block *sb = m->inode->i_sb;
282         struct erofs_inode *const vi = EROFS_I(m->inode);
283         const unsigned int lclusterbits = vi->z_logical_clusterbits;
284 
285         while (m->lcn >= lookback_distance) {
286                 unsigned long lcn = m->lcn - lookback_distance;
287                 int err;
288 
289                 err = z_erofs_load_lcluster_from_disk(m, lcn, false);
290                 if (err)
291                         return err;
292 
293                 switch (m->type) {
294                 case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
295                         lookback_distance = m->delta[0];
296                         if (!lookback_distance)
297                                 goto err_bogus;
298                         continue;
299                 case Z_EROFS_LCLUSTER_TYPE_PLAIN:
300                 case Z_EROFS_LCLUSTER_TYPE_HEAD1:
301                 case Z_EROFS_LCLUSTER_TYPE_HEAD2:
302                         m->headtype = m->type;
303                         m->map->m_la = (lcn << lclusterbits) | m->clusterofs;
304                         return 0;
305                 default:
306                         erofs_err(sb, "unknown type %u @ lcn %lu of nid %llu",
307                                   m->type, lcn, vi->nid);
308                         DBG_BUGON(1);
309                         return -EOPNOTSUPP;
310                 }
311         }
312 err_bogus:
313         erofs_err(sb, "bogus lookback distance %u @ lcn %lu of nid %llu",
314                   lookback_distance, m->lcn, vi->nid);
315         DBG_BUGON(1);
316         return -EFSCORRUPTED;
317 }
318 
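    /*
     * Work out the on-disk (compressed) length of the current pcluster. It
     * is exactly one lcluster unless big pcluster is enabled, in which case
     * the block count comes from the CBLKCNT stored in the first NONHEAD
     * lcluster following the head.
     */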
319 static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
320                                             unsigned int initial_lcn)
321 {
322         struct super_block *sb = m->inode->i_sb;
323         struct erofs_inode *const vi = EROFS_I(m->inode);
324         struct erofs_map_blocks *const map = m->map;
325         const unsigned int lclusterbits = vi->z_logical_clusterbits;
326         unsigned long lcn;
327         int err;
328 
329         DBG_BUGON(m->type != Z_EROFS_LCLUSTER_TYPE_PLAIN &&
330                   m->type != Z_EROFS_LCLUSTER_TYPE_HEAD1 &&
331                   m->type != Z_EROFS_LCLUSTER_TYPE_HEAD2);
332         DBG_BUGON(m->type != m->headtype);
333 
334         if (m->headtype == Z_EROFS_LCLUSTER_TYPE_PLAIN ||
335             ((m->headtype == Z_EROFS_LCLUSTER_TYPE_HEAD1) &&
336              !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1)) ||
337             ((m->headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2) &&
338              !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2))) {
339                 map->m_plen = 1ULL << lclusterbits;
340                 return 0;
341         }
342         lcn = m->lcn + 1;
343         if (m->compressedblks)
344                 goto out;
345 
346         err = z_erofs_load_lcluster_from_disk(m, lcn, false);
347         if (err)
348                 return err;
349 
350         /*
351          * If the 1st NONHEAD lcluster has already been handled initially
352          * without valid compressedblks, it must not be a CBLKCNT lcluster;
353          * otherwise an internal implementation error has been detected.
354          *
355          * The following code can handle that case properly anyway, but let's
356          * BUG_ON in debugging mode only, so that developers notice it.
357          */
358         DBG_BUGON(lcn == initial_lcn &&
359                   m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD);
360 
361         switch (m->type) {
362         case Z_EROFS_LCLUSTER_TYPE_PLAIN:
363         case Z_EROFS_LCLUSTER_TYPE_HEAD1:
364         case Z_EROFS_LCLUSTER_TYPE_HEAD2:
365                 /*
366                  * if the 1st NONHEAD lcluster is actually PLAIN or HEAD type
367                  * rather than CBLKCNT, it's a 1 lcluster-sized pcluster.
368                  */
369                 m->compressedblks = 1 << (lclusterbits - sb->s_blocksize_bits);
370                 break;
371         case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
372                 if (m->delta[0] != 1)
373                         goto err_bonus_cblkcnt;
374                 if (m->compressedblks)
375                         break;
376                 fallthrough;
377         default:
378                 erofs_err(sb, "cannot find CBLKCNT @ lcn %lu of nid %llu", lcn,
379                           vi->nid);
380                 DBG_BUGON(1);
381                 return -EFSCORRUPTED;
382         }
383 out:
384         map->m_plen = erofs_pos(sb, m->compressedblks);
385         return 0;
386 err_bonus_cblkcnt:
387         erofs_err(sb, "bogus CBLKCNT @ lcn %lu of nid %llu", lcn, vi->nid);
388         DBG_BUGON(1);
389         return -EFSCORRUPTED;
390 }
391 
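    /*
     * Extend map->m_llen forward over the following NONHEAD lclusters (using
     * delta[1] to skip ahead) until the next HEAD lcluster or EOF, so that
     * the whole logical extent is reported (FIEMAP / readmore only).
     */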
392 static int z_erofs_get_extent_decompressedlen(struct z_erofs_maprecorder *m)
393 {
394         struct inode *inode = m->inode;
395         struct erofs_inode *vi = EROFS_I(inode);
396         struct erofs_map_blocks *map = m->map;
397         unsigned int lclusterbits = vi->z_logical_clusterbits;
398         u64 lcn = m->lcn, headlcn = map->m_la >> lclusterbits;
399         int err;
400 
401         do {
402                 /* handle the last EOF pcluster (no next HEAD lcluster) */
403                 if ((lcn << lclusterbits) >= inode->i_size) {
404                         map->m_llen = inode->i_size - map->m_la;
405                         return 0;
406                 }
407 
408                 err = z_erofs_load_lcluster_from_disk(m, lcn, true);
409                 if (err)
410                         return err;
411 
412                 if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
413                         DBG_BUGON(!m->delta[1] &&
414                                   m->clusterofs != 1 << lclusterbits);
415                 } else if (m->type == Z_EROFS_LCLUSTER_TYPE_PLAIN ||
416                            m->type == Z_EROFS_LCLUSTER_TYPE_HEAD1 ||
417                            m->type == Z_EROFS_LCLUSTER_TYPE_HEAD2) {
418                         /* go on until the next HEAD lcluster */
419                         if (lcn != headlcn)
420                                 break;
421                         m->delta[1] = 1;
422                 } else {
423                         erofs_err(inode->i_sb, "unknown type %u @ lcn %llu of nid %llu",
424                                   m->type, lcn, vi->nid);
425                         DBG_BUGON(1);
426                         return -EOPNOTSUPP;
427                 }
428                 lcn += m->delta[1];
429         } while (m->delta[1]);
430 
431         map->m_llen = (lcn << lclusterbits) + m->clusterofs - map->m_la;
432         return 0;
433 }
434 
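    /*
     * Core mapping routine: load the lcluster covering the requested offset,
     * look back to its HEAD if necessary, then fill in m_la/m_llen and
     * m_pa/m_plen plus the algorithm format. Also invoked with
     * EROFS_GET_BLOCKS_FINDTAIL during lazy init to locate the tail extent.
     */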
435 static int z_erofs_do_map_blocks(struct inode *inode,
436                                  struct erofs_map_blocks *map, int flags)
437 {
438         struct erofs_inode *const vi = EROFS_I(inode);
439         bool ztailpacking = vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER;
440         bool fragment = vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER;
441         struct z_erofs_maprecorder m = {
442                 .inode = inode,
443                 .map = map,
444         };
445         int err = 0;
446         unsigned int lclusterbits, endoff, afmt;
447         unsigned long initial_lcn;
448         unsigned long long ofs, end;
449 
450         lclusterbits = vi->z_logical_clusterbits;
451         ofs = flags & EROFS_GET_BLOCKS_FINDTAIL ? inode->i_size - 1 : map->m_la;
452         initial_lcn = ofs >> lclusterbits;
453         endoff = ofs & ((1 << lclusterbits) - 1);
454 
455         err = z_erofs_load_lcluster_from_disk(&m, initial_lcn, false);
456         if (err)
457                 goto unmap_out;
458 
459         if (ztailpacking && (flags & EROFS_GET_BLOCKS_FINDTAIL))
460                 vi->z_idataoff = m.nextpackoff;
461 
462         map->m_flags = EROFS_MAP_MAPPED | EROFS_MAP_ENCODED;
463         end = (m.lcn + 1ULL) << lclusterbits;
464 
465         switch (m.type) {
466         case Z_EROFS_LCLUSTER_TYPE_PLAIN:
467         case Z_EROFS_LCLUSTER_TYPE_HEAD1:
468         case Z_EROFS_LCLUSTER_TYPE_HEAD2:
469                 if (endoff >= m.clusterofs) {
470                         m.headtype = m.type;
471                         map->m_la = (m.lcn << lclusterbits) | m.clusterofs;
472                         /*
473                          * For ztailpacking files, special EOF lclusters are
474                          * supported so that data can be inlined more
475                          * effectively; they can have three parts at most.
476                          */
477                         if (ztailpacking && end > inode->i_size)
478                                 end = inode->i_size;
479                         break;
480                 }
481                 /* m.lcn should be >= 1 if endoff < m.clusterofs */
482                 if (!m.lcn) {
483                         erofs_err(inode->i_sb,
484                                   "invalid logical cluster 0 at nid %llu",
485                                   vi->nid);
486                         err = -EFSCORRUPTED;
487                         goto unmap_out;
488                 }
489                 end = (m.lcn << lclusterbits) | m.clusterofs;
490                 map->m_flags |= EROFS_MAP_FULL_MAPPED;
491                 m.delta[0] = 1;
492                 fallthrough;
493         case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
494                 /* get the corresponding first chunk */
495                 err = z_erofs_extent_lookback(&m, m.delta[0]);
496                 if (err)
497                         goto unmap_out;
498                 break;
499         default:
500                 erofs_err(inode->i_sb,
501                           "unknown type %u @ offset %llu of nid %llu",
502                           m.type, ofs, vi->nid);
503                 err = -EOPNOTSUPP;
504                 goto unmap_out;
505         }
506         if (m.partialref)
507                 map->m_flags |= EROFS_MAP_PARTIAL_REF;
508         map->m_llen = end - map->m_la;
509 
510         if (flags & EROFS_GET_BLOCKS_FINDTAIL) {
511                 vi->z_tailextent_headlcn = m.lcn;
512                 /* for non-compact indexes, fragmentoff is 64 bits */
513                 if (fragment && vi->datalayout == EROFS_INODE_COMPRESSED_FULL)
514                         vi->z_fragmentoff |= (u64)m.pblk << 32;
515         }
516         if (ztailpacking && m.lcn == vi->z_tailextent_headlcn) {
517                 map->m_flags |= EROFS_MAP_META;
518                 map->m_pa = vi->z_idataoff;
519                 map->m_plen = vi->z_idata_size;
520         } else if (fragment && m.lcn == vi->z_tailextent_headlcn) {
521                 map->m_flags |= EROFS_MAP_FRAGMENT;
522         } else {
523                 map->m_pa = erofs_pos(inode->i_sb, m.pblk);
524                 err = z_erofs_get_extent_compressedlen(&m, initial_lcn);
525                 if (err)
526                         goto unmap_out;
527         }
528 
529         if (m.headtype == Z_EROFS_LCLUSTER_TYPE_PLAIN) {
530                 if (map->m_llen > map->m_plen) {
531                         DBG_BUGON(1);
532                         err = -EFSCORRUPTED;
533                         goto unmap_out;
534                 }
535                 afmt = vi->z_advise & Z_EROFS_ADVISE_INTERLACED_PCLUSTER ?
536                         Z_EROFS_COMPRESSION_INTERLACED :
537                         Z_EROFS_COMPRESSION_SHIFTED;
538         } else {
539                 afmt = m.headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2 ?
540                         vi->z_algorithmtype[1] : vi->z_algorithmtype[0];
541                 if (!(EROFS_I_SB(inode)->available_compr_algs & (1 << afmt))) {
542                         erofs_err(inode->i_sb, "inconsistent algorithmtype %u for nid %llu",
543                                   afmt, vi->nid);
544                         err = -EFSCORRUPTED;
545                         goto unmap_out;
546                 }
547         }
548         map->m_algorithmformat = afmt;
549 
550         if ((flags & EROFS_GET_BLOCKS_FIEMAP) ||
551             ((flags & EROFS_GET_BLOCKS_READMORE) &&
552              (map->m_algorithmformat == Z_EROFS_COMPRESSION_LZMA ||
553               map->m_algorithmformat == Z_EROFS_COMPRESSION_DEFLATE ||
554               map->m_algorithmformat == Z_EROFS_COMPRESSION_ZSTD) &&
555               map->m_llen >= i_blocksize(inode))) {
556                 err = z_erofs_get_extent_decompressedlen(&m);
557                 if (!err)
558                         map->m_flags |= EROFS_MAP_FULL_MAPPED;
559         }
560 
561 unmap_out:
562         erofs_unmap_metabuf(&m.map->buf);
563         return err;
564 }
565 
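    /*
     * Parse the on-disk z_erofs_map_header the first time a compressed inode
     * is mapped (serialized by EROFS_I_BL_Z_BIT): validate the algorithm
     * types and big-pcluster flags, and locate ztailpacking / fragment data
     * if present.
     */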
566 static int z_erofs_fill_inode_lazy(struct inode *inode)
567 {
568         struct erofs_inode *const vi = EROFS_I(inode);
569         struct super_block *const sb = inode->i_sb;
570         int err, headnr;
571         erofs_off_t pos;
572         struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
573         struct z_erofs_map_header *h;
574 
575         if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags)) {
576                 /*
577                  * paired with smp_mb() at the end of the function to ensure
578                  * fields will only be observed after the bit is set.
579                  */
580                 smp_mb();
581                 return 0;
582         }
583 
584         if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_Z_BIT, TASK_KILLABLE))
585                 return -ERESTARTSYS;
586 
587         err = 0;
588         if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags))
589                 goto out_unlock;
590 
591         pos = ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8);
592         h = erofs_read_metabuf(&buf, sb, pos, EROFS_KMAP);
593         if (IS_ERR(h)) {
594                 err = PTR_ERR(h);
595                 goto out_unlock;
596         }
597 
598         /*
599          * If the highest bit of the 8-byte map header is set, the whole file
600          * is stored in the packed inode. The remaining bits keep z_fragmentoff.
601          */
602         if (h->h_clusterbits >> Z_EROFS_FRAGMENT_INODE_BIT) {
603                 vi->z_advise = Z_EROFS_ADVISE_FRAGMENT_PCLUSTER;
604                 vi->z_fragmentoff = le64_to_cpu(*(__le64 *)h) ^ (1ULL << 63);
605                 vi->z_tailextent_headlcn = 0;
606                 goto done;
607         }
608         vi->z_advise = le16_to_cpu(h->h_advise);
609         vi->z_algorithmtype[0] = h->h_algorithmtype & 15;
610         vi->z_algorithmtype[1] = h->h_algorithmtype >> 4;
611 
612         headnr = 0;
613         if (vi->z_algorithmtype[0] >= Z_EROFS_COMPRESSION_MAX ||
614             vi->z_algorithmtype[++headnr] >= Z_EROFS_COMPRESSION_MAX) {
615                 erofs_err(sb, "unknown HEAD%u format %u for nid %llu, please upgrade kernel",
616                           headnr + 1, vi->z_algorithmtype[headnr], vi->nid);
617                 err = -EOPNOTSUPP;
618                 goto out_put_metabuf;
619         }
620 
621         vi->z_logical_clusterbits = sb->s_blocksize_bits + (h->h_clusterbits & 7);
622         if (!erofs_sb_has_big_pcluster(EROFS_SB(sb)) &&
623             vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 |
624                             Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
625                 erofs_err(sb, "per-inode big pcluster without sb feature for nid %llu",
626                           vi->nid);
627                 err = -EFSCORRUPTED;
628                 goto out_put_metabuf;
629         }
630         if (vi->datalayout == EROFS_INODE_COMPRESSED_COMPACT &&
631             !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1) ^
632             !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
633                 erofs_err(sb, "big pcluster head1/2 of compact indexes should be consistent for nid %llu",
634                           vi->nid);
635                 err = -EFSCORRUPTED;
636                 goto out_put_metabuf;
637         }
638 
639         if (vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER) {
640                 struct erofs_map_blocks map = {
641                         .buf = __EROFS_BUF_INITIALIZER
642                 };
643 
644                 vi->z_idata_size = le16_to_cpu(h->h_idata_size);
645                 err = z_erofs_do_map_blocks(inode, &map,
646                                             EROFS_GET_BLOCKS_FINDTAIL);
647                 erofs_put_metabuf(&map.buf);
648 
649                 if (!map.m_plen ||
650                     erofs_blkoff(sb, map.m_pa) + map.m_plen > sb->s_blocksize) {
651                         erofs_err(sb, "invalid tail-packing pclustersize %llu",
652                                   map.m_plen);
653                         err = -EFSCORRUPTED;
654                 }
655                 if (err < 0)
656                         goto out_put_metabuf;
657         }
658 
659         if (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER &&
660             !(h->h_clusterbits >> Z_EROFS_FRAGMENT_INODE_BIT)) {
661                 struct erofs_map_blocks map = {
662                         .buf = __EROFS_BUF_INITIALIZER
663                 };
664 
665                 vi->z_fragmentoff = le32_to_cpu(h->h_fragmentoff);
666                 err = z_erofs_do_map_blocks(inode, &map,
667                                             EROFS_GET_BLOCKS_FINDTAIL);
668                 erofs_put_metabuf(&map.buf);
669                 if (err < 0)
670                         goto out_put_metabuf;
671         }
672 done:
673         /* paired with smp_mb() at the beginning of the function */
674         smp_mb();
675         set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
676 out_put_metabuf:
677         erofs_put_metabuf(&buf);
678 out_unlock:
679         clear_and_wake_up_bit(EROFS_I_BL_Z_BIT, &vi->flags);
680         return err;
681 }
682 
683 int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
684                             int flags)
685 {
686         struct erofs_inode *const vi = EROFS_I(inode);
687         int err = 0;
688 
689         trace_erofs_map_blocks_enter(inode, map, flags);
690 
691         /* when trying to read beyond EOF, leave it unmapped */
692         if (map->m_la >= inode->i_size) {
693                 map->m_llen = map->m_la + 1 - inode->i_size;
694                 map->m_la = inode->i_size;
695                 map->m_flags = 0;
696                 goto out;
697         }
698 
699         err = z_erofs_fill_inode_lazy(inode);
700         if (err)
701                 goto out;
702 
703         if ((vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER) &&
704             !vi->z_tailextent_headlcn) {
705                 map->m_la = 0;
706                 map->m_llen = inode->i_size;
707                 map->m_flags = EROFS_MAP_MAPPED | EROFS_MAP_FULL_MAPPED |
708                                 EROFS_MAP_FRAGMENT;
709                 goto out;
710         }
711 
712         err = z_erofs_do_map_blocks(inode, map, flags);
713 out:
714         if (err)
715                 map->m_llen = 0;
716         trace_erofs_map_blocks_exit(inode, map, flags, err);
717         return err;
718 }
719 
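    /*
     * iomap_begin hook used for FIEMAP-style reporting: translate the result
     * of z_erofs_map_blocks_iter() into an iomap; fragment extents report
     * IOMAP_NULL_ADDR since their data lives in the packed inode.
     */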
720 static int z_erofs_iomap_begin_report(struct inode *inode, loff_t offset,
721                                 loff_t length, unsigned int flags,
722                                 struct iomap *iomap, struct iomap *srcmap)
723 {
724         int ret;
725         struct erofs_map_blocks map = { .m_la = offset };
726 
727         ret = z_erofs_map_blocks_iter(inode, &map, EROFS_GET_BLOCKS_FIEMAP);
728         erofs_put_metabuf(&map.buf);
729         if (ret < 0)
730                 return ret;
731 
732         iomap->bdev = inode->i_sb->s_bdev;
733         iomap->offset = map.m_la;
734         iomap->length = map.m_llen;
735         if (map.m_flags & EROFS_MAP_MAPPED) {
736                 iomap->type = IOMAP_MAPPED;
737                 iomap->addr = map.m_flags & EROFS_MAP_FRAGMENT ?
738                               IOMAP_NULL_ADDR : map.m_pa;
739         } else {
740                 iomap->type = IOMAP_HOLE;
741                 iomap->addr = IOMAP_NULL_ADDR;
742                 /*
743                  * There is no strict rule on how to describe extents past
744                  * EOF, but we have to handle it as below; otherwise, iomap
745                  * itself will get into an endless loop on post-EOF access.
746                  *
747                  * Calculate the effective offset by subtracting the extent
748                  * start (map.m_la) from the requested offset, and add it to
749                  * the length. (NB: offset >= map.m_la always holds.)
750                  */
751                 if (iomap->offset >= inode->i_size)
752                         iomap->length = length + offset - map.m_la;
753         }
754         iomap->flags = 0;
755         return 0;
756 }
757 
758 const struct iomap_ops z_erofs_iomap_report_ops = {
759         .iomap_begin = z_erofs_iomap_begin_report,
760 };
761 
