TOMOYO Linux Cross Reference
Linux/fs/xfs/scrub/repair.c

  1 // SPDX-License-Identifier: GPL-2.0-or-later
  2 /*
  3  * Copyright (C) 2018-2023 Oracle.  All Rights Reserved.
  4  * Author: Darrick J. Wong <djwong@kernel.org>
  5  */
  6 #include "xfs.h"
  7 #include "xfs_fs.h"
  8 #include "xfs_shared.h"
  9 #include "xfs_format.h"
 10 #include "xfs_trans_resv.h"
 11 #include "xfs_mount.h"
 12 #include "xfs_btree.h"
 13 #include "xfs_log_format.h"
 14 #include "xfs_trans.h"
 15 #include "xfs_sb.h"
 16 #include "xfs_inode.h"
 17 #include "xfs_alloc.h"
 18 #include "xfs_alloc_btree.h"
 19 #include "xfs_ialloc.h"
 20 #include "xfs_ialloc_btree.h"
 21 #include "xfs_rmap.h"
 22 #include "xfs_rmap_btree.h"
 23 #include "xfs_refcount_btree.h"
 24 #include "xfs_extent_busy.h"
 25 #include "xfs_ag.h"
 26 #include "xfs_ag_resv.h"
 27 #include "xfs_quota.h"
 28 #include "xfs_qm.h"
 29 #include "xfs_defer.h"
 30 #include "xfs_errortag.h"
 31 #include "xfs_error.h"
 32 #include "xfs_reflink.h"
 33 #include "xfs_health.h"
 34 #include "xfs_buf_mem.h"
 35 #include "xfs_da_format.h"
 36 #include "xfs_da_btree.h"
 37 #include "xfs_attr.h"
 38 #include "xfs_dir2.h"
 39 #include "scrub/scrub.h"
 40 #include "scrub/common.h"
 41 #include "scrub/trace.h"
 42 #include "scrub/repair.h"
 43 #include "scrub/bitmap.h"
 44 #include "scrub/stats.h"
 45 #include "scrub/xfile.h"
 46 #include "scrub/attr_repair.h"
 47 
 48 /*
 49  * Attempt to repair some metadata, if the metadata is corrupt and userspace
 50  * told us to fix it.  This function returns -EAGAIN to mean "re-run scrub",
 51  * and will set *fixed to true if it thinks it repaired anything.
 52  */
 53 int
 54 xrep_attempt(
 55         struct xfs_scrub        *sc,
 56         struct xchk_stats_run   *run)
 57 {
 58         u64                     repair_start;
 59         int                     error = 0;
 60 
 61         trace_xrep_attempt(XFS_I(file_inode(sc->file)), sc->sm, error);
 62 
 63         xchk_ag_btcur_free(&sc->sa);
 64 
 65         /* Repair whatever's broken. */
 66         ASSERT(sc->ops->repair);
 67         run->repair_attempted = true;
 68         repair_start = xchk_stats_now();
 69         error = sc->ops->repair(sc);
 70         trace_xrep_done(XFS_I(file_inode(sc->file)), sc->sm, error);
 71         run->repair_ns += xchk_stats_elapsed_ns(repair_start);
 72         switch (error) {
 73         case 0:
 74                 /*
 75                  * Repair succeeded.  Commit the fixes and perform a second
 76                  * scrub so that we can tell userspace if we fixed the problem.
 77                  */
 78                 sc->sm->sm_flags &= ~XFS_SCRUB_FLAGS_OUT;
 79                 sc->flags |= XREP_ALREADY_FIXED;
 80                 run->repair_succeeded = true;
 81                 return -EAGAIN;
 82         case -ECHRNG:
 83                 sc->flags |= XCHK_NEED_DRAIN;
 84                 run->retries++;
 85                 return -EAGAIN;
 86         case -EDEADLOCK:
 87                 /* Tell the caller to try again after grabbing all the locks. */
 88                 if (!(sc->flags & XCHK_TRY_HARDER)) {
 89                         sc->flags |= XCHK_TRY_HARDER;
 90                         run->retries++;
 91                         return -EAGAIN;
 92                 }
 93                 /*
 94                  * We tried harder but still couldn't grab all the resources
 95                  * we needed to fix it.  The corruption has not been fixed,
 96                  * so exit to userspace with the scan's output flags unchanged.
 97                  */
 98                 return 0;
 99         default:
100                 /*
101                  * EAGAIN tells the caller to re-scrub, so we cannot return
102                  * that here.
103                  */
104                 ASSERT(error != -EAGAIN);
105                 return error;
106         }
107 }
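
/*
 * A minimal sketch of how a dispatcher might consume the -EAGAIN
 * protocol above.  xchk_example_setup_and_scrub() is hypothetical and
 * stands in for the setup + scrub steps; the real driver logic lives in
 * scrub/scrub.c, so treat this as an illustration only.
 */
static int xchk_example_setup_and_scrub(struct xfs_scrub *sc);

static int
xrep_example_dispatch(
        struct xfs_scrub        *sc,
        struct xchk_stats_run   *run)
{
        int                     error;

        do {
                /* Hypothetical: grab resources, run sc->ops->scrub(). */
                error = xchk_example_setup_and_scrub(sc);
                if (error)
                        break;
                if (!xrep_will_attempt(sc))
                        break;
                /* -EAGAIN from repair means tear down and scrub again. */
                error = xrep_attempt(sc, run);
        } while (error == -EAGAIN);

        return error;
}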
108 
109 /*
110  * Complain about unfixable problems in the filesystem.  We don't log
111  * corruptions when IFLAG_REPAIR wasn't set on the assumption that the driver
112  * program is xfs_scrub, which will call back with IFLAG_REPAIR set if the
113  * administrator isn't running xfs_scrub in no-repairs mode.
114  *
115  * Use this helper function because _ratelimited silently declares a static
116  * structure to track rate limiting information.
117  */
118 void
119 xrep_failure(
120         struct xfs_mount        *mp)
121 {
122         xfs_alert_ratelimited(mp,
123 "Corruption not fixed during online repair.  Unmount and run xfs_repair.");
124 }
125 
126 /*
127  * Repair probe -- userspace uses this to probe if we're willing to repair a
128  * given mountpoint.
129  */
130 int
131 xrep_probe(
132         struct xfs_scrub        *sc)
133 {
134         int                     error = 0;
135 
136         if (xchk_should_terminate(sc, &error))
137                 return error;
138 
139         return 0;
140 }
141 
142 /*
143  * Roll a transaction, keeping the AG headers locked and reinitializing
144  * the btree cursors.
145  */
146 int
147 xrep_roll_ag_trans(
148         struct xfs_scrub        *sc)
149 {
150         int                     error;
151 
152         /*
153          * Keep the AG header buffers locked while we roll the transaction.
154          * Ensure that both AG buffers are dirty and held when we roll the
155          * transaction so that they move forward in the log without losing the
156          * bli (and hence the bli type) when the transaction commits.
157          *
158          * Normal code would never hold clean buffers across a roll, but repair
159          * needs both buffers to maintain a total lock on the AG.
160          */
161         if (sc->sa.agi_bp) {
162                 xfs_ialloc_log_agi(sc->tp, sc->sa.agi_bp, XFS_AGI_MAGICNUM);
163                 xfs_trans_bhold(sc->tp, sc->sa.agi_bp);
164         }
165 
166         if (sc->sa.agf_bp) {
167                 xfs_alloc_log_agf(sc->tp, sc->sa.agf_bp, XFS_AGF_MAGICNUM);
168                 xfs_trans_bhold(sc->tp, sc->sa.agf_bp);
169         }
170 
171         /*
172          * Roll the transaction.  We still hold the AG header buffers locked
173          * regardless of whether or not that succeeds.  On failure, the buffers
174          * will be released during teardown on our way out of the kernel.  If
175          * successful, join the buffers to the new transaction and move on.
176          */
177         error = xfs_trans_roll(&sc->tp);
178         if (error)
179                 return error;
180 
181         /* Join the AG headers to the new transaction. */
182         if (sc->sa.agi_bp)
183                 xfs_trans_bjoin(sc->tp, sc->sa.agi_bp);
184         if (sc->sa.agf_bp)
185                 xfs_trans_bjoin(sc->tp, sc->sa.agf_bp);
186 
187         return 0;
188 }
189 
190 /* Roll the scrub transaction, holding the primary metadata locked. */
191 int
192 xrep_roll_trans(
193         struct xfs_scrub        *sc)
194 {
195         if (!sc->ip)
196                 return xrep_roll_ag_trans(sc);
197         return xfs_trans_roll_inode(&sc->tp, sc->ip);
198 }
199 
200 /* Finish all deferred work attached to the repair transaction. */
201 int
202 xrep_defer_finish(
203         struct xfs_scrub        *sc)
204 {
205         int                     error;
206 
207         /*
208          * Keep the AG header buffers locked while we complete deferred work
209          * items.  Ensure that both AG buffers are dirty and held when we roll
210          * the transaction so that they move forward in the log without losing
211          * the bli (and hence the bli type) when the transaction commits.
212          *
213          * Normal code would never hold clean buffers across a roll, but repair
214          * needs both buffers to maintain a total lock on the AG.
215          */
216         if (sc->sa.agi_bp) {
217                 xfs_ialloc_log_agi(sc->tp, sc->sa.agi_bp, XFS_AGI_MAGICNUM);
218                 xfs_trans_bhold(sc->tp, sc->sa.agi_bp);
219         }
220 
221         if (sc->sa.agf_bp) {
222                 xfs_alloc_log_agf(sc->tp, sc->sa.agf_bp, XFS_AGF_MAGICNUM);
223                 xfs_trans_bhold(sc->tp, sc->sa.agf_bp);
224         }
225 
226         /*
227          * Finish all deferred work items.  We still hold the AG header buffers
228          * locked regardless of whether or not that succeeds.  On failure, the
229          * buffers will be released during teardown on our way out of the
230          * kernel.  If successful, join the buffers to the new transaction
231          * and move on.
232          */
233         error = xfs_defer_finish(&sc->tp);
234         if (error)
235                 return error;
236 
237         /*
238          * Release the hold that we set above because defer_finish won't do
239          * that for us.  The defer roll code redirties held buffers after each
240          * roll, so the AG header buffers should be ready for logging.
241          */
242         if (sc->sa.agi_bp)
243                 xfs_trans_bhold_release(sc->tp, sc->sa.agi_bp);
244         if (sc->sa.agf_bp)
245                 xfs_trans_bhold_release(sc->tp, sc->sa.agf_bp);
246 
247         return 0;
248 }
249 
250 /*
251  * Does the given AG have enough space to rebuild a btree?  Neither AG
252  * reservation can be critical, and we must have enough space (factoring
253  * in AG reservations) to construct a whole btree.
254  */
255 bool
256 xrep_ag_has_space(
257         struct xfs_perag        *pag,
258         xfs_extlen_t            nr_blocks,
259         enum xfs_ag_resv_type   type)
260 {
261         return  !xfs_ag_resv_critical(pag, XFS_AG_RESV_RMAPBT) &&
262                 !xfs_ag_resv_critical(pag, XFS_AG_RESV_METADATA) &&
263                 pag->pagf_freeblks > xfs_ag_resv_needed(pag, type) + nr_blocks;
264 }
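
/*
 * A minimal usage sketch: bail out of a rebuild early when the AG
 * cannot absorb the new btree.  XFS_AG_RESV_NONE is just one possible
 * reservation type; a real caller passes whichever type matches the
 * btree being rebuilt.
 */
static int
xrep_example_check_space(
        struct xfs_scrub        *sc,
        xfs_extlen_t            nr_blocks)
{
        if (!xrep_ag_has_space(sc->sa.pag, nr_blocks, XFS_AG_RESV_NONE))
                return -ENOSPC;
        return 0;
}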
265 
266 /*
267  * Figure out how many blocks to reserve for an AG repair.  We calculate the
268  * worst case estimate for the number of blocks we'd need to rebuild one of
269  * any type of per-AG btree.
270  */
271 xfs_extlen_t
272 xrep_calc_ag_resblks(
273         struct xfs_scrub                *sc)
274 {
275         struct xfs_mount                *mp = sc->mp;
276         struct xfs_scrub_metadata       *sm = sc->sm;
277         struct xfs_perag                *pag;
278         struct xfs_buf                  *bp;
279         xfs_agino_t                     icount = NULLAGINO;
280         xfs_extlen_t                    aglen = NULLAGBLOCK;
281         xfs_extlen_t                    usedlen;
282         xfs_extlen_t                    freelen;
283         xfs_extlen_t                    bnobt_sz;
284         xfs_extlen_t                    inobt_sz;
285         xfs_extlen_t                    rmapbt_sz;
286         xfs_extlen_t                    refcbt_sz;
287         int                             error;
288 
289         if (!(sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR))
290                 return 0;
291 
292         pag = xfs_perag_get(mp, sm->sm_agno);
293         if (xfs_perag_initialised_agi(pag)) {
294                 /* Use in-core icount if possible. */
295                 icount = pag->pagi_count;
296         } else {
297                 /* Try to get the actual counters from disk. */
298                 error = xfs_ialloc_read_agi(pag, NULL, 0, &bp);
299                 if (!error) {
300                         icount = pag->pagi_count;
301                         xfs_buf_relse(bp);
302                 }
303         }
304 
305         /* Now grab the block counters from the AGF. */
306         error = xfs_alloc_read_agf(pag, NULL, 0, &bp);
307         if (error) {
308                 aglen = pag->block_count;
309                 freelen = aglen;
310                 usedlen = aglen;
311         } else {
312                 struct xfs_agf  *agf = bp->b_addr;
313 
314                 aglen = be32_to_cpu(agf->agf_length);
315                 freelen = be32_to_cpu(agf->agf_freeblks);
316                 usedlen = aglen - freelen;
317                 xfs_buf_relse(bp);
318         }
319 
320         /* If the icount is impossible, make some worst-case assumptions. */
321         if (icount == NULLAGINO ||
322             !xfs_verify_agino(pag, icount)) {
323                 icount = pag->agino_max - pag->agino_min + 1;
324         }
325 
326         /* If the block counts are impossible, make worst-case assumptions. */
327         if (aglen == NULLAGBLOCK ||
328             aglen != pag->block_count ||
329             freelen >= aglen) {
330                 aglen = pag->block_count;
331                 freelen = aglen;
332                 usedlen = aglen;
333         }
334         xfs_perag_put(pag);
335 
336         trace_xrep_calc_ag_resblks(mp, sm->sm_agno, icount, aglen,
337                         freelen, usedlen);
338 
339         /*
340          * Figure out how many blocks we'd need worst case to rebuild
341          * each type of btree.  Note that we can only rebuild the
342          * bnobt/cntbt or inobt/finobt as pairs.
343          */
344         bnobt_sz = 2 * xfs_allocbt_calc_size(mp, freelen);
345         if (xfs_has_sparseinodes(mp))
346                 inobt_sz = xfs_iallocbt_calc_size(mp, icount /
347                                 XFS_INODES_PER_HOLEMASK_BIT);
348         else
349                 inobt_sz = xfs_iallocbt_calc_size(mp, icount /
350                                 XFS_INODES_PER_CHUNK);
351         if (xfs_has_finobt(mp))
352                 inobt_sz *= 2;
353         if (xfs_has_reflink(mp))
354                 refcbt_sz = xfs_refcountbt_calc_size(mp, usedlen);
355         else
356                 refcbt_sz = 0;
357         if (xfs_has_rmapbt(mp)) {
358                 /*
359                  * Guess how many blocks we need to rebuild the rmapbt.
360                  * For non-reflink filesystems we can't have more records than
361                  * used blocks.  However, with reflink it's possible to have
362                  * more than one rmap record per AG block.  We don't know how
363                  * many rmaps there could be in the AG, so we start off with
 364  *                 what we hope is a generous over-estimation.
365                  */
366                 if (xfs_has_reflink(mp))
367                         rmapbt_sz = xfs_rmapbt_calc_size(mp,
368                                         (unsigned long long)aglen * 2);
369                 else
370                         rmapbt_sz = xfs_rmapbt_calc_size(mp, usedlen);
371         } else {
372                 rmapbt_sz = 0;
373         }
374 
375         trace_xrep_calc_ag_resblks_btsize(mp, sm->sm_agno, bnobt_sz,
376                         inobt_sz, rmapbt_sz, refcbt_sz);
377 
378         return max(max(bnobt_sz, inobt_sz), max(rmapbt_sz, refcbt_sz));
379 }
380 
381 /*
382  * Reconstructing per-AG Btrees
383  *
384  * When a space btree is corrupt, we don't bother trying to fix it.  Instead,
385  * we scan secondary space metadata to derive the records that should be in
386  * the damaged btree, initialize a fresh btree root, and insert the records.
387  * Note that for rebuilding the rmapbt we scan all the primary data to
388  * generate the new records.
389  *
390  * However, that leaves the matter of removing all the metadata describing the
391  * old broken structure.  For primary metadata we use the rmap data to collect
392  * every extent with a matching rmap owner (bitmap); we then iterate all other
393  * metadata structures with the same rmap owner to collect the extents that
394  * cannot be removed (sublist).  We then subtract sublist from bitmap to
395  * derive the blocks that were used by the old btree.  These blocks can be
396  * reaped.
397  *
398  * For rmapbt reconstructions we must use different tactics for extent
399  * collection.  First we iterate all primary metadata (this excludes the old
400  * rmapbt, obviously) to generate new rmap records.  The gaps in the rmap
401  * records are collected as bitmap.  The bnobt records are collected as
 402  * sublist.  As with the other btrees we subtract sublist from bitmap, and the
 403  * result (since the rmapbt lives in the free space) is the set of blocks
 404  * from the old rmapbt.
405  */
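
/*
 * A sketch of the subtraction step described above, assuming the
 * xagb_bitmap helpers that the scrub code keeps in scrub/agb_bitmap.h
 * in recent kernels; the helper name is written from memory and should
 * be treated as an assumption rather than a precise API reference.
 */
static int
xrep_example_old_btree_blocks(
        struct xagb_bitmap      *owned, /* extents with this rmap owner */
        struct xagb_bitmap      *keep)  /* extents still in use elsewhere */
{
        /*
         * owned -= keep; whatever remains belonged only to the old,
         * broken btree and can be reaped.
         */
        return xagb_bitmap_disunion(owned, keep);
}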
406 
407 /* Ensure the freelist is the correct size. */
408 int
409 xrep_fix_freelist(
410         struct xfs_scrub        *sc,
411         int                     alloc_flags)
412 {
413         struct xfs_alloc_arg    args = {0};
414 
415         args.mp = sc->mp;
416         args.tp = sc->tp;
417         args.agno = sc->sa.pag->pag_agno;
418         args.alignment = 1;
419         args.pag = sc->sa.pag;
420 
421         return xfs_alloc_fix_freelist(&args, alloc_flags);
422 }
423 
424 /*
425  * Finding per-AG Btree Roots for AGF/AGI Reconstruction
426  *
427  * If the AGF or AGI become slightly corrupted, it may be necessary to rebuild
428  * the AG headers by using the rmap data to rummage through the AG looking for
429  * btree roots.  This is not guaranteed to work if the AG is heavily damaged
430  * or the rmap data are corrupt.
431  *
432  * Callers of xrep_find_ag_btree_roots must lock the AGF and AGFL
433  * buffers if the AGF is being rebuilt; or the AGF and AGI buffers if the
434  * AGI is being rebuilt.  It must maintain these locks until it's safe for
435  * other threads to change the btrees' shapes.  The caller provides
436  * information about the btrees to look for by passing in an array of
 437  * xrep_find_ag_btree with the rmap owner and buf_ops fields set.  The
 438  * (root, height) fields will be set on return if anything is found.  The
439  * last element of the array should have a NULL buf_ops to mark the end of the
440  * array.
441  *
442  * For every rmapbt record matching any of the rmap owners in btree_info,
443  * read each block referenced by the rmap record.  If the block is a btree
444  * block from this filesystem matching any of the magic numbers and has a
445  * level higher than what we've already seen, remember the block and the
446  * height of the tree required to have such a block.  When the call completes,
447  * we return the highest block we've found for each btree description; those
448  * should be the roots.
449  */
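
/*
 * A sketch of a caller, loosely modeled on the AGF repair code in
 * agheader_repair.c: describe the two free space btrees, then let
 * xrep_find_ag_btree_roots() fill in (root, height).  Locking and the
 * handling of the results are left to the caller.
 */
static int
xrep_example_find_allocbt_roots(
        struct xfs_scrub                *sc,
        struct xfs_buf                  *agf_bp,
        struct xfs_buf                  *agfl_bp)
{
        struct xrep_find_ag_btree       fab[] = {
                {
                        .rmap_owner     = XFS_RMAP_OWN_AG,
                        .buf_ops        = &xfs_bnobt_buf_ops,
                },
                {
                        .rmap_owner     = XFS_RMAP_OWN_AG,
                        .buf_ops        = &xfs_cntbt_buf_ops,
                },
                {
                        .buf_ops        = NULL, /* terminates the array */
                },
        };

        return xrep_find_ag_btree_roots(sc, agf_bp, fab, agfl_bp);
}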
450 
451 struct xrep_findroot {
452         struct xfs_scrub                *sc;
453         struct xfs_buf                  *agfl_bp;
454         struct xfs_agf                  *agf;
455         struct xrep_find_ag_btree       *btree_info;
456 };
457 
458 /* See if our block is in the AGFL. */
459 STATIC int
460 xrep_findroot_agfl_walk(
461         struct xfs_mount        *mp,
462         xfs_agblock_t           bno,
463         void                    *priv)
464 {
465         xfs_agblock_t           *agbno = priv;
466 
467         return (*agbno == bno) ? -ECANCELED : 0;
468 }
469 
470 /* Does this block match the btree information passed in? */
471 STATIC int
472 xrep_findroot_block(
473         struct xrep_findroot            *ri,
474         struct xrep_find_ag_btree       *fab,
475         uint64_t                        owner,
476         xfs_agblock_t                   agbno,
477         bool                            *done_with_block)
478 {
479         struct xfs_mount                *mp = ri->sc->mp;
480         struct xfs_buf                  *bp;
481         struct xfs_btree_block          *btblock;
482         xfs_daddr_t                     daddr;
483         int                             block_level;
484         int                             error = 0;
485 
486         daddr = XFS_AGB_TO_DADDR(mp, ri->sc->sa.pag->pag_agno, agbno);
487 
488         /*
489          * Blocks in the AGFL have stale contents that might just happen to
490          * have a matching magic and uuid.  We don't want to pull these blocks
491          * in as part of a tree root, so we have to filter out the AGFL stuff
492          * here.  If the AGFL looks insane we'll just refuse to repair.
493          */
494         if (owner == XFS_RMAP_OWN_AG) {
495                 error = xfs_agfl_walk(mp, ri->agf, ri->agfl_bp,
496                                 xrep_findroot_agfl_walk, &agbno);
497                 if (error == -ECANCELED)
498                         return 0;
499                 if (error)
500                         return error;
501         }
502 
503         /*
504          * Read the buffer into memory so that we can see if it's a match for
505          * our btree type.  We have no clue if it is beforehand, and we want to
506          * avoid xfs_trans_read_buf's behavior of dumping the DONE state (which
507          * will cause needless disk reads in subsequent calls to this function)
508          * and logging metadata verifier failures.
509          *
510          * Therefore, pass in NULL buffer ops.  If the buffer was already in
511          * memory from some other caller it will already have b_ops assigned.
512          * If it was in memory from a previous unsuccessful findroot_block
513          * call, the buffer won't have b_ops but it should be clean and ready
514          * for us to try to verify if the read call succeeds.  The same applies
515          * if the buffer wasn't in memory at all.
516          *
517          * Note: If we never match a btree type with this buffer, it will be
518          * left in memory with NULL b_ops.  This shouldn't be a problem unless
519          * the buffer gets written.
520          */
521         error = xfs_trans_read_buf(mp, ri->sc->tp, mp->m_ddev_targp, daddr,
522                         mp->m_bsize, 0, &bp, NULL);
523         if (error)
524                 return error;
525 
526         /* Ensure the block magic matches the btree type we're looking for. */
527         btblock = XFS_BUF_TO_BLOCK(bp);
528         ASSERT(fab->buf_ops->magic[1] != 0);
529         if (btblock->bb_magic != fab->buf_ops->magic[1])
530                 goto out;
531 
532         /*
533          * If the buffer already has ops applied and they're not the ones for
534          * this btree type, we know this block doesn't match the btree and we
535          * can bail out.
536          *
537          * If the buffer ops match ours, someone else has already validated
538          * the block for us, so we can move on to checking if this is a root
539          * block candidate.
540          *
541          * If the buffer does not have ops, nobody has successfully validated
542          * the contents and the buffer cannot be dirty.  If the magic, uuid,
543          * and structure match this btree type then we'll move on to checking
544          * if it's a root block candidate.  If there is no match, bail out.
545          */
546         if (bp->b_ops) {
547                 if (bp->b_ops != fab->buf_ops)
548                         goto out;
549         } else {
550                 ASSERT(!xfs_trans_buf_is_dirty(bp));
551                 if (!uuid_equal(&btblock->bb_u.s.bb_uuid,
552                                 &mp->m_sb.sb_meta_uuid))
553                         goto out;
554                 /*
555                  * Read verifiers can reference b_ops, so we set the pointer
556                  * here.  If the verifier fails we'll reset the buffer state
557                  * to what it was before we touched the buffer.
558                  */
559                 bp->b_ops = fab->buf_ops;
560                 fab->buf_ops->verify_read(bp);
561                 if (bp->b_error) {
562                         bp->b_ops = NULL;
563                         bp->b_error = 0;
564                         goto out;
565                 }
566 
567                 /*
568                  * Some read verifiers will (re)set b_ops, so we must be
569                  * careful not to change b_ops after running the verifier.
570                  */
571         }
572 
573         /*
574          * This block passes the magic/uuid and verifier tests for this btree
575          * type.  We don't need the caller to try the other tree types.
576          */
577         *done_with_block = true;
578 
579         /*
580          * Compare this btree block's level to the height of the current
581          * candidate root block.
582          *
583          * If the level matches the root we found previously, throw away both
584          * blocks because there can't be two candidate roots.
585          *
586          * If level is lower in the tree than the root we found previously,
587          * ignore this block.
588          */
589         block_level = xfs_btree_get_level(btblock);
590         if (block_level + 1 == fab->height) {
591                 fab->root = NULLAGBLOCK;
592                 goto out;
593         } else if (block_level < fab->height) {
594                 goto out;
595         }
596 
597         /*
598          * This is the highest block in the tree that we've found so far.
599          * Update the btree height to reflect what we've learned from this
600          * block.
601          */
602         fab->height = block_level + 1;
603 
604         /*
605          * If this block doesn't have sibling pointers, then it's the new root
606          * block candidate.  Otherwise, the root will be found farther up the
607          * tree.
608          */
609         if (btblock->bb_u.s.bb_leftsib == cpu_to_be32(NULLAGBLOCK) &&
610             btblock->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK))
611                 fab->root = agbno;
612         else
613                 fab->root = NULLAGBLOCK;
614 
615         trace_xrep_findroot_block(mp, ri->sc->sa.pag->pag_agno, agbno,
616                         be32_to_cpu(btblock->bb_magic), fab->height - 1);
617 out:
618         xfs_trans_brelse(ri->sc->tp, bp);
619         return error;
620 }
621 
622 /*
623  * Do any of the blocks in this rmap record match one of the btrees we're
624  * looking for?
625  */
626 STATIC int
627 xrep_findroot_rmap(
628         struct xfs_btree_cur            *cur,
629         const struct xfs_rmap_irec      *rec,
630         void                            *priv)
631 {
632         struct xrep_findroot            *ri = priv;
633         struct xrep_find_ag_btree       *fab;
634         xfs_agblock_t                   b;
635         bool                            done;
636         int                             error = 0;
637 
638         /* Ignore anything that isn't AG metadata. */
639         if (!XFS_RMAP_NON_INODE_OWNER(rec->rm_owner))
640                 return 0;
641 
642         /* Otherwise scan each block + btree type. */
643         for (b = 0; b < rec->rm_blockcount; b++) {
644                 done = false;
645                 for (fab = ri->btree_info; fab->buf_ops; fab++) {
646                         if (rec->rm_owner != fab->rmap_owner)
647                                 continue;
648                         error = xrep_findroot_block(ri, fab,
649                                         rec->rm_owner, rec->rm_startblock + b,
650                                         &done);
651                         if (error)
652                                 return error;
653                         if (done)
654                                 break;
655                 }
656         }
657 
658         return 0;
659 }
660 
661 /* Find the roots of the per-AG btrees described in btree_info. */
662 int
663 xrep_find_ag_btree_roots(
664         struct xfs_scrub                *sc,
665         struct xfs_buf                  *agf_bp,
666         struct xrep_find_ag_btree       *btree_info,
667         struct xfs_buf                  *agfl_bp)
668 {
669         struct xfs_mount                *mp = sc->mp;
670         struct xrep_findroot            ri;
671         struct xrep_find_ag_btree       *fab;
672         struct xfs_btree_cur            *cur;
673         int                             error;
674 
675         ASSERT(xfs_buf_islocked(agf_bp));
676         ASSERT(agfl_bp == NULL || xfs_buf_islocked(agfl_bp));
677 
678         ri.sc = sc;
679         ri.btree_info = btree_info;
680         ri.agf = agf_bp->b_addr;
681         ri.agfl_bp = agfl_bp;
682         for (fab = btree_info; fab->buf_ops; fab++) {
683                 ASSERT(agfl_bp || fab->rmap_owner != XFS_RMAP_OWN_AG);
684                 ASSERT(XFS_RMAP_NON_INODE_OWNER(fab->rmap_owner));
685                 fab->root = NULLAGBLOCK;
686                 fab->height = 0;
687         }
688 
689         cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag);
690         error = xfs_rmap_query_all(cur, xrep_findroot_rmap, &ri);
691         xfs_btree_del_cursor(cur, error);
692 
693         return error;
694 }
695 
696 #ifdef CONFIG_XFS_QUOTA
697 /* Update some quota flags in the superblock. */
698 void
699 xrep_update_qflags(
700         struct xfs_scrub        *sc,
701         unsigned int            clear_flags,
702         unsigned int            set_flags)
703 {
704         struct xfs_mount        *mp = sc->mp;
705         struct xfs_buf          *bp;
706 
707         mutex_lock(&mp->m_quotainfo->qi_quotaofflock);
708         if ((mp->m_qflags & clear_flags) == 0 &&
709             (mp->m_qflags & set_flags) == set_flags)
710                 goto no_update;
711 
712         mp->m_qflags &= ~clear_flags;
713         mp->m_qflags |= set_flags;
714 
715         spin_lock(&mp->m_sb_lock);
716         mp->m_sb.sb_qflags &= ~clear_flags;
717         mp->m_sb.sb_qflags |= set_flags;
718         spin_unlock(&mp->m_sb_lock);
719 
720         /*
721          * Update the quota flags in the ondisk superblock without touching
722          * the summary counters.  We have not quiesced inode chunk allocation,
723          * so we cannot coordinate with updates to the icount and ifree percpu
724          * counters.
725          */
726         bp = xfs_trans_getsb(sc->tp);
727         xfs_sb_to_disk(bp->b_addr, &mp->m_sb);
728         xfs_trans_buf_set_type(sc->tp, bp, XFS_BLFT_SB_BUF);
729         xfs_trans_log_buf(sc->tp, bp, 0, sizeof(struct xfs_dsb) - 1);
730 
731 no_update:
732         mutex_unlock(&mp->m_quotainfo->qi_quotaofflock);
733 }
734 
735 /* Force a quotacheck the next time we mount. */
736 void
737 xrep_force_quotacheck(
738         struct xfs_scrub        *sc,
739         xfs_dqtype_t            type)
740 {
741         uint                    flag;
742 
743         flag = xfs_quota_chkd_flag(type);
744         if (!(flag & sc->mp->m_qflags))
745                 return;
746 
747         xrep_update_qflags(sc, flag, 0);
748 }
749 
750 /*
751  * Attach dquots to this inode, or schedule quotacheck to fix them.
752  *
753  * This function ensures that the appropriate dquots are attached to an inode.
754  * We cannot allow the dquot code to allocate an on-disk dquot block here
755  * because we're already in transaction context.  The on-disk dquot should
756  * already exist anyway.  If the quota code signals corruption or missing quota
757  * information, schedule quotacheck, which will repair corruptions in the quota
758  * metadata.
759  */
760 int
761 xrep_ino_dqattach(
762         struct xfs_scrub        *sc)
763 {
764         int                     error;
765 
766         ASSERT(sc->tp != NULL);
767         ASSERT(sc->ip != NULL);
768 
769         error = xfs_qm_dqattach(sc->ip);
770         switch (error) {
771         case -EFSBADCRC:
772         case -EFSCORRUPTED:
773         case -ENOENT:
774                 xfs_err_ratelimited(sc->mp,
775 "inode %llu repair encountered quota error %d, quotacheck forced.",
776                                 (unsigned long long)sc->ip->i_ino, error);
777                 if (XFS_IS_UQUOTA_ON(sc->mp) && !sc->ip->i_udquot)
778                         xrep_force_quotacheck(sc, XFS_DQTYPE_USER);
779                 if (XFS_IS_GQUOTA_ON(sc->mp) && !sc->ip->i_gdquot)
780                         xrep_force_quotacheck(sc, XFS_DQTYPE_GROUP);
781                 if (XFS_IS_PQUOTA_ON(sc->mp) && !sc->ip->i_pdquot)
782                         xrep_force_quotacheck(sc, XFS_DQTYPE_PROJ);
783                 fallthrough;
784         case -ESRCH:
785                 error = 0;
786                 break;
787         default:
788                 break;
789         }
790 
791         return error;
792 }
793 #endif /* CONFIG_XFS_QUOTA */
794 
795 /*
796  * Ensure that the inode being repaired is ready to handle a certain number of
797  * extents, or return EFSCORRUPTED.  Caller must hold the ILOCK of the inode
798  * being repaired and have joined it to the scrub transaction.
799  */
800 int
801 xrep_ino_ensure_extent_count(
802         struct xfs_scrub        *sc,
803         int                     whichfork,
804         xfs_extnum_t            nextents)
805 {
806         xfs_extnum_t            max_extents;
807         bool                    inode_has_nrext64;
808 
809         inode_has_nrext64 = xfs_inode_has_large_extent_counts(sc->ip);
810         max_extents = xfs_iext_max_nextents(inode_has_nrext64, whichfork);
811         if (nextents <= max_extents)
812                 return 0;
813         if (inode_has_nrext64)
814                 return -EFSCORRUPTED;
815         if (!xfs_has_large_extent_counts(sc->mp))
816                 return -EFSCORRUPTED;
817 
818         max_extents = xfs_iext_max_nextents(true, whichfork);
819         if (nextents > max_extents)
820                 return -EFSCORRUPTED;
821 
822         sc->ip->i_diflags2 |= XFS_DIFLAG2_NREXT64;
823         xfs_trans_log_inode(sc->tp, sc->ip, XFS_ILOG_CORE);
824         return 0;
825 }
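
/*
 * A minimal sketch: a fork rebuilder might validate its projected
 * extent count before writing out a new data fork.  @new_extents is an
 * assumed input computed by a (hypothetical) record collection pass.
 */
static int
xrep_example_check_fork_room(
        struct xfs_scrub        *sc,
        xfs_extnum_t            new_extents)
{
        return xrep_ino_ensure_extent_count(sc, XFS_DATA_FORK, new_extents);
}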
826 
827 /*
828  * Initialize all the btree cursors for an AG repair except for the btree that
829  * we're rebuilding.
830  */
831 void
832 xrep_ag_btcur_init(
833         struct xfs_scrub        *sc,
834         struct xchk_ag          *sa)
835 {
836         struct xfs_mount        *mp = sc->mp;
837 
838         /* Set up a bnobt cursor for cross-referencing. */
839         if (sc->sm->sm_type != XFS_SCRUB_TYPE_BNOBT &&
840             sc->sm->sm_type != XFS_SCRUB_TYPE_CNTBT) {
841                 sa->bno_cur = xfs_bnobt_init_cursor(mp, sc->tp, sa->agf_bp,
842                                 sc->sa.pag);
843                 sa->cnt_cur = xfs_cntbt_init_cursor(mp, sc->tp, sa->agf_bp,
844                                 sc->sa.pag);
845         }
846 
 847         /* Set up an inobt cursor for cross-referencing. */
848         if (sc->sm->sm_type != XFS_SCRUB_TYPE_INOBT &&
849             sc->sm->sm_type != XFS_SCRUB_TYPE_FINOBT) {
850                 sa->ino_cur = xfs_inobt_init_cursor(sc->sa.pag, sc->tp,
851                                 sa->agi_bp);
852                 if (xfs_has_finobt(mp))
853                         sa->fino_cur = xfs_finobt_init_cursor(sc->sa.pag,
854                                         sc->tp, sa->agi_bp);
855         }
856 
857         /* Set up a rmapbt cursor for cross-referencing. */
858         if (sc->sm->sm_type != XFS_SCRUB_TYPE_RMAPBT &&
859             xfs_has_rmapbt(mp))
860                 sa->rmap_cur = xfs_rmapbt_init_cursor(mp, sc->tp, sa->agf_bp,
861                                 sc->sa.pag);
862 
863         /* Set up a refcountbt cursor for cross-referencing. */
864         if (sc->sm->sm_type != XFS_SCRUB_TYPE_REFCNTBT &&
865             xfs_has_reflink(mp))
866                 sa->refc_cur = xfs_refcountbt_init_cursor(mp, sc->tp,
867                                 sa->agf_bp, sc->sa.pag);
868 }
869 
870 /*
871  * Reinitialize the in-core AG state after a repair by rereading the AGF
872  * buffer.  We had better get the same AGF buffer as the one that's attached
873  * to the scrub context.
874  */
875 int
876 xrep_reinit_pagf(
877         struct xfs_scrub        *sc)
878 {
879         struct xfs_perag        *pag = sc->sa.pag;
880         struct xfs_buf          *bp;
881         int                     error;
882 
883         ASSERT(pag);
884         ASSERT(xfs_perag_initialised_agf(pag));
885 
886         clear_bit(XFS_AGSTATE_AGF_INIT, &pag->pag_opstate);
887         error = xfs_alloc_read_agf(pag, sc->tp, 0, &bp);
888         if (error)
889                 return error;
890 
891         if (bp != sc->sa.agf_bp) {
892                 ASSERT(bp == sc->sa.agf_bp);
893                 return -EFSCORRUPTED;
894         }
895 
896         return 0;
897 }
898 
899 /*
900  * Reinitialize the in-core AG state after a repair by rereading the AGI
901  * buffer.  We had better get the same AGI buffer as the one that's attached
902  * to the scrub context.
903  */
904 int
905 xrep_reinit_pagi(
906         struct xfs_scrub        *sc)
907 {
908         struct xfs_perag        *pag = sc->sa.pag;
909         struct xfs_buf          *bp;
910         int                     error;
911 
912         ASSERT(pag);
913         ASSERT(xfs_perag_initialised_agi(pag));
914 
915         clear_bit(XFS_AGSTATE_AGI_INIT, &pag->pag_opstate);
916         error = xfs_ialloc_read_agi(pag, sc->tp, 0, &bp);
917         if (error)
918                 return error;
919 
920         if (bp != sc->sa.agi_bp) {
921                 ASSERT(bp == sc->sa.agi_bp);
922                 return -EFSCORRUPTED;
923         }
924 
925         return 0;
926 }
927 
928 /*
929  * Given an active reference to a perag structure, load AG headers and cursors.
930  * This should only be called to scan an AG while repairing file-based metadata.
931  */
932 int
933 xrep_ag_init(
934         struct xfs_scrub        *sc,
935         struct xfs_perag        *pag,
936         struct xchk_ag          *sa)
937 {
938         int                     error;
939 
940         ASSERT(!sa->pag);
941 
942         error = xfs_ialloc_read_agi(pag, sc->tp, 0, &sa->agi_bp);
943         if (error)
944                 return error;
945 
946         error = xfs_alloc_read_agf(pag, sc->tp, 0, &sa->agf_bp);
947         if (error)
948                 return error;
949 
950         /* Grab our own passive reference from the caller's ref. */
951         sa->pag = xfs_perag_hold(pag);
952         xrep_ag_btcur_init(sc, sa);
953         return 0;
954 }
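
/*
 * A sketch of the per-AG scan described above.  xchk_ag_free() is the
 * matching teardown helper declared in scrub/common.h; if that name
 * differs in a given release, treat it as an assumption.
 */
static int
xrep_example_scan_ag(
        struct xfs_scrub        *sc,
        struct xfs_perag        *pag)
{
        struct xchk_ag          sa = { };
        int                     error;

        error = xrep_ag_init(sc, pag, &sa);
        if (error)
                return error;

        /* ... query sa.rmap_cur or the other cursors here ... */

        xchk_ag_free(sc, &sa);
        return 0;
}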
955 
956 /* Reinitialize the per-AG block reservation for the AG we just fixed. */
957 int
958 xrep_reset_perag_resv(
959         struct xfs_scrub        *sc)
960 {
961         int                     error;
962 
963         if (!(sc->flags & XREP_RESET_PERAG_RESV))
964                 return 0;
965 
966         ASSERT(sc->sa.pag != NULL);
967         ASSERT(sc->ops->type == ST_PERAG);
968         ASSERT(sc->tp);
969 
970         sc->flags &= ~XREP_RESET_PERAG_RESV;
971         xfs_ag_resv_free(sc->sa.pag);
972         error = xfs_ag_resv_init(sc->sa.pag, sc->tp);
973         if (error == -ENOSPC) {
974                 xfs_err(sc->mp,
975 "Insufficient free space to reset per-AG reservation for AG %u after repair.",
976                                 sc->sa.pag->pag_agno);
977                 error = 0;
978         }
979 
980         return error;
981 }
982 
983 /* Decide if we are going to call the repair function for a scrub type. */
984 bool
985 xrep_will_attempt(
986         struct xfs_scrub        *sc)
987 {
988         /* Userspace asked us to rebuild the structure regardless. */
989         if (sc->sm->sm_flags & XFS_SCRUB_IFLAG_FORCE_REBUILD)
990                 return true;
991 
992         /* Let debug users force us into the repair routines. */
993         if (XFS_TEST_ERROR(false, sc->mp, XFS_ERRTAG_FORCE_SCRUB_REPAIR))
994                 return true;
995 
996         /* Metadata is corrupt or failed cross-referencing. */
997         if (xchk_needs_repair(sc->sm))
998                 return true;
999 
1000         return false;
1001 }
1002 
1003 /* Try to fix some part of a metadata inode by calling another scrubber. */
1004 STATIC int
1005 xrep_metadata_inode_subtype(
1006         struct xfs_scrub        *sc,
1007         unsigned int            scrub_type)
1008 {
1009         struct xfs_scrub_subord *sub;
1010         int                     error;
1011 
1012         /*
1013          * Let's see if the inode needs repair.  Use a subordinate scrub context
1014          * to call the scrub and repair functions so that we can hang on to the
1015          * resources that we already acquired instead of using the standard
1016          * setup/teardown routines.
1017          */
1018         sub = xchk_scrub_create_subord(sc, scrub_type);
1019         error = sub->sc.ops->scrub(&sub->sc);
1020         if (error)
1021                 goto out;
1022         if (!xrep_will_attempt(&sub->sc))
1023                 goto out;
1024 
1025         /*
1026          * Repair some part of the inode.  This will potentially join the inode
1027          * to the transaction.
1028          */
1029         error = sub->sc.ops->repair(&sub->sc);
1030         if (error)
1031                 goto out;
1032 
1033         /*
1034          * Finish all deferred intent items and then roll the transaction so
1035          * that the inode will not be joined to the transaction when we exit
1036          * the function.
1037          */
1038         error = xfs_defer_finish(&sub->sc.tp);
1039         if (error)
1040                 goto out;
1041         error = xfs_trans_roll(&sub->sc.tp);
1042         if (error)
1043                 goto out;
1044 
1045         /*
1046          * Clear the corruption flags and re-check the metadata that we just
1047          * repaired.
1048          */
1049         sub->sc.sm->sm_flags &= ~XFS_SCRUB_FLAGS_OUT;
1050         error = sub->sc.ops->scrub(&sub->sc);
1051         if (error)
1052                 goto out;
1053 
1054         /* If corruption persists, the repair has failed. */
1055         if (xchk_needs_repair(sub->sc.sm)) {
1056                 error = -EFSCORRUPTED;
1057                 goto out;
1058         }
1059 out:
1060         xchk_scrub_free_subord(sub);
1061         return error;
1062 }
1063 
1064 /*
1065  * Repair the ondisk forks of a metadata inode.  The caller must ensure that
1066  * sc->ip points to the metadata inode and the ILOCK is held on that inode.
1067  * The inode must not be joined to the transaction before the call, and will
1068  * not be afterwards.
1069  */
1070 int
1071 xrep_metadata_inode_forks(
1072         struct xfs_scrub        *sc)
1073 {
1074         bool                    dirty = false;
1075         int                     error;
1076 
1077         /* Repair the inode record and the data fork. */
1078         error = xrep_metadata_inode_subtype(sc, XFS_SCRUB_TYPE_INODE);
1079         if (error)
1080                 return error;
1081 
1082         error = xrep_metadata_inode_subtype(sc, XFS_SCRUB_TYPE_BMBTD);
1083         if (error)
1084                 return error;
1085 
1086         /* Make sure the attr fork looks ok before we delete it. */
1087         error = xrep_metadata_inode_subtype(sc, XFS_SCRUB_TYPE_BMBTA);
1088         if (error)
1089                 return error;
1090 
1091         /* Clear the reflink flag since metadata never shares. */
1092         if (xfs_is_reflink_inode(sc->ip)) {
1093                 dirty = true;
1094                 xfs_trans_ijoin(sc->tp, sc->ip, 0);
1095                 error = xfs_reflink_clear_inode_flag(sc->ip, &sc->tp);
1096                 if (error)
1097                         return error;
1098         }
1099 
1100         /* Clear the attr forks since metadata shouldn't have that. */
1101         if (xfs_inode_hasattr(sc->ip)) {
1102                 if (!dirty) {
1103                         dirty = true;
1104                         xfs_trans_ijoin(sc->tp, sc->ip, 0);
1105                 }
1106                 error = xrep_xattr_reset_fork(sc);
1107                 if (error)
1108                         return error;
1109         }
1110 
1111         /*
1112          * If we modified the inode, roll the transaction but don't rejoin the
1113          * inode to the new transaction because xrep_bmap_data can do that.
1114          */
1115         if (dirty) {
1116                 error = xfs_trans_roll(&sc->tp);
1117                 if (error)
1118                         return error;
1119                 dirty = false;
1120         }
1121 
1122         return 0;
1123 }
1124 
1125 /*
1126  * Set up an in-memory buffer cache so that we can use the xfbtree.  Allocating
1127  * a shmem file might take locks, so we cannot be in transaction context.  Park
1128  * our resources in the scrub context and let the teardown function take care
1129  * of them at the right time.
1130  */
1131 int
1132 xrep_setup_xfbtree(
1133         struct xfs_scrub        *sc,
1134         const char              *descr)
1135 {
1136         ASSERT(sc->tp == NULL);
1137 
1138         return xmbuf_alloc(sc->mp, descr, &sc->xmbtp);
1139 }
1140 
1141 /*
1142  * Create a dummy transaction for use in a live update hook function.  This
1143  * function MUST NOT be called from regular repair code because the current
1144  * process' transaction is saved via the cookie.
1145  */
1146 int
1147 xrep_trans_alloc_hook_dummy(
1148         struct xfs_mount        *mp,
1149         void                    **cookiep,
1150         struct xfs_trans        **tpp)
1151 {
1152         int                     error;
1153 
1154         *cookiep = current->journal_info;
1155         current->journal_info = NULL;
1156 
1157         error = xfs_trans_alloc_empty(mp, tpp);
1158         if (!error)
1159                 return 0;
1160 
1161         current->journal_info = *cookiep;
1162         *cookiep = NULL;
1163         return error;
1164 }
1165 
1166 /* Cancel a dummy transaction used by a live update hook function. */
1167 void
1168 xrep_trans_cancel_hook_dummy(
1169         void                    **cookiep,
1170         struct xfs_trans        *tp)
1171 {
1172         xfs_trans_cancel(tp);
1173         current->journal_info = *cookiep;
1174         *cookiep = NULL;
1175 }
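
/*
 * A sketch of a live update hook body built on the two helpers above;
 * the hook shape and the elided update step are assumptions, since the
 * real hooks live with each btree's repair code.
 */
static int
xrep_example_live_update_hook(
        struct xfs_mount        *mp)
{
        struct xfs_trans        *tp;
        void                    *cookie;
        int                     error;

        error = xrep_trans_alloc_hook_dummy(mp, &cookie, &tp);
        if (error)
                return error;

        /* ... apply the observed update to the in-memory btree ... */

        xrep_trans_cancel_hook_dummy(&cookie, tp);
        return 0;
}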
1176 
1177 /*
1178  * See if this buffer can pass the given ->verify_struct() function.
1179  *
1180  * If the buffer already has ops attached and they're not the ones that were
1181  * passed in, we reject the buffer.  Otherwise, we perform the structure test
1182  * (note that we do not check CRCs) and return the outcome of the test.  The
1183  * buffer ops and error state are left unchanged.
1184  */
1185 bool
1186 xrep_buf_verify_struct(
1187         struct xfs_buf                  *bp,
1188         const struct xfs_buf_ops        *ops)
1189 {
1190         const struct xfs_buf_ops        *old_ops = bp->b_ops;
1191         xfs_failaddr_t                  fa;
1192         int                             old_error;
1193 
1194         if (old_ops) {
1195                 if (old_ops != ops)
1196                         return false;
1197         }
1198 
1199         old_error = bp->b_error;
1200         bp->b_ops = ops;
1201         fa = bp->b_ops->verify_struct(bp);
1202         bp->b_ops = old_ops;
1203         bp->b_error = old_error;
1204 
1205         return fa == NULL;
1206 }
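
/*
 * A minimal usage sketch: probe whether a candidate buffer has the
 * structure of a bnobt block without disturbing the buffer's ops or
 * error state.  xfs_bnobt_buf_ops is one example; acquiring @bp is
 * left to the caller.
 */
static bool
xrep_example_is_bnobt_block(
        struct xfs_buf          *bp)
{
        return xrep_buf_verify_struct(bp, &xfs_bnobt_buf_ops);
}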
1207 
