Linux/fs/xfs/xfs_trans.c

  1 // SPDX-License-Identifier: GPL-2.0
  2 /*
  3  * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
  4  * Copyright (C) 2010 Red Hat, Inc.
  5  * All Rights Reserved.
  6  */
  7 #include "xfs.h"
  8 #include "xfs_fs.h"
  9 #include "xfs_shared.h"
 10 #include "xfs_format.h"
 11 #include "xfs_log_format.h"
 12 #include "xfs_trans_resv.h"
 13 #include "xfs_mount.h"
 14 #include "xfs_extent_busy.h"
 15 #include "xfs_quota.h"
 16 #include "xfs_trans.h"
 17 #include "xfs_trans_priv.h"
 18 #include "xfs_log.h"
 19 #include "xfs_log_priv.h"
 20 #include "xfs_trace.h"
 21 #include "xfs_error.h"
 22 #include "xfs_defer.h"
 23 #include "xfs_inode.h"
 24 #include "xfs_dquot_item.h"
 25 #include "xfs_dquot.h"
 26 #include "xfs_icache.h"
 27 #include "xfs_rtbitmap.h"
 28 
 29 struct kmem_cache       *xfs_trans_cache;
 30 
 31 #if defined(CONFIG_TRACEPOINTS)
 32 static void
 33 xfs_trans_trace_reservations(
 34         struct xfs_mount        *mp)
 35 {
 36         struct xfs_trans_res    *res;
 37         struct xfs_trans_res    *end_res;
 38         int                     i;
 39 
 40         res = (struct xfs_trans_res *)M_RES(mp);
 41         end_res = (struct xfs_trans_res *)(M_RES(mp) + 1);
 42         for (i = 0; res < end_res; i++, res++)
 43                 trace_xfs_trans_resv_calc(mp, i, res);
 44 }
 45 #else
 46 # define xfs_trans_trace_reservations(mp)
 47 #endif
 48 
 49 /*
 50  * Initialize the precomputed transaction reservation values
 51  * in the mount structure.
 52  */
 53 void
 54 xfs_trans_init(
 55         struct xfs_mount        *mp)
 56 {
 57         xfs_trans_resv_calc(mp, M_RES(mp));
 58         xfs_trans_trace_reservations(mp);
 59 }
 60 
 61 /*
 62  * Free the transaction structure.  If there is more clean up
 63  * to do when the structure is freed, add it here.
 64  */
 65 STATIC void
 66 xfs_trans_free(
 67         struct xfs_trans        *tp)
 68 {
 69         xfs_extent_busy_sort(&tp->t_busy);
 70         xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);
 71 
 72         trace_xfs_trans_free(tp, _RET_IP_);
 73         xfs_trans_clear_context(tp);
 74         if (!(tp->t_flags & XFS_TRANS_NO_WRITECOUNT))
 75                 sb_end_intwrite(tp->t_mountp->m_super);
 76         xfs_trans_free_dqinfo(tp);
 77         kmem_cache_free(xfs_trans_cache, tp);
 78 }
 79 
 80 /*
 81  * This is called to create a new transaction which will share the
 82  * permanent log reservation of the given transaction.  The remaining
 83  * unused block and rt extent reservations are also inherited.  This
 84  * implies that the original transaction is no longer allowed to allocate
 85  * blocks.  Locks and log items, however, are not inherited.  They must
 86  * be added to the new transaction explicitly.
 87  */
 88 STATIC struct xfs_trans *
 89 xfs_trans_dup(
 90         struct xfs_trans        *tp)
 91 {
 92         struct xfs_trans        *ntp;
 93 
 94         trace_xfs_trans_dup(tp, _RET_IP_);
 95 
 96         ntp = kmem_cache_zalloc(xfs_trans_cache, GFP_KERNEL | __GFP_NOFAIL);
 97 
 98         /*
 99          * Initialize the new transaction structure.
100          */
101         ntp->t_magic = XFS_TRANS_HEADER_MAGIC;
102         ntp->t_mountp = tp->t_mountp;
103         INIT_LIST_HEAD(&ntp->t_items);
104         INIT_LIST_HEAD(&ntp->t_busy);
105         INIT_LIST_HEAD(&ntp->t_dfops);
106         ntp->t_highest_agno = NULLAGNUMBER;
107 
108         ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
109         ASSERT(tp->t_ticket != NULL);
110 
111         ntp->t_flags = XFS_TRANS_PERM_LOG_RES |
112                        (tp->t_flags & XFS_TRANS_RESERVE) |
113                        (tp->t_flags & XFS_TRANS_NO_WRITECOUNT) |
114                        (tp->t_flags & XFS_TRANS_RES_FDBLKS);
115         /* We gave our writer reference to the new transaction */
116         tp->t_flags |= XFS_TRANS_NO_WRITECOUNT;
117         ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket);
118 
119         ASSERT(tp->t_blk_res >= tp->t_blk_res_used);
120         ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
121         tp->t_blk_res = tp->t_blk_res_used;
122 
123         ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
124         tp->t_rtx_res = tp->t_rtx_res_used;
125 
126         xfs_trans_switch_context(tp, ntp);
127 
128         /* move deferred ops over to the new tp */
129         xfs_defer_move(ntp, tp);
130 
131         xfs_trans_dup_dqinfo(tp, ntp);
132         return ntp;
133 }
134 
135 /*
136  * This is called to reserve free disk blocks and log space for the
137  * given transaction.  This must be done before allocating any resources
138  * within the transaction.
139  *
140  * This will return ENOSPC if there are not enough blocks available.
141  * It will sleep waiting for available log space.
142  * A permanent log reservation, indicated by XFS_TRANS_PERM_LOG_RES in
143  * resp->tr_logflags, is used by long running transactions.  If any one of
144  * the reservations fails then they will all be backed out.
145  *
146  * This does not do quota reservations. That typically is done by the
147  * caller afterwards.
148  */
149 static int
150 xfs_trans_reserve(
151         struct xfs_trans        *tp,
152         struct xfs_trans_res    *resp,
153         uint                    blocks,
154         uint                    rtextents)
155 {
156         struct xfs_mount        *mp = tp->t_mountp;
157         int                     error = 0;
158         bool                    rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
159 
160         /*
161          * Attempt to reserve the needed disk blocks by decrementing
162          * the number needed from the number available.  This will
163          * fail if the count would go below zero.
164          */
165         if (blocks > 0) {
166                 error = xfs_dec_fdblocks(mp, blocks, rsvd);
167                 if (error != 0)
168                         return -ENOSPC;
169                 tp->t_blk_res += blocks;
170         }
171 
172         /*
173          * Reserve the log space needed for this transaction.
174          */
175         if (resp->tr_logres > 0) {
176                 bool    permanent = false;
177 
178                 ASSERT(tp->t_log_res == 0 ||
179                        tp->t_log_res == resp->tr_logres);
180                 ASSERT(tp->t_log_count == 0 ||
181                        tp->t_log_count == resp->tr_logcount);
182 
183                 if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES) {
184                         tp->t_flags |= XFS_TRANS_PERM_LOG_RES;
185                         permanent = true;
186                 } else {
187                         ASSERT(tp->t_ticket == NULL);
188                         ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
189                 }
190 
191                 if (tp->t_ticket != NULL) {
192                         ASSERT(resp->tr_logflags & XFS_TRANS_PERM_LOG_RES);
193                         error = xfs_log_regrant(mp, tp->t_ticket);
194                 } else {
195                         error = xfs_log_reserve(mp, resp->tr_logres,
196                                                 resp->tr_logcount,
197                                                 &tp->t_ticket, permanent);
198                 }
199 
200                 if (error)
201                         goto undo_blocks;
202 
203                 tp->t_log_res = resp->tr_logres;
204                 tp->t_log_count = resp->tr_logcount;
205         }
206 
207         /*
208          * Attempt to reserve the needed realtime extents by decrementing
209          * the number needed from the number available.  This will
210          * fail if the count would go below zero.
211          */
212         if (rtextents > 0) {
213                 error = xfs_dec_frextents(mp, rtextents);
214                 if (error) {
215                         error = -ENOSPC;
216                         goto undo_log;
217                 }
218                 tp->t_rtx_res += rtextents;
219         }
220 
221         return 0;
222 
223         /*
224          * Error cases jump to one of these labels to undo any
225          * reservations which have already been performed.
226          */
227 undo_log:
228         if (resp->tr_logres > 0) {
229                 xfs_log_ticket_ungrant(mp->m_log, tp->t_ticket);
230                 tp->t_ticket = NULL;
231                 tp->t_log_res = 0;
232                 tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES;
233         }
234 
235 undo_blocks:
236         if (blocks > 0) {
237                 xfs_add_fdblocks(mp, blocks);
238                 tp->t_blk_res = 0;
239         }
240         return error;
241 }
242 
243 int
244 xfs_trans_alloc(
245         struct xfs_mount        *mp,
246         struct xfs_trans_res    *resp,
247         uint                    blocks,
248         uint                    rtextents,
249         uint                    flags,
250         struct xfs_trans        **tpp)
251 {
252         struct xfs_trans        *tp;
253         bool                    want_retry = true;
254         int                     error;
255 
256         /*
257          * Allocate the handle before we do our freeze accounting and setting up
258          * GFP_NOFS allocation context so that we avoid lockdep false positives
259          * by doing GFP_KERNEL allocations inside sb_start_intwrite().
260          */
261 retry:
262         tp = kmem_cache_zalloc(xfs_trans_cache, GFP_KERNEL | __GFP_NOFAIL);
263         if (!(flags & XFS_TRANS_NO_WRITECOUNT))
264                 sb_start_intwrite(mp->m_super);
265         xfs_trans_set_context(tp);
266 
267         /*
268          * Zero-reservation ("empty") transactions can't modify anything, so
269          * they're allowed to run while we're frozen.
270          */
271         WARN_ON(resp->tr_logres > 0 &&
272                 mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
273         ASSERT(!(flags & XFS_TRANS_RES_FDBLKS) ||
274                xfs_has_lazysbcount(mp));
275 
276         tp->t_magic = XFS_TRANS_HEADER_MAGIC;
277         tp->t_flags = flags;
278         tp->t_mountp = mp;
279         INIT_LIST_HEAD(&tp->t_items);
280         INIT_LIST_HEAD(&tp->t_busy);
281         INIT_LIST_HEAD(&tp->t_dfops);
282         tp->t_highest_agno = NULLAGNUMBER;
283 
284         error = xfs_trans_reserve(tp, resp, blocks, rtextents);
285         if (error == -ENOSPC && want_retry) {
286                 xfs_trans_cancel(tp);
287 
288                 /*
289                  * We weren't able to reserve enough space for the transaction.
290                  * Flush the other speculative space allocations to free space.
291                  * Do not perform a synchronous scan because callers can hold
292                  * other locks.
293                  */
294                 error = xfs_blockgc_flush_all(mp);
295                 if (error)
296                         return error;
297                 want_retry = false;
298                 goto retry;
299         }
300         if (error) {
301                 xfs_trans_cancel(tp);
302                 return error;
303         }
304 
305         trace_xfs_trans_alloc(tp, _RET_IP_);
306 
307         *tpp = tp;
308         return 0;
309 }
310 
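/*
 * Illustrative usage sketch (not part of the upstream file): the typical
 * xfs_trans_alloc() lifecycle is allocate/reserve, join and dirty log items,
 * then commit or cancel.  The tr_itruncate reservation and the @resblks value
 * are placeholder choices; real callers size the reservation for the specific
 * operation they perform.
 */
static inline int
xfs_example_trans_lifecycle(
        struct xfs_mount        *mp,
        uint                    resblks)
{
        struct xfs_trans        *tp;
        int                     error;

        /* Reserve log space and @resblks disk blocks up front. */
        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, resblks, 0,
                        0, &tp);
        if (error)
                return error;   /* -ENOSPC if the blocks are not available */

        /* ... join log items to @tp and modify them here ... */

        /* Commit writes the changes to the log; cancel would back them out. */
        return xfs_trans_commit(tp);
}
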
311 /*
312  * Create an empty transaction with no reservation.  This is a defensive
313  * mechanism for routines that query metadata without actually modifying them --
314  * if the metadata being queried is somehow cross-linked (think a btree block
315  * pointer that points higher in the tree), we risk deadlock.  However, blocks
316  * grabbed as part of a transaction can be re-grabbed.  The verifiers will
317  * notice the corrupt block and the operation will fail back to userspace
318  * without deadlocking.
319  *
320  * Note the zero-length reservation; this transaction MUST be cancelled without
321  * any dirty data.
322  *
323  * Callers should obtain freeze protection to avoid a conflict with fs freezing
324  * where we can be grabbing buffers at the same time that freeze is trying to
325  * drain the buffer LRU list.
326  */
327 int
328 xfs_trans_alloc_empty(
329         struct xfs_mount                *mp,
330         struct xfs_trans                **tpp)
331 {
332         struct xfs_trans_res            resv = {0};
333 
334         return xfs_trans_alloc(mp, &resv, 0, 0, XFS_TRANS_NO_WRITECOUNT, tpp);
335 }
336 
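/*
 * Illustrative usage sketch (not part of the upstream file): a read-only
 * metadata query built on an empty transaction.  The walk step is a
 * placeholder; the point is that the transaction is never dirtied and is
 * cancelled rather than committed.
 */
static inline int
xfs_example_query_metadata(
        struct xfs_mount        *mp)
{
        struct xfs_trans        *tp;
        int                     error;

        error = xfs_trans_alloc_empty(mp, &tp);
        if (error)
                return error;

        /* ... read-only metadata walk using @tp for buffer tracking ... */

        /* Nothing was dirtied, so cancelling is always safe here. */
        xfs_trans_cancel(tp);
        return 0;
}
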
337 /*
338  * Record the indicated change to the given field for application
339  * to the file system's superblock when the transaction commits.
340  * For now, just store the change in the transaction structure.
341  *
342  * Mark the transaction structure to indicate that the superblock
343  * needs to be updated before committing.
344  *
345  * Because we may not be keeping track of allocated/free inodes and
346  * used filesystem blocks in the superblock, we do not mark the
347  * superblock dirty in this transaction if we modify these fields.
348  * We still need to update the transaction deltas so that they get
349  * applied to the incore superblock, but we don't want them to
350  * cause the superblock to get locked and logged if these are the
351  * only fields in the superblock that the transaction modifies.
352  */
353 void
354 xfs_trans_mod_sb(
355         xfs_trans_t     *tp,
356         uint            field,
357         int64_t         delta)
358 {
359         uint32_t        flags = (XFS_TRANS_DIRTY|XFS_TRANS_SB_DIRTY);
360         xfs_mount_t     *mp = tp->t_mountp;
361 
362         switch (field) {
363         case XFS_TRANS_SB_ICOUNT:
364                 tp->t_icount_delta += delta;
365                 if (xfs_has_lazysbcount(mp))
366                         flags &= ~XFS_TRANS_SB_DIRTY;
367                 break;
368         case XFS_TRANS_SB_IFREE:
369                 tp->t_ifree_delta += delta;
370                 if (xfs_has_lazysbcount(mp))
371                         flags &= ~XFS_TRANS_SB_DIRTY;
372                 break;
373         case XFS_TRANS_SB_FDBLOCKS:
374                 /*
375                  * Track the number of blocks allocated in the transaction.
376                  * Make sure it does not exceed the number reserved. If so,
377                  * shutdown as this can lead to accounting inconsistency.
378                  */
379                 if (delta < 0) {
380                         tp->t_blk_res_used += (uint)-delta;
381                         if (tp->t_blk_res_used > tp->t_blk_res)
382                                 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
383                 } else if (delta > 0 && (tp->t_flags & XFS_TRANS_RES_FDBLKS)) {
384                         int64_t blkres_delta;
385 
386                         /*
387                          * Return freed blocks directly to the reservation
388                          * instead of the global pool, being careful not to
389                          * overflow the trans counter. This is used to preserve
390                          * reservation across chains of transaction rolls that
391                          * repeatedly free and allocate blocks.
392                          */
393                         blkres_delta = min_t(int64_t, delta,
394                                              UINT_MAX - tp->t_blk_res);
395                         tp->t_blk_res += blkres_delta;
396                         delta -= blkres_delta;
397                 }
398                 tp->t_fdblocks_delta += delta;
399                 if (xfs_has_lazysbcount(mp))
400                         flags &= ~XFS_TRANS_SB_DIRTY;
401                 break;
402         case XFS_TRANS_SB_RES_FDBLOCKS:
403                 /*
404                  * The allocation has already been applied to the
405                  * in-core superblock's counter.  This should only
406                  * be applied to the on-disk superblock.
407                  */
408                 tp->t_res_fdblocks_delta += delta;
409                 if (xfs_has_lazysbcount(mp))
410                         flags &= ~XFS_TRANS_SB_DIRTY;
411                 break;
412         case XFS_TRANS_SB_FREXTENTS:
413                 /*
414                  * Track the number of blocks allocated in the
415                  * transaction.  Make sure it does not exceed the
416                  * number reserved.
417                  */
418                 if (delta < 0) {
419                         tp->t_rtx_res_used += (uint)-delta;
420                         ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res);
421                 }
422                 tp->t_frextents_delta += delta;
423                 break;
424         case XFS_TRANS_SB_RES_FREXTENTS:
425                 /*
426                  * The allocation has already been applied to the
427                  * in-core superblock's counter.  This should only
428                  * be applied to the on-disk superblock.
429                  */
430                 ASSERT(delta < 0);
431                 tp->t_res_frextents_delta += delta;
432                 break;
433         case XFS_TRANS_SB_DBLOCKS:
434                 tp->t_dblocks_delta += delta;
435                 break;
436         case XFS_TRANS_SB_AGCOUNT:
437                 ASSERT(delta > 0);
438                 tp->t_agcount_delta += delta;
439                 break;
440         case XFS_TRANS_SB_IMAXPCT:
441                 tp->t_imaxpct_delta += delta;
442                 break;
443         case XFS_TRANS_SB_REXTSIZE:
444                 tp->t_rextsize_delta += delta;
445                 break;
446         case XFS_TRANS_SB_RBMBLOCKS:
447                 tp->t_rbmblocks_delta += delta;
448                 break;
449         case XFS_TRANS_SB_RBLOCKS:
450                 tp->t_rblocks_delta += delta;
451                 break;
452         case XFS_TRANS_SB_REXTENTS:
453                 tp->t_rextents_delta += delta;
454                 break;
455         case XFS_TRANS_SB_REXTSLOG:
456                 tp->t_rextslog_delta += delta;
457                 break;
458         default:
459                 ASSERT(0);
460                 return;
461         }
462 
463         tp->t_flags |= flags;
464 }
465 
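/*
 * Illustrative usage sketch (not part of the upstream file): an allocation
 * path accounting newly allocated blocks against its transaction.  @len is a
 * placeholder extent length; the caller must have reserved at least that many
 * blocks or xfs_trans_mod_sb() will shut the filesystem down.
 */
static inline void
xfs_example_account_alloc(
        struct xfs_trans        *tp,
        xfs_extlen_t            len)
{
        /*
         * A negative delta consumes part of tp->t_blk_res; a positive delta
         * returns freed blocks to the reservation (with XFS_TRANS_RES_FDBLKS)
         * or to the global free space pool.
         */
        xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, -(int64_t)len);
}
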
466 /*
467  * xfs_trans_apply_sb_deltas() is called from the commit code
468  * to bring the superblock buffer into the current transaction
469  * and modify it as requested by earlier calls to xfs_trans_mod_sb().
470  *
471  * For now we just look at each field allowed to change and change
472  * it if necessary.
473  */
474 STATIC void
475 xfs_trans_apply_sb_deltas(
476         xfs_trans_t     *tp)
477 {
478         struct xfs_dsb  *sbp;
479         struct xfs_buf  *bp;
480         int             whole = 0;
481 
482         bp = xfs_trans_getsb(tp);
483         sbp = bp->b_addr;
484 
485         /*
486          * Only update the superblock counters if we are logging them
487          */
488         if (!xfs_has_lazysbcount((tp->t_mountp))) {
489                 if (tp->t_icount_delta)
490                         be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta);
491                 if (tp->t_ifree_delta)
492                         be64_add_cpu(&sbp->sb_ifree, tp->t_ifree_delta);
493                 if (tp->t_fdblocks_delta)
494                         be64_add_cpu(&sbp->sb_fdblocks, tp->t_fdblocks_delta);
495                 if (tp->t_res_fdblocks_delta)
496                         be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta);
497         }
498 
499         /*
500          * Updating frextents requires careful handling because it does not
501          * behave like the lazysb counters because we cannot rely on log
502  * recovery in older kernels to recompute the value from the rtbitmap.
503          * This means that the ondisk frextents must be consistent with the
504          * rtbitmap.
505          *
506          * Therefore, log the frextents change to the ondisk superblock and
507          * update the incore superblock so that future calls to xfs_log_sb
508          * write the correct value ondisk.
509          *
510          * Don't touch m_frextents because it includes incore reservations,
511          * and those are handled by the unreserve function.
512          */
513         if (tp->t_frextents_delta || tp->t_res_frextents_delta) {
514                 struct xfs_mount        *mp = tp->t_mountp;
515                 int64_t                 rtxdelta;
516 
517                 rtxdelta = tp->t_frextents_delta + tp->t_res_frextents_delta;
518 
519                 spin_lock(&mp->m_sb_lock);
520                 be64_add_cpu(&sbp->sb_frextents, rtxdelta);
521                 mp->m_sb.sb_frextents += rtxdelta;
522                 spin_unlock(&mp->m_sb_lock);
523         }
524 
525         if (tp->t_dblocks_delta) {
526                 be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta);
527                 whole = 1;
528         }
529         if (tp->t_agcount_delta) {
530                 be32_add_cpu(&sbp->sb_agcount, tp->t_agcount_delta);
531                 whole = 1;
532         }
533         if (tp->t_imaxpct_delta) {
534                 sbp->sb_imax_pct += tp->t_imaxpct_delta;
535                 whole = 1;
536         }
537         if (tp->t_rextsize_delta) {
538                 be32_add_cpu(&sbp->sb_rextsize, tp->t_rextsize_delta);
539                 whole = 1;
540         }
541         if (tp->t_rbmblocks_delta) {
542                 be32_add_cpu(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta);
543                 whole = 1;
544         }
545         if (tp->t_rblocks_delta) {
546                 be64_add_cpu(&sbp->sb_rblocks, tp->t_rblocks_delta);
547                 whole = 1;
548         }
549         if (tp->t_rextents_delta) {
550                 be64_add_cpu(&sbp->sb_rextents, tp->t_rextents_delta);
551                 whole = 1;
552         }
553         if (tp->t_rextslog_delta) {
554                 sbp->sb_rextslog += tp->t_rextslog_delta;
555                 whole = 1;
556         }
557 
558         xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
559         if (whole)
560                 /*
561                  * Log the whole thing, the fields are noncontiguous.
562                  */
563                 xfs_trans_log_buf(tp, bp, 0, sizeof(struct xfs_dsb) - 1);
564         else
565                 /*
566                  * Since all the modifiable fields are contiguous, we
567                  * can get away with this.
568                  */
569                 xfs_trans_log_buf(tp, bp, offsetof(struct xfs_dsb, sb_icount),
570                                   offsetof(struct xfs_dsb, sb_frextents) +
571                                   sizeof(sbp->sb_frextents) - 1);
572 }
573 
574 /*
575  * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations and
576  * apply superblock counter changes to the in-core superblock.  The
577  * t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT
578  * applied to the in-core superblock.  The idea is that that has already been
579  * done.
580  *
581  * If we are not logging superblock counters, then the inode allocated/free and
582  * used block counts are not updated in the on disk superblock. In this case,
583  * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we
584  * still need to update the incore superblock with the changes.
585  *
586  * Deltas for the inode count are +/-64, hence we use a large batch size of 128
587  * so we don't need to take the counter lock on every update.
588  */
589 #define XFS_ICOUNT_BATCH        128
590 
591 void
592 xfs_trans_unreserve_and_mod_sb(
593         struct xfs_trans        *tp)
594 {
595         struct xfs_mount        *mp = tp->t_mountp;
596         int64_t                 blkdelta = tp->t_blk_res;
597         int64_t                 rtxdelta = tp->t_rtx_res;
598         int64_t                 idelta = 0;
599         int64_t                 ifreedelta = 0;
600 
601         /*
602          * Calculate the deltas.
603          *
604          * t_fdblocks_delta and t_frextents_delta can be positive or negative:
605          *
606          *  - positive values indicate blocks freed in the transaction.
607          *  - negative values indicate blocks allocated in the transaction
608          *
609          * Negative values can only happen if the transaction has a block
610          * reservation that covers the allocated block.  The end result is
611          * that the calculated delta values must always be positive and we
612          * can only put back previous allocated or reserved blocks here.
613          */
614         ASSERT(tp->t_blk_res || tp->t_fdblocks_delta >= 0);
615         if (xfs_has_lazysbcount(mp) || (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
616                 blkdelta += tp->t_fdblocks_delta;
617                 ASSERT(blkdelta >= 0);
618         }
619 
620         ASSERT(tp->t_rtx_res || tp->t_frextents_delta >= 0);
621         if (tp->t_flags & XFS_TRANS_SB_DIRTY) {
622                 rtxdelta += tp->t_frextents_delta;
623                 ASSERT(rtxdelta >= 0);
624         }
625 
626         if (xfs_has_lazysbcount(mp) || (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
627                 idelta = tp->t_icount_delta;
628                 ifreedelta = tp->t_ifree_delta;
629         }
630 
631         /* apply the per-cpu counters */
632         if (blkdelta)
633                 xfs_add_fdblocks(mp, blkdelta);
634 
635         if (idelta)
636                 percpu_counter_add_batch(&mp->m_icount, idelta,
637                                          XFS_ICOUNT_BATCH);
638 
639         if (ifreedelta)
640                 percpu_counter_add(&mp->m_ifree, ifreedelta);
641 
642         if (rtxdelta)
643                 xfs_add_frextents(mp, rtxdelta);
644 
645         if (!(tp->t_flags & XFS_TRANS_SB_DIRTY))
646                 return;
647 
648         /* apply remaining deltas */
649         spin_lock(&mp->m_sb_lock);
650         mp->m_sb.sb_fdblocks += tp->t_fdblocks_delta + tp->t_res_fdblocks_delta;
651         mp->m_sb.sb_icount += idelta;
652         mp->m_sb.sb_ifree += ifreedelta;
653         /*
654          * Do not touch sb_frextents here because we are dealing with incore
655          * reservation.  sb_frextents is not part of the lazy sb counters so it
656          * must be consistent with the ondisk rtbitmap and must never include
657          * incore reservations.
658          */
659         mp->m_sb.sb_dblocks += tp->t_dblocks_delta;
660         mp->m_sb.sb_agcount += tp->t_agcount_delta;
661         mp->m_sb.sb_imax_pct += tp->t_imaxpct_delta;
662         mp->m_sb.sb_rextsize += tp->t_rextsize_delta;
663         if (tp->t_rextsize_delta) {
664                 mp->m_rtxblklog = log2_if_power2(mp->m_sb.sb_rextsize);
665                 mp->m_rtxblkmask = mask64_if_power2(mp->m_sb.sb_rextsize);
666         }
667         mp->m_sb.sb_rbmblocks += tp->t_rbmblocks_delta;
668         mp->m_sb.sb_rblocks += tp->t_rblocks_delta;
669         mp->m_sb.sb_rextents += tp->t_rextents_delta;
670         mp->m_sb.sb_rextslog += tp->t_rextslog_delta;
671         spin_unlock(&mp->m_sb_lock);
672 
673         /*
674          * Debug checks outside of the spinlock so they don't lock up the
675          * machine if they fail.
676          */
677         ASSERT(mp->m_sb.sb_imax_pct >= 0);
678         ASSERT(mp->m_sb.sb_rextslog >= 0);
679 }
680 
681 /* Add the given log item to the transaction's list of log items. */
682 void
683 xfs_trans_add_item(
684         struct xfs_trans        *tp,
685         struct xfs_log_item     *lip)
686 {
687         ASSERT(lip->li_log == tp->t_mountp->m_log);
688         ASSERT(lip->li_ailp == tp->t_mountp->m_ail);
689         ASSERT(list_empty(&lip->li_trans));
690         ASSERT(!test_bit(XFS_LI_DIRTY, &lip->li_flags));
691 
692         list_add_tail(&lip->li_trans, &tp->t_items);
693         trace_xfs_trans_add_item(tp, _RET_IP_);
694 }
695 
696 /*
697  * Unlink the log item from the transaction.  The log item is no longer
698  * considered dirty in this transaction, as the linked transaction has
699  * finished, either by abort or commit completion.
700  */
701 void
702 xfs_trans_del_item(
703         struct xfs_log_item     *lip)
704 {
705         clear_bit(XFS_LI_DIRTY, &lip->li_flags);
706         list_del_init(&lip->li_trans);
707 }
708 
709 /* Detach and unlock all of the items in a transaction */
710 static void
711 xfs_trans_free_items(
712         struct xfs_trans        *tp,
713         bool                    abort)
714 {
715         struct xfs_log_item     *lip, *next;
716 
717         trace_xfs_trans_free_items(tp, _RET_IP_);
718 
719         list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
720                 xfs_trans_del_item(lip);
721                 if (abort)
722                         set_bit(XFS_LI_ABORTED, &lip->li_flags);
723                 if (lip->li_ops->iop_release)
724                         lip->li_ops->iop_release(lip);
725         }
726 }
727 
728 /*
729  * Sort transaction items prior to running precommit operations. This will
730  * attempt to order the items such that they will always be locked in the same
731  * order. Items that have no sort function are moved to the end of the list
732  * and so are locked last.
733  *
734  * This may need refinement as different types of objects add sort functions.
735  *
736  * Function is more complex than it needs to be because we are comparing 64 bit
737  * values and the function only returns 32 bit values.
738  */
739 static int
740 xfs_trans_precommit_sort(
741         void                    *unused_arg,
742         const struct list_head  *a,
743         const struct list_head  *b)
744 {
745         struct xfs_log_item     *lia = container_of(a,
746                                         struct xfs_log_item, li_trans);
747         struct xfs_log_item     *lib = container_of(b,
748                                         struct xfs_log_item, li_trans);
749         int64_t                 diff;
750 
751         /*
752          * If both items are non-sortable, leave them alone. If only one is
753          * sortable, move the non-sortable item towards the end of the list.
754          */
755         if (!lia->li_ops->iop_sort && !lib->li_ops->iop_sort)
756                 return 0;
757         if (!lia->li_ops->iop_sort)
758                 return 1;
759         if (!lib->li_ops->iop_sort)
760                 return -1;
761 
762         diff = lia->li_ops->iop_sort(lia) - lib->li_ops->iop_sort(lib);
763         if (diff < 0)
764                 return -1;
765         if (diff > 0)
766                 return 1;
767         return 0;
768 }
769 
770 /*
771  * Run transaction precommit functions.
772  *
773  * If there is an error in any of the callouts, then stop immediately and
774  * trigger a shutdown to abort the transaction. There is no recovery possible
775  * from errors at this point as the transaction is dirty....
776  */
777 static int
778 xfs_trans_run_precommits(
779         struct xfs_trans        *tp)
780 {
781         struct xfs_mount        *mp = tp->t_mountp;
782         struct xfs_log_item     *lip, *n;
783         int                     error = 0;
784 
785         /*
786          * Sort the item list to avoid ABBA deadlocks with other transactions
787          * running precommit operations that lock multiple shared items such as
788          * inode cluster buffers.
789          */
790         list_sort(NULL, &tp->t_items, xfs_trans_precommit_sort);
791 
792         /*
793          * Precommit operations can remove the log item from the transaction
794          * if the log item exists purely to delay modifications until they
795          * can be ordered against other operations. Hence we have to use
796          * list_for_each_entry_safe() here.
797          */
798         list_for_each_entry_safe(lip, n, &tp->t_items, li_trans) {
799                 if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
800                         continue;
801                 if (lip->li_ops->iop_precommit) {
802                         error = lip->li_ops->iop_precommit(tp, lip);
803                         if (error)
804                                 break;
805                 }
806         }
807         if (error)
808                 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
809         return error;
810 }
811 
812 /*
813  * Commit the given transaction to the log.
814  *
815  * The XFS disk error handling mechanism is not based on a typical
816  * transaction abort mechanism. Logically after the filesystem
817  * gets marked 'SHUTDOWN', we can't let any new transactions
818  * be durable - ie. committed to disk - because some metadata might
819  * be inconsistent. In such cases, this returns an error, and the
820  * caller may assume that all locked objects joined to the transaction
821  * have already been unlocked as if the commit had succeeded.
822  * Do not reference the transaction structure after this call.
823  */
824 static int
825 __xfs_trans_commit(
826         struct xfs_trans        *tp,
827         bool                    regrant)
828 {
829         struct xfs_mount        *mp = tp->t_mountp;
830         struct xlog             *log = mp->m_log;
831         xfs_csn_t               commit_seq = 0;
832         int                     error = 0;
833         int                     sync = tp->t_flags & XFS_TRANS_SYNC;
834 
835         trace_xfs_trans_commit(tp, _RET_IP_);
836 
837         error = xfs_trans_run_precommits(tp);
838         if (error) {
839                 if (tp->t_flags & XFS_TRANS_PERM_LOG_RES)
840                         xfs_defer_cancel(tp);
841                 goto out_unreserve;
842         }
843 
844         /*
845          * Finish deferred items on final commit. Only permanent transactions
846          * should ever have deferred ops.
847          */
848         WARN_ON_ONCE(!list_empty(&tp->t_dfops) &&
849                      !(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
850         if (!regrant && (tp->t_flags & XFS_TRANS_PERM_LOG_RES)) {
851                 error = xfs_defer_finish_noroll(&tp);
852                 if (error)
853                         goto out_unreserve;
854 
855                 /* Run precommits from final tx in defer chain. */
856                 error = xfs_trans_run_precommits(tp);
857                 if (error)
858                         goto out_unreserve;
859         }
860 
861         /*
862          * If there is nothing to be logged by the transaction,
863          * then unlock all of the items associated with the
864          * transaction and free the transaction structure.
865          * Also make sure to return any reserved blocks to
866          * the free pool.
867          */
868         if (!(tp->t_flags & XFS_TRANS_DIRTY))
869                 goto out_unreserve;
870 
871         /*
872          * We must check against log shutdown here because we cannot abort log
873          * items and leave them dirty, inconsistent and unpinned in memory while
874          * the log is active. This leaves them open to being written back to
875          * disk, and that will lead to on-disk corruption.
876          */
877         if (xlog_is_shutdown(log)) {
878                 error = -EIO;
879                 goto out_unreserve;
880         }
881 
882         ASSERT(tp->t_ticket != NULL);
883 
884         /*
885          * If we need to update the superblock, then do it now.
886          */
887         if (tp->t_flags & XFS_TRANS_SB_DIRTY)
888                 xfs_trans_apply_sb_deltas(tp);
889         xfs_trans_apply_dquot_deltas(tp);
890 
891         xlog_cil_commit(log, tp, &commit_seq, regrant);
892 
893         xfs_trans_free(tp);
894 
895         /*
896          * If the transaction needs to be synchronous, then force the
897          * log out now and wait for it.
898          */
899         if (sync) {
900                 error = xfs_log_force_seq(mp, commit_seq, XFS_LOG_SYNC, NULL);
901                 XFS_STATS_INC(mp, xs_trans_sync);
902         } else {
903                 XFS_STATS_INC(mp, xs_trans_async);
904         }
905 
906         return error;
907 
908 out_unreserve:
909         xfs_trans_unreserve_and_mod_sb(tp);
910 
911         /*
912          * It is indeed possible for the transaction to be not dirty but
913          * the dqinfo portion to be.  All that means is that we have some
914          * (non-persistent) quota reservations that need to be unreserved.
915          */
916         xfs_trans_unreserve_and_mod_dquots(tp);
917         if (tp->t_ticket) {
918                 if (regrant && !xlog_is_shutdown(log))
919                         xfs_log_ticket_regrant(log, tp->t_ticket);
920                 else
921                         xfs_log_ticket_ungrant(log, tp->t_ticket);
922                 tp->t_ticket = NULL;
923         }
924         xfs_trans_free_items(tp, !!error);
925         xfs_trans_free(tp);
926 
927         XFS_STATS_INC(mp, xs_trans_empty);
928         return error;
929 }
930 
931 int
932 xfs_trans_commit(
933         struct xfs_trans        *tp)
934 {
935         return __xfs_trans_commit(tp, false);
936 }
937 
938 /*
939  * Unlock all of the transaction's items and free the transaction.  If the
940  * transaction is dirty, we must shut down the filesystem because there is no
941  * way to restore them to their previous state.
942  *
943  * If the transaction has made a log reservation, make sure to release it as
944  * well.
945  *
946  * This is a high level function (equivalent to xfs_trans_commit()) and so can
947  * be called after the transaction has effectively been aborted due to the mount
948  * being shut down. However, if the mount has not been shut down and the
949  * transaction is dirty we will shut the mount down and, in doing so, that
950  * guarantees that the log is shut down, too. Hence we don't need to be as
951  * careful with shutdown state and dirty items here as we need to be in
952  * xfs_trans_commit().
953  */
954 void
955 xfs_trans_cancel(
956         struct xfs_trans        *tp)
957 {
958         struct xfs_mount        *mp = tp->t_mountp;
959         struct xlog             *log = mp->m_log;
960         bool                    dirty = (tp->t_flags & XFS_TRANS_DIRTY);
961 
962         trace_xfs_trans_cancel(tp, _RET_IP_);
963 
964         /*
965          * It's never valid to cancel a transaction with deferred ops attached,
966          * because the transaction is effectively dirty.  Complain about this
967          * loudly before freeing the in-memory defer items and shutting down the
968          * filesystem.
969          */
970         if (!list_empty(&tp->t_dfops)) {
971                 ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
972                 dirty = true;
973                 xfs_defer_cancel(tp);
974         }
975 
976         /*
977          * See if the caller is relying on us to shut down the filesystem. We
978          * only want an error report if there isn't already a shutdown in
979          * progress, so we only need to check against the mount shutdown state
980          * here.
981          */
982         if (dirty && !xfs_is_shutdown(mp)) {
983                 XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
984                 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
985         }
986 #ifdef DEBUG
987         /* Log items need to be consistent until the log is shut down. */
988         if (!dirty && !xlog_is_shutdown(log)) {
989                 struct xfs_log_item *lip;
990 
991                 list_for_each_entry(lip, &tp->t_items, li_trans)
992                         ASSERT(!xlog_item_is_intent_done(lip));
993         }
994 #endif
995         xfs_trans_unreserve_and_mod_sb(tp);
996         xfs_trans_unreserve_and_mod_dquots(tp);
997 
998         if (tp->t_ticket) {
999                 xfs_log_ticket_ungrant(log, tp->t_ticket);
1000                 tp->t_ticket = NULL;
1001         }
1002 
1003         xfs_trans_free_items(tp, dirty);
1004         xfs_trans_free(tp);
1005 }
1006 
1007 /*
1008  * Roll from one trans in the sequence of PERMANENT transactions to
1009  * the next: permanent transactions are only flushed out when
1010  * committed with xfs_trans_commit(), but we still want to let chunks
1011  * of it go to the log as soon as possible.  So we commit the
1012  * chunk we've been working on and get a new transaction to continue.
1013  */
1014 int
1015 xfs_trans_roll(
1016         struct xfs_trans        **tpp)
1017 {
1018         struct xfs_trans        *trans = *tpp;
1019         struct xfs_trans_res    tres;
1020         int                     error;
1021 
1022         trace_xfs_trans_roll(trans, _RET_IP_);
1023 
1024         /*
1025          * Copy the critical parameters from one trans to the next.
1026          */
1027         tres.tr_logres = trans->t_log_res;
1028         tres.tr_logcount = trans->t_log_count;
1029 
1030         *tpp = xfs_trans_dup(trans);
1031 
1032         /*
1033          * Commit the current transaction.
1034          * If this commit failed, then it'd just unlock those items that
1035          * are not marked ihold. That also means that a filesystem shutdown
1036          * is in progress. The caller takes the responsibility to cancel
1037          * the duplicate transaction that gets returned.
1038          */
1039         error = __xfs_trans_commit(trans, true);
1040         if (error)
1041                 return error;
1042 
1043         /*
1044          * Reserve space in the log for the next transaction.
1045          * This also pushes items in the "AIL", the list of logged items,
1046          * out to disk if they are taking up space at the tail of the log
1047          * that we want to use.  This requires that either nothing be locked
1048          * across this call, or that anything that is locked be logged in
1049          * the prior and the next transactions.
1050          */
1051         tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
1052         return xfs_trans_reserve(*tpp, &tres, 0, 0);
1053 }
1054 
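/*
 * Illustrative usage sketch (not part of the upstream file): a permanent
 * transaction that logs a bounded chunk of work, rolls, and continues, so the
 * log tail can keep moving.  The chunk count and the single joined inode are
 * placeholder assumptions.
 */
static inline int
xfs_example_roll_loop(
        struct xfs_trans        **tpp,
        struct xfs_inode        *ip,
        unsigned int            nr_chunks)
{
        int                     error;

        while (nr_chunks--) {
                /* ... log one bounded chunk of changes against *tpp ... */

                /* Commit this chunk and get a fresh log reservation. */
                error = xfs_trans_roll(tpp);
                if (error)
                        return error;

                /* The inode stays locked but must be joined to the new tp. */
                xfs_trans_ijoin(*tpp, ip, 0);
        }
        return 0;
}
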
1055 /*
1056  * Allocate a transaction, lock and join the inode to it, and reserve quota.
1057  *
1058  * The caller must ensure that the on-disk dquots attached to this inode have
1059  * already been allocated and initialized.  The caller is responsible for
1060  * releasing ILOCK_EXCL if a new transaction is returned.
1061  */
1062 int
1063 xfs_trans_alloc_inode(
1064         struct xfs_inode        *ip,
1065         struct xfs_trans_res    *resv,
1066         unsigned int            dblocks,
1067         unsigned int            rblocks,
1068         bool                    force,
1069         struct xfs_trans        **tpp)
1070 {
1071         struct xfs_trans        *tp;
1072         struct xfs_mount        *mp = ip->i_mount;
1073         bool                    retried = false;
1074         int                     error;
1075 
1076 retry:
1077         error = xfs_trans_alloc(mp, resv, dblocks,
1078                         xfs_extlen_to_rtxlen(mp, rblocks),
1079                         force ? XFS_TRANS_RESERVE : 0, &tp);
1080         if (error)
1081                 return error;
1082 
1083         xfs_ilock(ip, XFS_ILOCK_EXCL);
1084         xfs_trans_ijoin(tp, ip, 0);
1085 
1086         error = xfs_qm_dqattach_locked(ip, false);
1087         if (error) {
1088                 /* Caller should have allocated the dquots! */
1089                 ASSERT(error != -ENOENT);
1090                 goto out_cancel;
1091         }
1092 
1093         error = xfs_trans_reserve_quota_nblks(tp, ip, dblocks, rblocks, force);
1094         if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
1095                 xfs_trans_cancel(tp);
1096                 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1097                 xfs_blockgc_free_quota(ip, 0);
1098                 retried = true;
1099                 goto retry;
1100         }
1101         if (error)
1102                 goto out_cancel;
1103 
1104         *tpp = tp;
1105         return 0;
1106 
1107 out_cancel:
1108         xfs_trans_cancel(tp);
1109         xfs_iunlock(ip, XFS_ILOCK_EXCL);
1110         return error;
1111 }
1112 
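/*
 * Illustrative usage sketch (not part of the upstream file): a caller of
 * xfs_trans_alloc_inode() gets the inode back locked and joined, and must
 * drop XFS_ILOCK_EXCL itself after committing.  The tr_write reservation and
 * the @dblocks value are placeholder choices.
 */
static inline int
xfs_example_modify_inode(
        struct xfs_inode        *ip,
        unsigned int            dblocks)
{
        struct xfs_trans        *tp;
        int                     error;

        error = xfs_trans_alloc_inode(ip, &M_RES(ip->i_mount)->tr_write,
                        dblocks, 0, false, &tp);
        if (error)
                return error;

        /* ... modify and log the inode here ... */

        error = xfs_trans_commit(tp);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;
}
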
1113 /*
1114  * Try to reserve more blocks for a transaction.
1115  *
1116  * This is for callers that need to attach resources to a transaction, scan
1117  * those resources to determine the space reservation requirements, and then
1118  * modify the attached resources.  In other words, online repair.  This can
1119  * fail due to ENOSPC, so the caller must be able to cancel the transaction
1120  * without shutting down the fs.
1121  */
1122 int
1123 xfs_trans_reserve_more(
1124         struct xfs_trans        *tp,
1125         unsigned int            blocks,
1126         unsigned int            rtextents)
1127 {
1128         struct xfs_trans_res    resv = { };
1129 
1130         return xfs_trans_reserve(tp, &resv, blocks, rtextents);
1131 }
1132 
1133 /*
1134  * Try to reserve more blocks and file quota for a transaction.  Same
1135  * conditions of usage as xfs_trans_reserve_more.
1136  */
1137 int
1138 xfs_trans_reserve_more_inode(
1139         struct xfs_trans        *tp,
1140         struct xfs_inode        *ip,
1141         unsigned int            dblocks,
1142         unsigned int            rblocks,
1143         bool                    force_quota)
1144 {
1145         struct xfs_trans_res    resv = { };
1146         struct xfs_mount        *mp = ip->i_mount;
1147         unsigned int            rtx = xfs_extlen_to_rtxlen(mp, rblocks);
1148         int                     error;
1149 
1150         xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
1151 
1152         error = xfs_trans_reserve(tp, &resv, dblocks, rtx);
1153         if (error)
1154                 return error;
1155 
1156         if (!XFS_IS_QUOTA_ON(mp) || xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
1157                 return 0;
1158 
1159         if (tp->t_flags & XFS_TRANS_RESERVE)
1160                 force_quota = true;
1161 
1162         error = xfs_trans_reserve_quota_nblks(tp, ip, dblocks, rblocks,
1163                         force_quota);
1164         if (!error)
1165                 return 0;
1166 
1167         /* Quota failed, give back the new reservation. */
1168         xfs_add_fdblocks(mp, dblocks);
1169         tp->t_blk_res -= dblocks;
1170         xfs_add_frextents(mp, rtx);
1171         tp->t_rtx_res -= rtx;
1172         return error;
1173 }
1174 
1175 /*
1176  * Allocate a transaction in preparation for inode creation by reserving quota
1177  * against the given dquots.  Callers are not required to hold any inode locks.
1178  */
1179 int
1180 xfs_trans_alloc_icreate(
1181         struct xfs_mount        *mp,
1182         struct xfs_trans_res    *resv,
1183         struct xfs_dquot        *udqp,
1184         struct xfs_dquot        *gdqp,
1185         struct xfs_dquot        *pdqp,
1186         unsigned int            dblocks,
1187         struct xfs_trans        **tpp)
1188 {
1189         struct xfs_trans        *tp;
1190         bool                    retried = false;
1191         int                     error;
1192 
1193 retry:
1194         error = xfs_trans_alloc(mp, resv, dblocks, 0, 0, &tp);
1195         if (error)
1196                 return error;
1197 
1198         error = xfs_trans_reserve_quota_icreate(tp, udqp, gdqp, pdqp, dblocks);
1199         if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
1200                 xfs_trans_cancel(tp);
1201                 xfs_blockgc_free_dquots(mp, udqp, gdqp, pdqp, 0);
1202                 retried = true;
1203                 goto retry;
1204         }
1205         if (error) {
1206                 xfs_trans_cancel(tp);
1207                 return error;
1208         }
1209 
1210         *tpp = tp;
1211         return 0;
1212 }
1213 
1214 /*
1215  * Allocate a transaction, lock and join the inode to it, and reserve quota
1216  * in preparation for inode attribute changes that include uid, gid, or prid
1217  * changes.
1218  *
1219  * The caller must ensure that the on-disk dquots attached to this inode have
1220  * already been allocated and initialized.  The ILOCK will be dropped when the
1221  * transaction is committed or cancelled.
1222  */
1223 int
1224 xfs_trans_alloc_ichange(
1225         struct xfs_inode        *ip,
1226         struct xfs_dquot        *new_udqp,
1227         struct xfs_dquot        *new_gdqp,
1228         struct xfs_dquot        *new_pdqp,
1229         bool                    force,
1230         struct xfs_trans        **tpp)
1231 {
1232         struct xfs_trans        *tp;
1233         struct xfs_mount        *mp = ip->i_mount;
1234         struct xfs_dquot        *udqp;
1235         struct xfs_dquot        *gdqp;
1236         struct xfs_dquot        *pdqp;
1237         bool                    retried = false;
1238         int                     error;
1239 
1240 retry:
1241         error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
1242         if (error)
1243                 return error;
1244 
1245         xfs_ilock(ip, XFS_ILOCK_EXCL);
1246         xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
1247 
1248         error = xfs_qm_dqattach_locked(ip, false);
1249         if (error) {
1250                 /* Caller should have allocated the dquots! */
1251                 ASSERT(error != -ENOENT);
1252                 goto out_cancel;
1253         }
1254 
1255         /*
1256          * For each quota type, skip quota reservations if the inode's dquots
1257          * now match the ones that came from the caller, or the caller didn't
1258          * pass one in.  The inode's dquots can change if we drop the ILOCK to
1259          * perform a blockgc scan, so we must preserve the caller's arguments.
1260          */
1261         udqp = (new_udqp != ip->i_udquot) ? new_udqp : NULL;
1262         gdqp = (new_gdqp != ip->i_gdquot) ? new_gdqp : NULL;
1263         pdqp = (new_pdqp != ip->i_pdquot) ? new_pdqp : NULL;
1264         if (udqp || gdqp || pdqp) {
1265                 unsigned int    qflags = XFS_QMOPT_RES_REGBLKS;
1266 
1267                 if (force)
1268                         qflags |= XFS_QMOPT_FORCE_RES;
1269 
1270                 /*
1271                  * Reserve enough quota to handle blocks on disk and reserved
1272                  * for a delayed allocation.  We'll actually transfer the
1273                  * delalloc reservation between dquots at chown time, even
1274                  * though that part is only semi-transactional.
1275                  */
1276                 error = xfs_trans_reserve_quota_bydquots(tp, mp, udqp, gdqp,
1277                                 pdqp, ip->i_nblocks + ip->i_delayed_blks,
1278                                 1, qflags);
1279                 if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
1280                         xfs_trans_cancel(tp);
1281                         xfs_blockgc_free_dquots(mp, udqp, gdqp, pdqp, 0);
1282                         retried = true;
1283                         goto retry;
1284                 }
1285                 if (error)
1286                         goto out_cancel;
1287         }
1288 
1289         *tpp = tp;
1290         return 0;
1291 
1292 out_cancel:
1293         xfs_trans_cancel(tp);
1294         return error;
1295 }
1296 
1297 /*
1298  * Allocate a transaction, lock and join the directory and child inodes to it,
1299  * and reserve quota for a directory update.  If there isn't sufficient space,
1300  * @dblocks will be set to zero for a reservationless directory update and
1301  * @nospace_error will be set to a negative errno describing the space
1302  * constraint we hit.
1303  *
1304  * The caller must ensure that the on-disk dquots attached to this inode have
1305  * already been allocated and initialized.  The ILOCKs will be dropped when the
1306  * transaction is committed or cancelled.
1307  *
1308  * Caller is responsible for unlocking the inodes manually upon return
1309  */
1310 int
1311 xfs_trans_alloc_dir(
1312         struct xfs_inode        *dp,
1313         struct xfs_trans_res    *resv,
1314         struct xfs_inode        *ip,
1315         unsigned int            *dblocks,
1316         struct xfs_trans        **tpp,
1317         int                     *nospace_error)
1318 {
1319         struct xfs_trans        *tp;
1320         struct xfs_mount        *mp = ip->i_mount;
1321         unsigned int            resblks;
1322         bool                    retried = false;
1323         int                     error;
1324 
1325 retry:
1326         *nospace_error = 0;
1327         resblks = *dblocks;
1328         error = xfs_trans_alloc(mp, resv, resblks, 0, 0, &tp);
1329         if (error == -ENOSPC) {
1330                 *nospace_error = error;
1331                 resblks = 0;
1332                 error = xfs_trans_alloc(mp, resv, resblks, 0, 0, &tp);
1333         }
1334         if (error)
1335                 return error;
1336 
1337         xfs_lock_two_inodes(dp, XFS_ILOCK_EXCL, ip, XFS_ILOCK_EXCL);
1338 
1339         xfs_trans_ijoin(tp, dp, 0);
1340         xfs_trans_ijoin(tp, ip, 0);
1341 
1342         error = xfs_qm_dqattach_locked(dp, false);
1343         if (error) {
1344                 /* Caller should have allocated the dquots! */
1345                 ASSERT(error != -ENOENT);
1346                 goto out_cancel;
1347         }
1348 
1349         error = xfs_qm_dqattach_locked(ip, false);
1350         if (error) {
1351                 /* Caller should have allocated the dquots! */
1352                 ASSERT(error != -ENOENT);
1353                 goto out_cancel;
1354         }
1355 
1356         if (resblks == 0)
1357                 goto done;
1358 
1359         error = xfs_trans_reserve_quota_nblks(tp, dp, resblks, 0, false);
1360         if (error == -EDQUOT || error == -ENOSPC) {
1361                 if (!retried) {
1362                         xfs_trans_cancel(tp);
1363                         xfs_iunlock(dp, XFS_ILOCK_EXCL);
1364                         if (dp != ip)
1365                                 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1366                         xfs_blockgc_free_quota(dp, 0);
1367                         retried = true;
1368                         goto retry;
1369                 }
1370 
1371                 *nospace_error = error;
1372                 resblks = 0;
1373                 error = 0;
1374         }
1375         if (error)
1376                 goto out_cancel;
1377 
1378 done:
1379         *tpp = tp;
1380         *dblocks = resblks;
1381         return 0;
1382 
1383 out_cancel:
1384         xfs_trans_cancel(tp);
1385         return error;
1386 }
1387 
