
TOMOYO Linux Cross Reference
Linux/fs/xfs/xfs_trans.c


Diff markup

Differences between /fs/xfs/xfs_trans.c (Version linux-6.12-rc7) and /fs/xfs/xfs_trans.c (Version linux-5.8.18)


  1 // SPDX-License-Identifier: GPL-2.0                 1 // SPDX-License-Identifier: GPL-2.0
  2 /*                                                  2 /*
  3  * Copyright (c) 2000-2003,2005 Silicon Graphi      3  * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
  4  * Copyright (C) 2010 Red Hat, Inc.                 4  * Copyright (C) 2010 Red Hat, Inc.
  5  * All Rights Reserved.                             5  * All Rights Reserved.
  6  */                                                 6  */
  7 #include "xfs.h"                                    7 #include "xfs.h"
  8 #include "xfs_fs.h"                                 8 #include "xfs_fs.h"
  9 #include "xfs_shared.h"                             9 #include "xfs_shared.h"
 10 #include "xfs_format.h"                            10 #include "xfs_format.h"
 11 #include "xfs_log_format.h"                        11 #include "xfs_log_format.h"
                                                   >>  12 #include "xfs_log_priv.h"
 12 #include "xfs_trans_resv.h"                        13 #include "xfs_trans_resv.h"
 13 #include "xfs_mount.h"                             14 #include "xfs_mount.h"
 14 #include "xfs_extent_busy.h"                       15 #include "xfs_extent_busy.h"
 15 #include "xfs_quota.h"                             16 #include "xfs_quota.h"
 16 #include "xfs_trans.h"                             17 #include "xfs_trans.h"
 17 #include "xfs_trans_priv.h"                        18 #include "xfs_trans_priv.h"
 18 #include "xfs_log.h"                               19 #include "xfs_log.h"
 19 #include "xfs_log_priv.h"                      << 
 20 #include "xfs_trace.h"                             20 #include "xfs_trace.h"
 21 #include "xfs_error.h"                             21 #include "xfs_error.h"
 22 #include "xfs_defer.h"                             22 #include "xfs_defer.h"
 23 #include "xfs_inode.h"                         << 
 24 #include "xfs_dquot_item.h"                    << 
 25 #include "xfs_dquot.h"                         << 
 26 #include "xfs_icache.h"                        << 
 27 #include "xfs_rtbitmap.h"                      << 
 28                                                    23 
 29 struct kmem_cache       *xfs_trans_cache;      !!  24 kmem_zone_t     *xfs_trans_zone;
 30                                                    25 
 31 #if defined(CONFIG_TRACEPOINTS)                    26 #if defined(CONFIG_TRACEPOINTS)
 32 static void                                        27 static void
 33 xfs_trans_trace_reservations(                      28 xfs_trans_trace_reservations(
 34         struct xfs_mount        *mp)               29         struct xfs_mount        *mp)
 35 {                                                  30 {
                                                   >>  31         struct xfs_trans_res    resv;
 36         struct xfs_trans_res    *res;              32         struct xfs_trans_res    *res;
 37         struct xfs_trans_res    *end_res;          33         struct xfs_trans_res    *end_res;
 38         int                     i;                 34         int                     i;
 39                                                    35 
 40         res = (struct xfs_trans_res *)M_RES(mp     36         res = (struct xfs_trans_res *)M_RES(mp);
 41         end_res = (struct xfs_trans_res *)(M_R     37         end_res = (struct xfs_trans_res *)(M_RES(mp) + 1);
 42         for (i = 0; res < end_res; i++, res++)     38         for (i = 0; res < end_res; i++, res++)
 43                 trace_xfs_trans_resv_calc(mp,      39                 trace_xfs_trans_resv_calc(mp, i, res);
                                                   >>  40         xfs_log_get_max_trans_res(mp, &resv);
                                                   >>  41         trace_xfs_trans_resv_calc(mp, -1, &resv);
 44 }                                                  42 }
 45 #else                                              43 #else
 46 # define xfs_trans_trace_reservations(mp)          44 # define xfs_trans_trace_reservations(mp)
 47 #endif                                             45 #endif
 48                                                    46 
 49 /*                                                 47 /*
 50  * Initialize the precomputed transaction rese     48  * Initialize the precomputed transaction reservation values
 51  * in the mount structure.                         49  * in the mount structure.
 52  */                                                50  */
 53 void                                               51 void
 54 xfs_trans_init(                                    52 xfs_trans_init(
 55         struct xfs_mount        *mp)               53         struct xfs_mount        *mp)
 56 {                                                  54 {
 57         xfs_trans_resv_calc(mp, M_RES(mp));        55         xfs_trans_resv_calc(mp, M_RES(mp));
 58         xfs_trans_trace_reservations(mp);          56         xfs_trans_trace_reservations(mp);
 59 }                                                  57 }
 60                                                    58 
 61 /*                                                 59 /*
 62  * Free the transaction structure.  If there i     60  * Free the transaction structure.  If there is more clean up
 63  * to do when the structure is freed, add it h     61  * to do when the structure is freed, add it here.
 64  */                                                62  */
 65 STATIC void                                        63 STATIC void
 66 xfs_trans_free(                                    64 xfs_trans_free(
 67         struct xfs_trans        *tp)               65         struct xfs_trans        *tp)
 68 {                                                  66 {
 69         xfs_extent_busy_sort(&tp->t_busy);         67         xfs_extent_busy_sort(&tp->t_busy);
 70         xfs_extent_busy_clear(tp->t_mountp, &t     68         xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);
 71                                                    69 
 72         trace_xfs_trans_free(tp, _RET_IP_);        70         trace_xfs_trans_free(tp, _RET_IP_);
 73         xfs_trans_clear_context(tp);           << 
 74         if (!(tp->t_flags & XFS_TRANS_NO_WRITE     71         if (!(tp->t_flags & XFS_TRANS_NO_WRITECOUNT))
 75                 sb_end_intwrite(tp->t_mountp->     72                 sb_end_intwrite(tp->t_mountp->m_super);
 76         xfs_trans_free_dqinfo(tp);                 73         xfs_trans_free_dqinfo(tp);
 77         kmem_cache_free(xfs_trans_cache, tp);  !!  74         kmem_cache_free(xfs_trans_zone, tp);
 78 }                                                  75 }
 79                                                    76 
 80 /*                                                 77 /*
 81  * This is called to create a new transaction      78  * This is called to create a new transaction which will share the
 82  * permanent log reservation of the given tran     79  * permanent log reservation of the given transaction.  The remaining
 83  * unused block and rt extent reservations are     80  * unused block and rt extent reservations are also inherited.  This
 84  * implies that the original transaction is no     81  * implies that the original transaction is no longer allowed to allocate
 85  * blocks.  Locks and log items, however, are      82  * blocks.  Locks and log items, however, are no inherited.  They must
 86  * be added to the new transaction explicitly.     83  * be added to the new transaction explicitly.
 87  */                                                84  */
 88 STATIC struct xfs_trans *                          85 STATIC struct xfs_trans *
 89 xfs_trans_dup(                                     86 xfs_trans_dup(
 90         struct xfs_trans        *tp)               87         struct xfs_trans        *tp)
 91 {                                                  88 {
 92         struct xfs_trans        *ntp;              89         struct xfs_trans        *ntp;
 93                                                    90 
 94         trace_xfs_trans_dup(tp, _RET_IP_);         91         trace_xfs_trans_dup(tp, _RET_IP_);
 95                                                    92 
 96         ntp = kmem_cache_zalloc(xfs_trans_cach !!  93         ntp = kmem_zone_zalloc(xfs_trans_zone, 0);
 97                                                    94 
 98         /*                                         95         /*
 99          * Initialize the new transaction stru     96          * Initialize the new transaction structure.
100          */                                        97          */
101         ntp->t_magic = XFS_TRANS_HEADER_MAGIC;     98         ntp->t_magic = XFS_TRANS_HEADER_MAGIC;
102         ntp->t_mountp = tp->t_mountp;              99         ntp->t_mountp = tp->t_mountp;
103         INIT_LIST_HEAD(&ntp->t_items);            100         INIT_LIST_HEAD(&ntp->t_items);
104         INIT_LIST_HEAD(&ntp->t_busy);             101         INIT_LIST_HEAD(&ntp->t_busy);
105         INIT_LIST_HEAD(&ntp->t_dfops);            102         INIT_LIST_HEAD(&ntp->t_dfops);
106         ntp->t_highest_agno = NULLAGNUMBER;    !! 103         ntp->t_firstblock = NULLFSBLOCK;
107                                                   104 
108         ASSERT(tp->t_flags & XFS_TRANS_PERM_LO    105         ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
109         ASSERT(tp->t_ticket != NULL);             106         ASSERT(tp->t_ticket != NULL);
110                                                   107 
111         ntp->t_flags = XFS_TRANS_PERM_LOG_RES     108         ntp->t_flags = XFS_TRANS_PERM_LOG_RES |
112                        (tp->t_flags & XFS_TRAN    109                        (tp->t_flags & XFS_TRANS_RESERVE) |
113                        (tp->t_flags & XFS_TRAN    110                        (tp->t_flags & XFS_TRANS_NO_WRITECOUNT) |
114                        (tp->t_flags & XFS_TRAN    111                        (tp->t_flags & XFS_TRANS_RES_FDBLKS);
115         /* We gave our writer reference to the    112         /* We gave our writer reference to the new transaction */
116         tp->t_flags |= XFS_TRANS_NO_WRITECOUNT    113         tp->t_flags |= XFS_TRANS_NO_WRITECOUNT;
117         ntp->t_ticket = xfs_log_ticket_get(tp-    114         ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket);
118                                                   115 
119         ASSERT(tp->t_blk_res >= tp->t_blk_res_    116         ASSERT(tp->t_blk_res >= tp->t_blk_res_used);
120         ntp->t_blk_res = tp->t_blk_res - tp->t    117         ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
121         tp->t_blk_res = tp->t_blk_res_used;       118         tp->t_blk_res = tp->t_blk_res_used;
122                                                   119 
123         ntp->t_rtx_res = tp->t_rtx_res - tp->t    120         ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
124         tp->t_rtx_res = tp->t_rtx_res_used;       121         tp->t_rtx_res = tp->t_rtx_res_used;
125                                                !! 122         ntp->t_pflags = tp->t_pflags;
126         xfs_trans_switch_context(tp, ntp);     << 
127                                                   123 
128         /* move deferred ops over to the new t    124         /* move deferred ops over to the new tp */
129         xfs_defer_move(ntp, tp);                  125         xfs_defer_move(ntp, tp);
130                                                   126 
131         xfs_trans_dup_dqinfo(tp, ntp);            127         xfs_trans_dup_dqinfo(tp, ntp);
132         return ntp;                               128         return ntp;
133 }                                                 129 }
134                                                   130 
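xfs_trans_dup() is static and is normally reached through transaction rolling: a long-running operation commits the work done so far and continues in a duplicate transaction that inherits the permanent log reservation, so the log tail can keep moving. A minimal caller sketch follows, assuming the in-tree xfs_trans_alloc(), xfs_trans_roll(), xfs_trans_commit() and xfs_trans_cancel() interfaces; more_work() and do_one_step() are hypothetical helpers.

        /* Sketch only: roll a permanent-reservation transaction between steps. */
        static int
        example_roll_loop(
                struct xfs_mount        *mp)
        {
                struct xfs_trans        *tp;
                int                     error;

                /* tr_itruncate carries XFS_TRANS_PERM_LOG_RES, so it can be rolled. */
                error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
                if (error)
                        return error;

                while (more_work()) {                   /* hypothetical */
                        error = do_one_step(tp);        /* hypothetical; dirties items in tp */
                        if (error)
                                goto out_cancel;

                        /* Commits tp and continues in the duplicate made by xfs_trans_dup(). */
                        error = xfs_trans_roll(&tp);
                        if (error)
                                goto out_cancel;
                }
                return xfs_trans_commit(tp);

        out_cancel:
                xfs_trans_cancel(tp);
                return error;
        }
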
135 /*                                                131 /*
136  * This is called to reserve free disk blocks     132  * This is called to reserve free disk blocks and log space for the
137  * given transaction.  This must be done befor    133  * given transaction.  This must be done before allocating any resources
138  * within the transaction.                        134  * within the transaction.
139  *                                                135  *
140  * This will return ENOSPC if there are not en    136  * This will return ENOSPC if there are not enough blocks available.
141  * It will sleep waiting for available log spa    137  * It will sleep waiting for available log space.
142  * The only valid value for the flags paramete    138  * The only valid value for the flags parameter is XFS_RES_LOG_PERM, which
143  * is used by long running transactions.  If a    139  * is used by long running transactions.  If any one of the reservations
144  * fails then they will all be backed out.        140  * fails then they will all be backed out.
145  *                                                141  *
146  * This does not do quota reservations. That t    142  * This does not do quota reservations. That typically is done by the
147  * caller afterwards.                             143  * caller afterwards.
148  */                                               144  */
149 static int                                        145 static int
150 xfs_trans_reserve(                                146 xfs_trans_reserve(
151         struct xfs_trans        *tp,              147         struct xfs_trans        *tp,
152         struct xfs_trans_res    *resp,            148         struct xfs_trans_res    *resp,
153         uint                    blocks,           149         uint                    blocks,
154         uint                    rtextents)        150         uint                    rtextents)
155 {                                                 151 {
156         struct xfs_mount        *mp = tp->t_mo    152         struct xfs_mount        *mp = tp->t_mountp;
157         int                     error = 0;        153         int                     error = 0;
158         bool                    rsvd = (tp->t_    154         bool                    rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
159                                                   155 
                                                   >> 156         /* Mark this thread as being in a transaction */
                                                   >> 157         current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
                                                   >> 158 
160         /*                                        159         /*
161          * Attempt to reserve the needed disk     160          * Attempt to reserve the needed disk blocks by decrementing
162          * the number needed from the number a    161          * the number needed from the number available.  This will
163          * fail if the count would go below ze    162          * fail if the count would go below zero.
164          */                                       163          */
165         if (blocks > 0) {                         164         if (blocks > 0) {
166                 error = xfs_dec_fdblocks(mp, b !! 165                 error = xfs_mod_fdblocks(mp, -((int64_t)blocks), rsvd);
167                 if (error != 0)                !! 166                 if (error != 0) {
                                                   >> 167                         current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
168                         return -ENOSPC;           168                         return -ENOSPC;
                                                   >> 169                 }
169                 tp->t_blk_res += blocks;          170                 tp->t_blk_res += blocks;
170         }                                         171         }
171                                                   172 
172         /*                                        173         /*
173          * Reserve the log space needed for th    174          * Reserve the log space needed for this transaction.
174          */                                       175          */
175         if (resp->tr_logres > 0) {                176         if (resp->tr_logres > 0) {
176                 bool    permanent = false;        177                 bool    permanent = false;
177                                                   178 
178                 ASSERT(tp->t_log_res == 0 ||      179                 ASSERT(tp->t_log_res == 0 ||
179                        tp->t_log_res == resp->    180                        tp->t_log_res == resp->tr_logres);
180                 ASSERT(tp->t_log_count == 0 ||    181                 ASSERT(tp->t_log_count == 0 ||
181                        tp->t_log_count == resp    182                        tp->t_log_count == resp->tr_logcount);
182                                                   183 
183                 if (resp->tr_logflags & XFS_TR    184                 if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES) {
184                         tp->t_flags |= XFS_TRA    185                         tp->t_flags |= XFS_TRANS_PERM_LOG_RES;
185                         permanent = true;         186                         permanent = true;
186                 } else {                          187                 } else {
187                         ASSERT(tp->t_ticket ==    188                         ASSERT(tp->t_ticket == NULL);
188                         ASSERT(!(tp->t_flags &    189                         ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
189                 }                                 190                 }
190                                                   191 
191                 if (tp->t_ticket != NULL) {       192                 if (tp->t_ticket != NULL) {
192                         ASSERT(resp->tr_logfla    193                         ASSERT(resp->tr_logflags & XFS_TRANS_PERM_LOG_RES);
193                         error = xfs_log_regran    194                         error = xfs_log_regrant(mp, tp->t_ticket);
194                 } else {                          195                 } else {
195                         error = xfs_log_reserv !! 196                         error = xfs_log_reserve(mp,
                                                   >> 197                                                 resp->tr_logres,
196                                                   198                                                 resp->tr_logcount,
197                                                !! 199                                                 &tp->t_ticket, XFS_TRANSACTION,
                                                   >> 200                                                 permanent);
198                 }                                 201                 }
199                                                   202 
200                 if (error)                        203                 if (error)
201                         goto undo_blocks;         204                         goto undo_blocks;
202                                                   205 
203                 tp->t_log_res = resp->tr_logre    206                 tp->t_log_res = resp->tr_logres;
204                 tp->t_log_count = resp->tr_log    207                 tp->t_log_count = resp->tr_logcount;
205         }                                         208         }
206                                                   209 
207         /*                                        210         /*
208          * Attempt to reserve the needed realt    211          * Attempt to reserve the needed realtime extents by decrementing
209          * the number needed from the number a    212          * the number needed from the number available.  This will
210          * fail if the count would go below ze    213          * fail if the count would go below zero.
211          */                                       214          */
212         if (rtextents > 0) {                      215         if (rtextents > 0) {
213                 error = xfs_dec_frextents(mp,  !! 216                 error = xfs_mod_frextents(mp, -((int64_t)rtextents));
214                 if (error) {                      217                 if (error) {
215                         error = -ENOSPC;          218                         error = -ENOSPC;
216                         goto undo_log;            219                         goto undo_log;
217                 }                                 220                 }
218                 tp->t_rtx_res += rtextents;       221                 tp->t_rtx_res += rtextents;
219         }                                         222         }
220                                                   223 
221         return 0;                                 224         return 0;
222                                                   225 
223         /*                                        226         /*
224          * Error cases jump to one of these la    227          * Error cases jump to one of these labels to undo any
225          * reservations which have already bee    228          * reservations which have already been performed.
226          */                                       229          */
227 undo_log:                                         230 undo_log:
228         if (resp->tr_logres > 0) {                231         if (resp->tr_logres > 0) {
229                 xfs_log_ticket_ungrant(mp->m_l    232                 xfs_log_ticket_ungrant(mp->m_log, tp->t_ticket);
230                 tp->t_ticket = NULL;              233                 tp->t_ticket = NULL;
231                 tp->t_log_res = 0;                234                 tp->t_log_res = 0;
232                 tp->t_flags &= ~XFS_TRANS_PERM    235                 tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES;
233         }                                         236         }
234                                                   237 
235 undo_blocks:                                      238 undo_blocks:
236         if (blocks > 0) {                         239         if (blocks > 0) {
237                 xfs_add_fdblocks(mp, blocks);  !! 240                 xfs_mod_fdblocks(mp, (int64_t)blocks, rsvd);
238                 tp->t_blk_res = 0;                241                 tp->t_blk_res = 0;
239         }                                         242         }
                                                   >> 243 
                                                   >> 244         current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
                                                   >> 245 
240         return error;                             246         return error;
241 }                                                 247 }
242                                                   248 
243 int                                               249 int
244 xfs_trans_alloc(                                  250 xfs_trans_alloc(
245         struct xfs_mount        *mp,              251         struct xfs_mount        *mp,
246         struct xfs_trans_res    *resp,            252         struct xfs_trans_res    *resp,
247         uint                    blocks,           253         uint                    blocks,
248         uint                    rtextents,        254         uint                    rtextents,
249         uint                    flags,            255         uint                    flags,
250         struct xfs_trans        **tpp)            256         struct xfs_trans        **tpp)
251 {                                                 257 {
252         struct xfs_trans        *tp;              258         struct xfs_trans        *tp;
253         bool                    want_retry = t << 
254         int                     error;            259         int                     error;
255                                                   260 
256         /*                                        261         /*
257          * Allocate the handle before we do ou    262          * Allocate the handle before we do our freeze accounting and setting up
258          * GFP_NOFS allocation context so that    263          * GFP_NOFS allocation context so that we avoid lockdep false positives
259          * by doing GFP_KERNEL allocations ins    264          * by doing GFP_KERNEL allocations inside sb_start_intwrite().
260          */                                       265          */
261 retry:                                         !! 266         tp = kmem_zone_zalloc(xfs_trans_zone, 0);
262         tp = kmem_cache_zalloc(xfs_trans_cache << 
263         if (!(flags & XFS_TRANS_NO_WRITECOUNT)    267         if (!(flags & XFS_TRANS_NO_WRITECOUNT))
264                 sb_start_intwrite(mp->m_super)    268                 sb_start_intwrite(mp->m_super);
265         xfs_trans_set_context(tp);             << 
266                                                   269 
267         /*                                        270         /*
268          * Zero-reservation ("empty") transact    271          * Zero-reservation ("empty") transactions can't modify anything, so
269          * they're allowed to run while we're     272          * they're allowed to run while we're frozen.
270          */                                       273          */
271         WARN_ON(resp->tr_logres > 0 &&            274         WARN_ON(resp->tr_logres > 0 &&
272                 mp->m_super->s_writers.frozen     275                 mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
273         ASSERT(!(flags & XFS_TRANS_RES_FDBLKS)    276         ASSERT(!(flags & XFS_TRANS_RES_FDBLKS) ||
274                xfs_has_lazysbcount(mp));       !! 277                xfs_sb_version_haslazysbcount(&mp->m_sb));
275                                                   278 
276         tp->t_magic = XFS_TRANS_HEADER_MAGIC;     279         tp->t_magic = XFS_TRANS_HEADER_MAGIC;
277         tp->t_flags = flags;                      280         tp->t_flags = flags;
278         tp->t_mountp = mp;                        281         tp->t_mountp = mp;
279         INIT_LIST_HEAD(&tp->t_items);             282         INIT_LIST_HEAD(&tp->t_items);
280         INIT_LIST_HEAD(&tp->t_busy);              283         INIT_LIST_HEAD(&tp->t_busy);
281         INIT_LIST_HEAD(&tp->t_dfops);             284         INIT_LIST_HEAD(&tp->t_dfops);
282         tp->t_highest_agno = NULLAGNUMBER;     !! 285         tp->t_firstblock = NULLFSBLOCK;
283                                                   286 
284         error = xfs_trans_reserve(tp, resp, bl    287         error = xfs_trans_reserve(tp, resp, blocks, rtextents);
285         if (error == -ENOSPC && want_retry) {  << 
286                 xfs_trans_cancel(tp);          << 
287                                                << 
288                 /*                             << 
289                  * We weren't able to reserve  << 
290                  * Flush the other speculative << 
291                  * Do not perform a synchronou << 
292                  * other locks.                << 
293                  */                            << 
294                 error = xfs_blockgc_flush_all( << 
295                 if (error)                     << 
296                         return error;          << 
297                 want_retry = false;            << 
298                 goto retry;                    << 
299         }                                      << 
300         if (error) {                              288         if (error) {
301                 xfs_trans_cancel(tp);             289                 xfs_trans_cancel(tp);
302                 return error;                     290                 return error;
303         }                                         291         }
304                                                   292 
305         trace_xfs_trans_alloc(tp, _RET_IP_);      293         trace_xfs_trans_alloc(tp, _RET_IP_);
306                                                   294 
307         *tpp = tp;                                295         *tpp = tp;
308         return 0;                                 296         return 0;
309 }                                                 297 }
310                                                   298 
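For reference, the usual caller pattern around xfs_trans_alloc() is allocate (which reserves internally), modify, then commit or cancel. A minimal sketch, assuming the tr_write reservation from M_RES(mp); make_changes() is a hypothetical helper that joins and logs items.

        /* Sketch only: the common alloc/modify/commit pattern. */
        static int
        example_simple_update(
                struct xfs_mount        *mp,
                uint                    blocks)
        {
                struct xfs_trans        *tp;
                int                     error;

                error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, blocks, 0, 0, &tp);
                if (error)
                        return error;   /* e.g. -ENOSPC after the blockgc retry above */

                error = make_changes(tp);       /* hypothetical */
                if (error) {
                        xfs_trans_cancel(tp);
                        return error;
                }
                return xfs_trans_commit(tp);
        }
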
311 /*                                                299 /*
312  * Create an empty transaction with no reserva    300  * Create an empty transaction with no reservation.  This is a defensive
313  * mechanism for routines that query metadata     301  * mechanism for routines that query metadata without actually modifying them --
314  * if the metadata being queried is somehow cr    302  * if the metadata being queried is somehow cross-linked (think a btree block
315  * pointer that points higher in the tree), we    303  * pointer that points higher in the tree), we risk deadlock.  However, blocks
316  * grabbed as part of a transaction can be re-    304  * grabbed as part of a transaction can be re-grabbed.  The verifiers will
317  * notice the corrupt block and the operation     305  * notice the corrupt block and the operation will fail back to userspace
318  * without deadlocking.                           306  * without deadlocking.
319  *                                                307  *
320  * Note the zero-length reservation; this tran    308  * Note the zero-length reservation; this transaction MUST be cancelled without
321  * any dirty data.                                309  * any dirty data.
322  *                                                310  *
323  * Callers should obtain freeze protection to     311  * Callers should obtain freeze protection to avoid a conflict with fs freezing
324  * where we can be grabbing buffers at the sam    312  * where we can be grabbing buffers at the same time that freeze is trying to
325  * drain the buffer LRU list.                     313  * drain the buffer LRU list.
326  */                                               314  */
327 int                                               315 int
328 xfs_trans_alloc_empty(                            316 xfs_trans_alloc_empty(
329         struct xfs_mount                *mp,      317         struct xfs_mount                *mp,
330         struct xfs_trans                **tpp)    318         struct xfs_trans                **tpp)
331 {                                                 319 {
332         struct xfs_trans_res            resv =    320         struct xfs_trans_res            resv = {0};
333                                                   321 
334         return xfs_trans_alloc(mp, &resv, 0, 0    322         return xfs_trans_alloc(mp, &resv, 0, 0, XFS_TRANS_NO_WRITECOUNT, tpp);
335 }                                                 323 }
336                                                   324 
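A short sketch of how such an empty transaction is typically consumed, assuming a hypothetical read-only query_metadata() walk; note that the transaction is always cancelled, never committed with dirty items.

        /* Sketch only: query metadata under an empty (zero-reservation) transaction. */
        static int
        example_query(
                struct xfs_mount        *mp)
        {
                struct xfs_trans        *tp;
                int                     error;

                error = xfs_trans_alloc_empty(mp, &tp);
                if (error)
                        return error;

                error = query_metadata(tp);     /* hypothetical; must not dirty tp */

                xfs_trans_cancel(tp);           /* nothing dirty, so cancelling is safe */
                return error;
        }
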
337 /*                                                325 /*
338  * Record the indicated change to the given fi    326  * Record the indicated change to the given field for application
339  * to the file system's superblock when the tr    327  * to the file system's superblock when the transaction commits.
340  * For now, just store the change in the trans    328  * For now, just store the change in the transaction structure.
341  *                                                329  *
342  * Mark the transaction structure to indicate     330  * Mark the transaction structure to indicate that the superblock
343  * needs to be updated before committing.         331  * needs to be updated before committing.
344  *                                                332  *
345  * Because we may not be keeping track of allo    333  * Because we may not be keeping track of allocated/free inodes and
346  * used filesystem blocks in the superblock, w    334  * used filesystem blocks in the superblock, we do not mark the
347  * superblock dirty in this transaction if we     335  * superblock dirty in this transaction if we modify these fields.
348  * We still need to update the transaction del    336  * We still need to update the transaction deltas so that they get
349  * applied to the incore superblock, but we do    337  * applied to the incore superblock, but we don't want them to
350  * cause the superblock to get locked and logg    338  * cause the superblock to get locked and logged if these are the
351  * only fields in the superblock that the tran    339  * only fields in the superblock that the transaction modifies.
352  */                                               340  */
353 void                                              341 void
354 xfs_trans_mod_sb(                                 342 xfs_trans_mod_sb(
355         xfs_trans_t     *tp,                      343         xfs_trans_t     *tp,
356         uint            field,                    344         uint            field,
357         int64_t         delta)                    345         int64_t         delta)
358 {                                                 346 {
359         uint32_t        flags = (XFS_TRANS_DIR    347         uint32_t        flags = (XFS_TRANS_DIRTY|XFS_TRANS_SB_DIRTY);
360         xfs_mount_t     *mp = tp->t_mountp;       348         xfs_mount_t     *mp = tp->t_mountp;
361                                                   349 
362         switch (field) {                          350         switch (field) {
363         case XFS_TRANS_SB_ICOUNT:                 351         case XFS_TRANS_SB_ICOUNT:
364                 tp->t_icount_delta += delta;      352                 tp->t_icount_delta += delta;
365                 if (xfs_has_lazysbcount(mp))   !! 353                 if (xfs_sb_version_haslazysbcount(&mp->m_sb))
366                         flags &= ~XFS_TRANS_SB    354                         flags &= ~XFS_TRANS_SB_DIRTY;
367                 break;                            355                 break;
368         case XFS_TRANS_SB_IFREE:                  356         case XFS_TRANS_SB_IFREE:
369                 tp->t_ifree_delta += delta;       357                 tp->t_ifree_delta += delta;
370                 if (xfs_has_lazysbcount(mp))   !! 358                 if (xfs_sb_version_haslazysbcount(&mp->m_sb))
371                         flags &= ~XFS_TRANS_SB    359                         flags &= ~XFS_TRANS_SB_DIRTY;
372                 break;                            360                 break;
373         case XFS_TRANS_SB_FDBLOCKS:               361         case XFS_TRANS_SB_FDBLOCKS:
374                 /*                                362                 /*
375                  * Track the number of blocks     363                  * Track the number of blocks allocated in the transaction.
376                  * Make sure it does not excee    364                  * Make sure it does not exceed the number reserved. If so,
377                  * shutdown as this can lead t    365                  * shutdown as this can lead to accounting inconsistency.
378                  */                               366                  */
379                 if (delta < 0) {                  367                 if (delta < 0) {
380                         tp->t_blk_res_used +=     368                         tp->t_blk_res_used += (uint)-delta;
381                         if (tp->t_blk_res_used    369                         if (tp->t_blk_res_used > tp->t_blk_res)
382                                 xfs_force_shut    370                                 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
383                 } else if (delta > 0 && (tp->t    371                 } else if (delta > 0 && (tp->t_flags & XFS_TRANS_RES_FDBLKS)) {
384                         int64_t blkres_delta;     372                         int64_t blkres_delta;
385                                                   373 
386                         /*                        374                         /*
387                          * Return freed blocks    375                          * Return freed blocks directly to the reservation
388                          * instead of the glob    376                          * instead of the global pool, being careful not to
389                          * overflow the trans     377                          * overflow the trans counter. This is used to preserve
390                          * reservation across     378                          * reservation across chains of transaction rolls that
391                          * repeatedly free and    379                          * repeatedly free and allocate blocks.
392                          */                       380                          */
393                         blkres_delta = min_t(i    381                         blkres_delta = min_t(int64_t, delta,
394                                              U    382                                              UINT_MAX - tp->t_blk_res);
395                         tp->t_blk_res += blkre    383                         tp->t_blk_res += blkres_delta;
396                         delta -= blkres_delta;    384                         delta -= blkres_delta;
397                 }                                 385                 }
398                 tp->t_fdblocks_delta += delta;    386                 tp->t_fdblocks_delta += delta;
399                 if (xfs_has_lazysbcount(mp))   !! 387                 if (xfs_sb_version_haslazysbcount(&mp->m_sb))
400                         flags &= ~XFS_TRANS_SB    388                         flags &= ~XFS_TRANS_SB_DIRTY;
401                 break;                            389                 break;
402         case XFS_TRANS_SB_RES_FDBLOCKS:           390         case XFS_TRANS_SB_RES_FDBLOCKS:
403                 /*                                391                 /*
404                  * The allocation has already     392                  * The allocation has already been applied to the
405                  * in-core superblock's counte    393                  * in-core superblock's counter.  This should only
406                  * be applied to the on-disk s    394                  * be applied to the on-disk superblock.
407                  */                               395                  */
408                 tp->t_res_fdblocks_delta += de    396                 tp->t_res_fdblocks_delta += delta;
409                 if (xfs_has_lazysbcount(mp))   !! 397                 if (xfs_sb_version_haslazysbcount(&mp->m_sb))
410                         flags &= ~XFS_TRANS_SB    398                         flags &= ~XFS_TRANS_SB_DIRTY;
411                 break;                            399                 break;
412         case XFS_TRANS_SB_FREXTENTS:              400         case XFS_TRANS_SB_FREXTENTS:
413                 /*                                401                 /*
414                  * Track the number of blocks     402                  * Track the number of blocks allocated in the
415                  * transaction.  Make sure it     403                  * transaction.  Make sure it does not exceed the
416                  * number reserved.               404                  * number reserved.
417                  */                               405                  */
418                 if (delta < 0) {                  406                 if (delta < 0) {
419                         tp->t_rtx_res_used +=     407                         tp->t_rtx_res_used += (uint)-delta;
420                         ASSERT(tp->t_rtx_res_u    408                         ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res);
421                 }                                 409                 }
422                 tp->t_frextents_delta += delta    410                 tp->t_frextents_delta += delta;
423                 break;                            411                 break;
424         case XFS_TRANS_SB_RES_FREXTENTS:          412         case XFS_TRANS_SB_RES_FREXTENTS:
425                 /*                                413                 /*
426                  * The allocation has already     414                  * The allocation has already been applied to the
427                  * in-core superblock's counte    415                  * in-core superblock's counter.  This should only
428                  * be applied to the on-disk s    416                  * be applied to the on-disk superblock.
429                  */                               417                  */
430                 ASSERT(delta < 0);                418                 ASSERT(delta < 0);
431                 tp->t_res_frextents_delta += d    419                 tp->t_res_frextents_delta += delta;
432                 break;                            420                 break;
433         case XFS_TRANS_SB_DBLOCKS:                421         case XFS_TRANS_SB_DBLOCKS:
                                                   >> 422                 ASSERT(delta > 0);
434                 tp->t_dblocks_delta += delta;     423                 tp->t_dblocks_delta += delta;
435                 break;                            424                 break;
436         case XFS_TRANS_SB_AGCOUNT:                425         case XFS_TRANS_SB_AGCOUNT:
437                 ASSERT(delta > 0);                426                 ASSERT(delta > 0);
438                 tp->t_agcount_delta += delta;     427                 tp->t_agcount_delta += delta;
439                 break;                            428                 break;
440         case XFS_TRANS_SB_IMAXPCT:                429         case XFS_TRANS_SB_IMAXPCT:
441                 tp->t_imaxpct_delta += delta;     430                 tp->t_imaxpct_delta += delta;
442                 break;                            431                 break;
443         case XFS_TRANS_SB_REXTSIZE:               432         case XFS_TRANS_SB_REXTSIZE:
444                 tp->t_rextsize_delta += delta;    433                 tp->t_rextsize_delta += delta;
445                 break;                            434                 break;
446         case XFS_TRANS_SB_RBMBLOCKS:              435         case XFS_TRANS_SB_RBMBLOCKS:
447                 tp->t_rbmblocks_delta += delta    436                 tp->t_rbmblocks_delta += delta;
448                 break;                            437                 break;
449         case XFS_TRANS_SB_RBLOCKS:                438         case XFS_TRANS_SB_RBLOCKS:
450                 tp->t_rblocks_delta += delta;     439                 tp->t_rblocks_delta += delta;
451                 break;                            440                 break;
452         case XFS_TRANS_SB_REXTENTS:               441         case XFS_TRANS_SB_REXTENTS:
453                 tp->t_rextents_delta += delta;    442                 tp->t_rextents_delta += delta;
454                 break;                            443                 break;
455         case XFS_TRANS_SB_REXTSLOG:               444         case XFS_TRANS_SB_REXTSLOG:
456                 tp->t_rextslog_delta += delta;    445                 tp->t_rextslog_delta += delta;
457                 break;                            446                 break;
458         default:                                  447         default:
459                 ASSERT(0);                        448                 ASSERT(0);
460                 return;                           449                 return;
461         }                                         450         }
462                                                   451 
463         tp->t_flags |= flags;                     452         tp->t_flags |= flags;
464 }                                                 453 }
465                                                   454 
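A brief sketch of how callers record these deltas, assuming a growfs-style caller that already holds a dirty transaction; the field choices and values are illustrative only, not taken from the real growfs code.

        /* Sketch only: record superblock deltas; they are applied at commit time. */
        static void
        example_record_growth(
                struct xfs_trans        *tp,
                int64_t                 new_dblocks)
        {
                /* The data device grew by new_dblocks and gained one allocation group. */
                xfs_trans_mod_sb(tp, XFS_TRANS_SB_DBLOCKS, new_dblocks);
                xfs_trans_mod_sb(tp, XFS_TRANS_SB_AGCOUNT, 1);
        }
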
466 /*                                                455 /*
467  * xfs_trans_apply_sb_deltas() is called from     456  * xfs_trans_apply_sb_deltas() is called from the commit code
468  * to bring the superblock buffer into the cur    457  * to bring the superblock buffer into the current transaction
469  * and modify it as requested by earlier calls    458  * and modify it as requested by earlier calls to xfs_trans_mod_sb().
470  *                                                459  *
471  * For now we just look at each field allowed     460  * For now we just look at each field allowed to change and change
472  * it if necessary.                               461  * it if necessary.
473  */                                               462  */
474 STATIC void                                       463 STATIC void
475 xfs_trans_apply_sb_deltas(                        464 xfs_trans_apply_sb_deltas(
476         xfs_trans_t     *tp)                      465         xfs_trans_t     *tp)
477 {                                                 466 {
478         struct xfs_dsb  *sbp;                  !! 467         xfs_dsb_t       *sbp;
479         struct xfs_buf  *bp;                   !! 468         xfs_buf_t       *bp;
480         int             whole = 0;                469         int             whole = 0;
481                                                   470 
482         bp = xfs_trans_getsb(tp);              !! 471         bp = xfs_trans_getsb(tp, tp->t_mountp);
483         sbp = bp->b_addr;                         472         sbp = bp->b_addr;
484                                                   473 
485         /*                                        474         /*
                                                   >> 475          * Check that superblock mods match the mods made to AGF counters.
                                                   >> 476          */
                                                   >> 477         ASSERT((tp->t_fdblocks_delta + tp->t_res_fdblocks_delta) ==
                                                   >> 478                (tp->t_ag_freeblks_delta + tp->t_ag_flist_delta +
                                                   >> 479                 tp->t_ag_btree_delta));
                                                   >> 480 
                                                   >> 481         /*
486          * Only update the superblock counters    482          * Only update the superblock counters if we are logging them
487          */                                       483          */
488         if (!xfs_has_lazysbcount((tp->t_mountp !! 484         if (!xfs_sb_version_haslazysbcount(&(tp->t_mountp->m_sb))) {
489                 if (tp->t_icount_delta)           485                 if (tp->t_icount_delta)
490                         be64_add_cpu(&sbp->sb_    486                         be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta);
491                 if (tp->t_ifree_delta)            487                 if (tp->t_ifree_delta)
492                         be64_add_cpu(&sbp->sb_    488                         be64_add_cpu(&sbp->sb_ifree, tp->t_ifree_delta);
493                 if (tp->t_fdblocks_delta)         489                 if (tp->t_fdblocks_delta)
494                         be64_add_cpu(&sbp->sb_    490                         be64_add_cpu(&sbp->sb_fdblocks, tp->t_fdblocks_delta);
495                 if (tp->t_res_fdblocks_delta)     491                 if (tp->t_res_fdblocks_delta)
496                         be64_add_cpu(&sbp->sb_    492                         be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta);
497         }                                         493         }
498                                                   494 
499         /*                                     !! 495         if (tp->t_frextents_delta)
500          * Updating frextents requires careful !! 496                 be64_add_cpu(&sbp->sb_frextents, tp->t_frextents_delta);
501          * behave like the lazysb counters bec !! 497         if (tp->t_res_frextents_delta)
502          * recovery in older kenels to recompu !! 498                 be64_add_cpu(&sbp->sb_frextents, tp->t_res_frextents_delta);
503          * This means that the ondisk frextent << 
504          * rtbitmap.                           << 
505          *                                     << 
506          * Therefore, log the frextents change << 
507          * update the incore superblock so tha << 
508          * write the correct value ondisk.     << 
509          *                                     << 
510          * Don't touch m_frextents because it  << 
511          * and those are handled by the unrese << 
512          */                                    << 
513         if (tp->t_frextents_delta || tp->t_res << 
514                 struct xfs_mount        *mp =  << 
515                 int64_t                 rtxdel << 
516                                                << 
517                 rtxdelta = tp->t_frextents_del << 
518                                                << 
519                 spin_lock(&mp->m_sb_lock);     << 
520                 be64_add_cpu(&sbp->sb_frextent << 
521                 mp->m_sb.sb_frextents += rtxde << 
522                 spin_unlock(&mp->m_sb_lock);   << 
523         }                                      << 
524                                                   499 
525         if (tp->t_dblocks_delta) {                500         if (tp->t_dblocks_delta) {
526                 be64_add_cpu(&sbp->sb_dblocks,    501                 be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta);
527                 whole = 1;                        502                 whole = 1;
528         }                                         503         }
529         if (tp->t_agcount_delta) {                504         if (tp->t_agcount_delta) {
530                 be32_add_cpu(&sbp->sb_agcount,    505                 be32_add_cpu(&sbp->sb_agcount, tp->t_agcount_delta);
531                 whole = 1;                        506                 whole = 1;
532         }                                         507         }
533         if (tp->t_imaxpct_delta) {                508         if (tp->t_imaxpct_delta) {
534                 sbp->sb_imax_pct += tp->t_imax    509                 sbp->sb_imax_pct += tp->t_imaxpct_delta;
535                 whole = 1;                        510                 whole = 1;
536         }                                         511         }
537         if (tp->t_rextsize_delta) {               512         if (tp->t_rextsize_delta) {
538                 be32_add_cpu(&sbp->sb_rextsize    513                 be32_add_cpu(&sbp->sb_rextsize, tp->t_rextsize_delta);
539                 whole = 1;                        514                 whole = 1;
540         }                                         515         }
541         if (tp->t_rbmblocks_delta) {              516         if (tp->t_rbmblocks_delta) {
542                 be32_add_cpu(&sbp->sb_rbmblock    517                 be32_add_cpu(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta);
543                 whole = 1;                        518                 whole = 1;
544         }                                         519         }
545         if (tp->t_rblocks_delta) {                520         if (tp->t_rblocks_delta) {
546                 be64_add_cpu(&sbp->sb_rblocks,    521                 be64_add_cpu(&sbp->sb_rblocks, tp->t_rblocks_delta);
547                 whole = 1;                        522                 whole = 1;
548         }                                         523         }
549         if (tp->t_rextents_delta) {               524         if (tp->t_rextents_delta) {
550                 be64_add_cpu(&sbp->sb_rextents    525                 be64_add_cpu(&sbp->sb_rextents, tp->t_rextents_delta);
551                 whole = 1;                        526                 whole = 1;
552         }                                         527         }
553         if (tp->t_rextslog_delta) {               528         if (tp->t_rextslog_delta) {
554                 sbp->sb_rextslog += tp->t_rext    529                 sbp->sb_rextslog += tp->t_rextslog_delta;
555                 whole = 1;                        530                 whole = 1;
556         }                                         531         }
557                                                   532 
558         xfs_trans_buf_set_type(tp, bp, XFS_BLF    533         xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
559         if (whole)                                534         if (whole)
560                 /*                                535                 /*
561                  * Log the whole thing, the fi    536                  * Log the whole thing, the fields are noncontiguous.
562                  */                               537                  */
563                 xfs_trans_log_buf(tp, bp, 0, s !! 538                 xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_dsb_t) - 1);
564         else                                      539         else
565                 /*                                540                 /*
566                  * Since all the modifiable fi    541                  * Since all the modifiable fields are contiguous, we
567                  * can get away with this.        542                  * can get away with this.
568                  */                               543                  */
569                 xfs_trans_log_buf(tp, bp, offs !! 544                 xfs_trans_log_buf(tp, bp, offsetof(xfs_dsb_t, sb_icount),
570                                   offsetof(str !! 545                                   offsetof(xfs_dsb_t, sb_frextents) +
571                                   sizeof(sbp->    546                                   sizeof(sbp->sb_frextents) - 1);
572 }                                                 547 }
573                                                   548 
574 /*                                                549 /*
575  * xfs_trans_unreserve_and_mod_sb() is called     550  * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations and
576  * apply superblock counter changes to the in-    551  * apply superblock counter changes to the in-core superblock.  The
577  * t_res_fdblocks_delta and t_res_frextents_de    552  * t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT
578  * applied to the in-core superblock.  The ide    553  * applied to the in-core superblock.  The idea is that that has already been
579  * done.                                          554  * done.
580  *                                                555  *
581  * If we are not logging superblock counters,     556  * If we are not logging superblock counters, then the inode allocated/free and
582  * used block counts are not updated in the on    557  * used block counts are not updated in the on disk superblock. In this case,
583  * XFS_TRANS_SB_DIRTY will not be set when the    558  * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we
584  * still need to update the incore superblock     559  * still need to update the incore superblock with the changes.
585  *                                                560  *
586  * Deltas for the inode count are +/-64, hence    561  * Deltas for the inode count are +/-64, hence we use a large batch size of 128
587  * so we don't need to take the counter lock o    562  * so we don't need to take the counter lock on every update.
588  */                                               563  */
589 #define XFS_ICOUNT_BATCH        128               564 #define XFS_ICOUNT_BATCH        128
590                                                   565 
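The batch-size reasoning above is easier to see outside the kernel: a counter only has to take its shared lock once the locally accumulated delta reaches the batch threshold, so inode-count deltas of +/-64 do not hit the locked slow path on every update when the batch is 128. A minimal standalone sketch of that idea with made-up names (this is not the kernel's percpu_counter implementation):

#include <stdint.h>

struct batched_counter {
	int64_t	shared;		/* global value, lock-protected in real code */
	int64_t	local;		/* per-CPU style accumulator */
	int64_t	batch;		/* e.g. 128, as XFS_ICOUNT_BATCH above */
};

static void counter_add_batch(struct batched_counter *c, int64_t delta)
{
	c->local += delta;
	/* Only fold into the shared counter (the locked slow path) once the
	 * local accumulator has drifted by at least a full batch. */
	if (c->local >= c->batch || c->local <= -c->batch) {
		c->shared += c->local;
		c->local = 0;
	}
}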
591 void                                              566 void
592 xfs_trans_unreserve_and_mod_sb(                   567 xfs_trans_unreserve_and_mod_sb(
593         struct xfs_trans        *tp)              568         struct xfs_trans        *tp)
594 {                                                 569 {
595         struct xfs_mount        *mp = tp->t_mo    570         struct xfs_mount        *mp = tp->t_mountp;
596         int64_t                 blkdelta = tp- !! 571         bool                    rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
597         int64_t                 rtxdelta = tp- !! 572         int64_t                 blkdelta = 0;
                                                   >> 573         int64_t                 rtxdelta = 0;
598         int64_t                 idelta = 0;       574         int64_t                 idelta = 0;
599         int64_t                 ifreedelta = 0    575         int64_t                 ifreedelta = 0;
                                                   >> 576         int                     error;
600                                                   577 
601         /*                                     !! 578         /* calculate deltas */
602          * Calculate the deltas.               !! 579         if (tp->t_blk_res > 0)
603          *                                     !! 580                 blkdelta = tp->t_blk_res;
604          * t_fdblocks_delta and t_frextents_de !! 581         if ((tp->t_fdblocks_delta != 0) &&
605          *                                     !! 582             (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
606          *  - positive values indicate blocks  !! 583              (tp->t_flags & XFS_TRANS_SB_DIRTY)))
607          *  - negative values indicate blocks  << 
608          *                                     << 
609          * Negative values can only happen if  << 
610          * reservation that covers the allocat << 
611          * that the calculated delta values mu << 
612          * can only put back previous allocate << 
613          */                                    << 
614         ASSERT(tp->t_blk_res || tp->t_fdblocks << 
615         if (xfs_has_lazysbcount(mp) || (tp->t_ << 
616                 blkdelta += tp->t_fdblocks_del    584                 blkdelta += tp->t_fdblocks_delta;
617                 ASSERT(blkdelta >= 0);         << 
618         }                                      << 
619                                                   585 
620         ASSERT(tp->t_rtx_res || tp->t_frextent !! 586         if (tp->t_rtx_res > 0)
621         if (tp->t_flags & XFS_TRANS_SB_DIRTY)  !! 587                 rtxdelta = tp->t_rtx_res;
                                                   >> 588         if ((tp->t_frextents_delta != 0) &&
                                                   >> 589             (tp->t_flags & XFS_TRANS_SB_DIRTY))
622                 rtxdelta += tp->t_frextents_de    590                 rtxdelta += tp->t_frextents_delta;
623                 ASSERT(rtxdelta >= 0);         << 
624         }                                      << 
625                                                   591 
626         if (xfs_has_lazysbcount(mp) || (tp->t_ !! 592         if (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
                                                   >> 593              (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
627                 idelta = tp->t_icount_delta;      594                 idelta = tp->t_icount_delta;
628                 ifreedelta = tp->t_ifree_delta    595                 ifreedelta = tp->t_ifree_delta;
629         }                                         596         }
630                                                   597 
631         /* apply the per-cpu counters */          598         /* apply the per-cpu counters */
632         if (blkdelta)                          !! 599         if (blkdelta) {
633                 xfs_add_fdblocks(mp, blkdelta) !! 600                 error = xfs_mod_fdblocks(mp, blkdelta, rsvd);
                                                   >> 601                 ASSERT(!error);
                                                   >> 602         }
634                                                   603 
635         if (idelta)                            !! 604         if (idelta) {
636                 percpu_counter_add_batch(&mp->    605                 percpu_counter_add_batch(&mp->m_icount, idelta,
637                                          XFS_I    606                                          XFS_ICOUNT_BATCH);
                                                   >> 607                 if (idelta < 0)
                                                   >> 608                         ASSERT(__percpu_counter_compare(&mp->m_icount, 0,
                                                   >> 609                                                         XFS_ICOUNT_BATCH) >= 0);
                                                   >> 610         }
638                                                   611 
639         if (ifreedelta)                        !! 612         if (ifreedelta) {
640                 percpu_counter_add(&mp->m_ifre    613                 percpu_counter_add(&mp->m_ifree, ifreedelta);
                                                   >> 614                 if (ifreedelta < 0)
                                                   >> 615                         ASSERT(percpu_counter_compare(&mp->m_ifree, 0) >= 0);
                                                   >> 616         }
641                                                   617 
642         if (rtxdelta)                          !! 618         if (rtxdelta == 0 && !(tp->t_flags & XFS_TRANS_SB_DIRTY))
643                 xfs_add_frextents(mp, rtxdelta << 
644                                                << 
645         if (!(tp->t_flags & XFS_TRANS_SB_DIRTY << 
646                 return;                           619                 return;
647                                                   620 
648         /* apply remaining deltas */              621         /* apply remaining deltas */
649         spin_lock(&mp->m_sb_lock);                622         spin_lock(&mp->m_sb_lock);
650         mp->m_sb.sb_fdblocks += tp->t_fdblocks !! 623         mp->m_sb.sb_frextents += rtxdelta;
651         mp->m_sb.sb_icount += idelta;          << 
652         mp->m_sb.sb_ifree += ifreedelta;       << 
653         /*                                     << 
654          * Do not touch sb_frextents here beca << 
655          * reservation.  sb_frextents is not p << 
656          * must be consistent with the ondisk  << 
657          * incore reservations.                << 
658          */                                    << 
659         mp->m_sb.sb_dblocks += tp->t_dblocks_d    624         mp->m_sb.sb_dblocks += tp->t_dblocks_delta;
660         mp->m_sb.sb_agcount += tp->t_agcount_d    625         mp->m_sb.sb_agcount += tp->t_agcount_delta;
661         mp->m_sb.sb_imax_pct += tp->t_imaxpct_    626         mp->m_sb.sb_imax_pct += tp->t_imaxpct_delta;
662         mp->m_sb.sb_rextsize += tp->t_rextsize    627         mp->m_sb.sb_rextsize += tp->t_rextsize_delta;
663         if (tp->t_rextsize_delta) {            << 
664                 mp->m_rtxblklog = log2_if_powe << 
665                 mp->m_rtxblkmask = mask64_if_p << 
666         }                                      << 
667         mp->m_sb.sb_rbmblocks += tp->t_rbmbloc    628         mp->m_sb.sb_rbmblocks += tp->t_rbmblocks_delta;
668         mp->m_sb.sb_rblocks += tp->t_rblocks_d    629         mp->m_sb.sb_rblocks += tp->t_rblocks_delta;
669         mp->m_sb.sb_rextents += tp->t_rextents    630         mp->m_sb.sb_rextents += tp->t_rextents_delta;
670         mp->m_sb.sb_rextslog += tp->t_rextslog    631         mp->m_sb.sb_rextslog += tp->t_rextslog_delta;
671         spin_unlock(&mp->m_sb_lock);              632         spin_unlock(&mp->m_sb_lock);
672                                                   633 
673         /*                                        634         /*
674          * Debug checks outside of the spinloc    635          * Debug checks outside of the spinlock so they don't lock up the
675          * machine if they fail.                  636          * machine if they fail.
676          */                                       637          */
677         ASSERT(mp->m_sb.sb_imax_pct >= 0);        638         ASSERT(mp->m_sb.sb_imax_pct >= 0);
678         ASSERT(mp->m_sb.sb_rextslog >= 0);        639         ASSERT(mp->m_sb.sb_rextslog >= 0);
                                                   >> 640         return;
679 }                                                 641 }
680                                                   642 
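The delta calculation above can be followed with concrete numbers. A standalone sketch of just the arithmetic (illustrative values only, not kernel code): a transaction that reserved 100 data blocks and allocated 30 of them hands the unused 70 back to the free space counter.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t t_blk_res = 100;	/* blocks reserved up front */
	int64_t t_fdblocks_delta = -30;	/* blocks actually allocated */

	/* With lazy superblock counters (or a dirty superblock) the unused
	 * part of the reservation is what goes back to free space. */
	int64_t blkdelta = t_blk_res + t_fdblocks_delta;

	assert(blkdelta >= 0);		/* can only return what was reserved */
	printf("returned to fdblocks: %lld\n", (long long)blkdelta);
	return 0;
}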
681 /* Add the given log item to the transaction's    643 /* Add the given log item to the transaction's list of log items. */
682 void                                              644 void
683 xfs_trans_add_item(                               645 xfs_trans_add_item(
684         struct xfs_trans        *tp,              646         struct xfs_trans        *tp,
685         struct xfs_log_item     *lip)             647         struct xfs_log_item     *lip)
686 {                                                 648 {
687         ASSERT(lip->li_log == tp->t_mountp->m_ !! 649         ASSERT(lip->li_mountp == tp->t_mountp);
688         ASSERT(lip->li_ailp == tp->t_mountp->m    650         ASSERT(lip->li_ailp == tp->t_mountp->m_ail);
689         ASSERT(list_empty(&lip->li_trans));       651         ASSERT(list_empty(&lip->li_trans));
690         ASSERT(!test_bit(XFS_LI_DIRTY, &lip->l    652         ASSERT(!test_bit(XFS_LI_DIRTY, &lip->li_flags));
691                                                   653 
692         list_add_tail(&lip->li_trans, &tp->t_i    654         list_add_tail(&lip->li_trans, &tp->t_items);
693         trace_xfs_trans_add_item(tp, _RET_IP_)    655         trace_xfs_trans_add_item(tp, _RET_IP_);
694 }                                                 656 }
695                                                   657 
696 /*                                                658 /*
697  * Unlink the log item from the transaction. T    659  * Unlink the log item from the transaction. The log item is no longer
698  * considered dirty in this transaction, as th    660  * considered dirty in this transaction, as the linked transaction has
699  * finished, either by abort or commit complet    661  * finished, either by abort or commit completion.
700  */                                               662  */
701 void                                              663 void
702 xfs_trans_del_item(                               664 xfs_trans_del_item(
703         struct xfs_log_item     *lip)             665         struct xfs_log_item     *lip)
704 {                                                 666 {
705         clear_bit(XFS_LI_DIRTY, &lip->li_flags    667         clear_bit(XFS_LI_DIRTY, &lip->li_flags);
706         list_del_init(&lip->li_trans);            668         list_del_init(&lip->li_trans);
707 }                                                 669 }
708                                                   670 
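Taken together, the add/del helpers above maintain a simple invariant: a log item is linked onto at most one transaction at a time, and deleting it both clears the dirty state and re-initialises the linkage so the item can be reused. A tiny standalone model of that invariant (invented types, not the kernel's struct xfs_log_item):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct model_item {
	void	*trans;		/* stands in for the li_trans list linkage */
	bool	dirty;		/* stands in for the XFS_LI_DIRTY bit */
};

static void model_add_item(void *tp, struct model_item *item)
{
	assert(item->trans == NULL);	/* not joined to any transaction */
	assert(!item->dirty);
	item->trans = tp;
}

static void model_del_item(struct model_item *item)
{
	item->dirty = false;		/* clear_bit(XFS_LI_DIRTY, ...) */
	item->trans = NULL;		/* list_del_init(&lip->li_trans) */
}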
709 /* Detach and unlock all of the items in a tra    671 /* Detach and unlock all of the items in a transaction */
710 static void                                       672 static void
711 xfs_trans_free_items(                             673 xfs_trans_free_items(
712         struct xfs_trans        *tp,              674         struct xfs_trans        *tp,
713         bool                    abort)            675         bool                    abort)
714 {                                                 676 {
715         struct xfs_log_item     *lip, *next;      677         struct xfs_log_item     *lip, *next;
716                                                   678 
717         trace_xfs_trans_free_items(tp, _RET_IP    679         trace_xfs_trans_free_items(tp, _RET_IP_);
718                                                   680 
719         list_for_each_entry_safe(lip, next, &t    681         list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
720                 xfs_trans_del_item(lip);          682                 xfs_trans_del_item(lip);
721                 if (abort)                        683                 if (abort)
722                         set_bit(XFS_LI_ABORTED    684                         set_bit(XFS_LI_ABORTED, &lip->li_flags);
723                 if (lip->li_ops->iop_release)     685                 if (lip->li_ops->iop_release)
724                         lip->li_ops->iop_relea    686                         lip->li_ops->iop_release(lip);
725         }                                         687         }
726 }                                                 688 }
727                                                   689 
728 /*                                             !! 690 static inline void
729  * Sort transaction items prior to running pre !! 691 xfs_log_item_batch_insert(
730  * attempt to order the items such that they w !! 692         struct xfs_ail          *ailp,
731  * order. Items that have no sort function are !! 693         struct xfs_ail_cursor   *cur,
732  * and so are locked last.                     !! 694         struct xfs_log_item     **log_items,
733  *                                             !! 695         int                     nr_items,
734  * This may need refinement as different types !! 696         xfs_lsn_t               commit_lsn)
735  *                                             !! 697 {
736  * Function is more complex than it needs to b !! 698         int     i;
737  * values and the function only returns 32 bit !! 699 
738  */                                            !! 700         spin_lock(&ailp->ail_lock);
739 static int                                     !! 701         /* xfs_trans_ail_update_bulk drops ailp->ail_lock */
740 xfs_trans_precommit_sort(                      !! 702         xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);
741         void                    *unused_arg,   !! 703 
742         const struct list_head  *a,            !! 704         for (i = 0; i < nr_items; i++) {
743         const struct list_head  *b)            !! 705                 struct xfs_log_item *lip = log_items[i];
744 {                                              !! 706 
745         struct xfs_log_item     *lia = contain !! 707                 if (lip->li_ops->iop_unpin)
746                                         struct !! 708                         lip->li_ops->iop_unpin(lip, 0);
747         struct xfs_log_item     *lib = contain !! 709         }
748                                         struct << 
749         int64_t                 diff;          << 
750                                                << 
751         /*                                     << 
752          * If both items are non-sortable, lea << 
753          * sortable, move the non-sortable ite << 
754          */                                    << 
755         if (!lia->li_ops->iop_sort && !lib->li << 
756                 return 0;                      << 
757         if (!lia->li_ops->iop_sort)            << 
758                 return 1;                      << 
759         if (!lib->li_ops->iop_sort)            << 
760                 return -1;                     << 
761                                                << 
762         diff = lia->li_ops->iop_sort(lia) - li << 
763         if (diff < 0)                          << 
764                 return -1;                     << 
765         if (diff > 0)                          << 
766                 return 1;                      << 
767         return 0;                              << 
768 }                                                 710 }
769                                                   711 
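The comparator above clamps a 64-bit difference because list_sort() only wants an int result, and a bare subtraction cast to int could change sign on truncation. A standalone illustration of the same clamp for qsort()-style comparators (hypothetical 64-bit keys, not the kernel's log items):

#include <stdint.h>

/* Return -1/0/1 for 64-bit keys.  Comparing directly avoids even forming
 * a 64-bit difference, so there is no truncation or overflow to worry
 * about, which is the point of the clamping in the function above. */
static int cmp_key64(int64_t a, int64_t b)
{
	if (a < b)
		return -1;
	if (a > b)
		return 1;
	return 0;
}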
770 /*                                                712 /*
771  * Run transaction precommit functions.        !! 713  * Bulk operation version of xfs_trans_committed that takes a log vector of
772  *                                             !! 714  * items to insert into the AIL. This uses bulk AIL insertion techniques to
773  * If there is an error in any of the callouts !! 715  * minimise lock traffic.
774  * trigger a shutdown to abort the transaction !! 716  *
775  * from errors at this point as the transactio !! 717  * If we are called with the aborted flag set, it is because a log write during
                                                   >> 718  * a CIL checkpoint commit has failed. In this case, all the items in the
                                                   >> 719  * checkpoint have already gone through iop_committed and iop_committing, which
                                                   >> 720  * means that checkpoint commit abort handling is treated exactly the same
                                                   >> 721  * as an iclog write error even though we haven't started any IO yet. Hence in
                                                   >> 722  * this case all we need to do is iop_committed processing, followed by an
                                                   >> 723  * iop_unpin(aborted) call.
                                                   >> 724  *
                                                   >> 725  * The AIL cursor is used to optimise the insert process. If commit_lsn is not
                                                   >> 726  * at the end of the AIL, the insert cursor avoids the need to walk
                                                   >> 727  * the AIL to find the insertion point on every xfs_log_item_batch_insert()
                                                   >> 728  * call. This saves a lot of needless list walking and is a net win, even
                                                   >> 729  * though it slightly increases the amount of AIL lock traffic to set it up
                                                   >> 730  * and tear it down.
776  */                                               731  */
777 static int                                     !! 732 void
778 xfs_trans_run_precommits(                      !! 733 xfs_trans_committed_bulk(
779         struct xfs_trans        *tp)           !! 734         struct xfs_ail          *ailp,
780 {                                              !! 735         struct xfs_log_vec      *log_vector,
781         struct xfs_mount        *mp = tp->t_mo !! 736         xfs_lsn_t               commit_lsn,
782         struct xfs_log_item     *lip, *n;      !! 737         bool                    aborted)
783         int                     error = 0;     !! 738 {
                                                   >> 739 #define LOG_ITEM_BATCH_SIZE     32
                                                   >> 740         struct xfs_log_item     *log_items[LOG_ITEM_BATCH_SIZE];
                                                   >> 741         struct xfs_log_vec      *lv;
                                                   >> 742         struct xfs_ail_cursor   cur;
                                                   >> 743         int                     i = 0;
                                                   >> 744 
                                                   >> 745         spin_lock(&ailp->ail_lock);
                                                   >> 746         xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
                                                   >> 747         spin_unlock(&ailp->ail_lock);
                                                   >> 748 
                                                   >> 749         /* unpin all the log items */
                                                   >> 750         for (lv = log_vector; lv; lv = lv->lv_next ) {
                                                   >> 751                 struct xfs_log_item     *lip = lv->lv_item;
                                                   >> 752                 xfs_lsn_t               item_lsn;
784                                                   753 
785         /*                                     !! 754                 if (aborted)
786          * Sort the item list to avoid ABBA de !! 755                         set_bit(XFS_LI_ABORTED, &lip->li_flags);
787          * running precommit operations that l << 
788          * inode cluster buffers.              << 
789          */                                    << 
790         list_sort(NULL, &tp->t_items, xfs_tran << 
791                                                   756 
792         /*                                     !! 757                 if (lip->li_ops->flags & XFS_ITEM_RELEASE_WHEN_COMMITTED) {
793          * Precommit operations can remove the !! 758                         lip->li_ops->iop_release(lip);
794          * if the log item exists purely to de << 
795          * can be ordered against other operat << 
796          * list_for_each_entry_safe() here.    << 
797          */                                    << 
798         list_for_each_entry_safe(lip, n, &tp-> << 
799                 if (!test_bit(XFS_LI_DIRTY, &l << 
800                         continue;                 759                         continue;
801                 if (lip->li_ops->iop_precommit !! 760                 }
802                         error = lip->li_ops->i !! 761 
803                         if (error)             !! 762                 if (lip->li_ops->iop_committed)
804                                 break;         !! 763                         item_lsn = lip->li_ops->iop_committed(lip, commit_lsn);
                                                   >> 764                 else
                                                   >> 765                         item_lsn = commit_lsn;
                                                   >> 766 
                                                   >> 767                 /* item_lsn of -1 means the item needs no further processing */
                                                   >> 768                 if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
                                                   >> 769                         continue;
                                                   >> 770 
                                                   >> 771                 /*
                                                   >> 772                  * if we are aborting the operation, no point in inserting the
                                                   >> 773                  * object into the AIL as we are in a shutdown situation.
                                                   >> 774                  */
                                                   >> 775                 if (aborted) {
                                                   >> 776                         ASSERT(XFS_FORCED_SHUTDOWN(ailp->ail_mount));
                                                   >> 777                         if (lip->li_ops->iop_unpin)
                                                   >> 778                                 lip->li_ops->iop_unpin(lip, 1);
                                                   >> 779                         continue;
                                                   >> 780                 }
                                                   >> 781 
                                                   >> 782                 if (item_lsn != commit_lsn) {
                                                   >> 783 
                                                   >> 784                         /*
                                                   >> 785                          * Not a bulk update option due to unusual item_lsn.
                                                   >> 786                          * Push into AIL immediately, rechecking the lsn once
                                                   >> 787                          * we have the ail lock. Then unpin the item. This does
                                                   >> 788                          * not affect the AIL cursor the bulk insert path is
                                                   >> 789                          * using.
                                                   >> 790                          */
                                                   >> 791                         spin_lock(&ailp->ail_lock);
                                                   >> 792                         if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
                                                   >> 793                                 xfs_trans_ail_update(ailp, lip, item_lsn);
                                                   >> 794                         else
                                                   >> 795                                 spin_unlock(&ailp->ail_lock);
                                                   >> 796                         if (lip->li_ops->iop_unpin)
                                                   >> 797                                 lip->li_ops->iop_unpin(lip, 0);
                                                   >> 798                         continue;
                                                   >> 799                 }
                                                   >> 800 
                                                   >> 801                 /* Item is a candidate for bulk AIL insert.  */
                                                   >> 802                 log_items[i++] = lv->lv_item;
                                                   >> 803                 if (i >= LOG_ITEM_BATCH_SIZE) {
                                                   >> 804                         xfs_log_item_batch_insert(ailp, &cur, log_items,
                                                   >> 805                                         LOG_ITEM_BATCH_SIZE, commit_lsn);
                                                   >> 806                         i = 0;
805                 }                                 807                 }
806         }                                         808         }
807         if (error)                             !! 809 
808                 xfs_force_shutdown(mp, SHUTDOW !! 810         /* make sure we insert the remainder! */
809         return error;                          !! 811         if (i)
                                                   >> 812                 xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn);
                                                   >> 813 
                                                   >> 814         spin_lock(&ailp->ail_lock);
                                                   >> 815         xfs_trans_ail_cursor_done(&cur);
                                                   >> 816         spin_unlock(&ailp->ail_lock);
810 }                                                 817 }
811                                                   818 
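The precommit pass boils down to: sort the items, run each dirty item's callout, and shut the filesystem down on the first failure because earlier callouts may already have modified items. A condensed standalone model of that control flow (the sort step is omitted and the names are invented; the real code walks struct xfs_log_item lists and calls ->iop_precommit):

#include <stddef.h>

struct model_item {
	int	dirty;
	int	(*precommit)(struct model_item *item);
};

static int run_precommits(struct model_item **items, size_t nr,
			  void (*shutdown)(void))
{
	int error = 0;
	size_t i;

	/* First failure wins; remaining callouts are skipped. */
	for (i = 0; i < nr && !error; i++) {
		if (!items[i]->dirty || !items[i]->precommit)
			continue;
		error = items[i]->precommit(items[i]);
	}
	if (error)
		shutdown();
	return error;
}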
812 /*                                                819 /*
813  * Commit the given transaction to the log.       820  * Commit the given transaction to the log.
814  *                                                821  *
815  * XFS disk error handling mechanism is not ba    822  * XFS disk error handling mechanism is not based on a typical
816  * transaction abort mechanism. Logically afte    823  * transaction abort mechanism. Logically after the filesystem
817  * gets marked 'SHUTDOWN', we can't let any ne    824  * gets marked 'SHUTDOWN', we can't let any new transactions
818  * be durable - ie. committed to disk - becaus    825  * be durable - ie. committed to disk - because some metadata might
819  * be inconsistent. In such cases, this return    826  * be inconsistent. In such cases, this returns an error, and the
820  * caller may assume that all locked objects j    827  * caller may assume that all locked objects joined to the transaction
821  * have already been unlocked as if the commit    828  * have already been unlocked as if the commit had succeeded.
822  * Do not reference the transaction structure     829  * Do not reference the transaction structure after this call.
823  */                                               830  */
824 static int                                        831 static int
825 __xfs_trans_commit(                               832 __xfs_trans_commit(
826         struct xfs_trans        *tp,              833         struct xfs_trans        *tp,
827         bool                    regrant)          834         bool                    regrant)
828 {                                                 835 {
829         struct xfs_mount        *mp = tp->t_mo    836         struct xfs_mount        *mp = tp->t_mountp;
830         struct xlog             *log = mp->m_l !! 837         xfs_lsn_t               commit_lsn = -1;
831         xfs_csn_t               commit_seq = 0 << 
832         int                     error = 0;        838         int                     error = 0;
833         int                     sync = tp->t_f    839         int                     sync = tp->t_flags & XFS_TRANS_SYNC;
834                                                   840 
835         trace_xfs_trans_commit(tp, _RET_IP_);     841         trace_xfs_trans_commit(tp, _RET_IP_);
836                                                   842 
837         error = xfs_trans_run_precommits(tp);  << 
838         if (error) {                           << 
839                 if (tp->t_flags & XFS_TRANS_PE << 
840                         xfs_defer_cancel(tp);  << 
841                 goto out_unreserve;            << 
842         }                                      << 
843                                                << 
844         /*                                        843         /*
845          * Finish deferred items on final comm    844          * Finish deferred items on final commit. Only permanent transactions
846          * should ever have deferred ops.         845          * should ever have deferred ops.
847          */                                       846          */
848         WARN_ON_ONCE(!list_empty(&tp->t_dfops)    847         WARN_ON_ONCE(!list_empty(&tp->t_dfops) &&
849                      !(tp->t_flags & XFS_TRANS    848                      !(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
850         if (!regrant && (tp->t_flags & XFS_TRA    849         if (!regrant && (tp->t_flags & XFS_TRANS_PERM_LOG_RES)) {
851                 error = xfs_defer_finish_norol    850                 error = xfs_defer_finish_noroll(&tp);
852                 if (error)                        851                 if (error)
853                         goto out_unreserve;       852                         goto out_unreserve;
854                                                << 
855                 /* Run precommits from final t << 
856                 error = xfs_trans_run_precommi << 
857                 if (error)                     << 
858                         goto out_unreserve;    << 
859         }                                         853         }
860                                                   854 
861         /*                                        855         /*
862          * If there is nothing to be logged by    856          * If there is nothing to be logged by the transaction,
863          * then unlock all of the items associ    857          * then unlock all of the items associated with the
864          * transaction and free the transactio    858          * transaction and free the transaction structure.
865          * Also make sure to return any reserv    859          * Also make sure to return any reserved blocks to
866          * the free pool.                         860          * the free pool.
867          */                                       861          */
868         if (!(tp->t_flags & XFS_TRANS_DIRTY))     862         if (!(tp->t_flags & XFS_TRANS_DIRTY))
869                 goto out_unreserve;               863                 goto out_unreserve;
870                                                   864 
871         /*                                     !! 865         if (XFS_FORCED_SHUTDOWN(mp)) {
872          * We must check against log shutdown  << 
873          * items and leave them dirty, inconsi << 
874          * the log is active. This leaves them << 
875          * disk, and that will lead to on-disk << 
876          */                                    << 
877         if (xlog_is_shutdown(log)) {           << 
878                 error = -EIO;                     866                 error = -EIO;
879                 goto out_unreserve;               867                 goto out_unreserve;
880         }                                         868         }
881                                                   869 
882         ASSERT(tp->t_ticket != NULL);             870         ASSERT(tp->t_ticket != NULL);
883                                                   871 
884         /*                                        872         /*
885          * If we need to update the superblock    873          * If we need to update the superblock, then do it now.
886          */                                       874          */
887         if (tp->t_flags & XFS_TRANS_SB_DIRTY)     875         if (tp->t_flags & XFS_TRANS_SB_DIRTY)
888                 xfs_trans_apply_sb_deltas(tp);    876                 xfs_trans_apply_sb_deltas(tp);
889         xfs_trans_apply_dquot_deltas(tp);         877         xfs_trans_apply_dquot_deltas(tp);
890                                                   878 
891         xlog_cil_commit(log, tp, &commit_seq,  !! 879         xfs_log_commit_cil(mp, tp, &commit_lsn, regrant);
892                                                   880 
                                                   >> 881         current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
893         xfs_trans_free(tp);                       882         xfs_trans_free(tp);
894                                                   883 
895         /*                                        884         /*
896          * If the transaction needs to be sync    885          * If the transaction needs to be synchronous, then force the
897          * log out now and wait for it.           886          * log out now and wait for it.
898          */                                       887          */
899         if (sync) {                               888         if (sync) {
900                 error = xfs_log_force_seq(mp,  !! 889                 error = xfs_log_force_lsn(mp, commit_lsn, XFS_LOG_SYNC, NULL);
901                 XFS_STATS_INC(mp, xs_trans_syn    890                 XFS_STATS_INC(mp, xs_trans_sync);
902         } else {                                  891         } else {
903                 XFS_STATS_INC(mp, xs_trans_asy    892                 XFS_STATS_INC(mp, xs_trans_async);
904         }                                         893         }
905                                                   894 
906         return error;                             895         return error;
907                                                   896 
908 out_unreserve:                                    897 out_unreserve:
909         xfs_trans_unreserve_and_mod_sb(tp);       898         xfs_trans_unreserve_and_mod_sb(tp);
910                                                   899 
911         /*                                        900         /*
912          * It is indeed possible for the trans    901          * It is indeed possible for the transaction to be not dirty but
913          * the dqinfo portion to be.  All that    902          * the dqinfo portion to be.  All that means is that we have some
914          * (non-persistent) quota reservations    903          * (non-persistent) quota reservations that need to be unreserved.
915          */                                       904          */
916         xfs_trans_unreserve_and_mod_dquots(tp)    905         xfs_trans_unreserve_and_mod_dquots(tp);
917         if (tp->t_ticket) {                       906         if (tp->t_ticket) {
918                 if (regrant && !xlog_is_shutdo !! 907                 if (regrant && !XLOG_FORCED_SHUTDOWN(mp->m_log))
919                         xfs_log_ticket_regrant !! 908                         xfs_log_ticket_regrant(mp->m_log, tp->t_ticket);
920                 else                              909                 else
921                         xfs_log_ticket_ungrant !! 910                         xfs_log_ticket_ungrant(mp->m_log, tp->t_ticket);
922                 tp->t_ticket = NULL;              911                 tp->t_ticket = NULL;
923         }                                         912         }
                                                   >> 913         current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
924         xfs_trans_free_items(tp, !!error);        914         xfs_trans_free_items(tp, !!error);
925         xfs_trans_free(tp);                       915         xfs_trans_free(tp);
926                                                   916 
927         XFS_STATS_INC(mp, xs_trans_empty);        917         XFS_STATS_INC(mp, xs_trans_empty);
928         return error;                             918         return error;
929 }                                                 919 }
930                                                   920 
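For context, a typical caller reaches this commit path through the high level helpers: allocate a transaction against a reservation, join and log the objects, optionally mark the transaction synchronous, and commit. A condensed kernel-context sketch of that shape (it is not buildable on its own; the tr_ichange reservation, ip and wsync are stand-ins chosen for illustration):

	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);	/* lock released at commit */

	/* ... modify the inode core ... */
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	if (wsync)				/* caller wants synchronous semantics */
		xfs_trans_set_sync(tp);		/* makes __xfs_trans_commit() force the log */

	error = xfs_trans_commit(tp);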
931 int                                               921 int
932 xfs_trans_commit(                                 922 xfs_trans_commit(
933         struct xfs_trans        *tp)              923         struct xfs_trans        *tp)
934 {                                                 924 {
935         return __xfs_trans_commit(tp, false);     925         return __xfs_trans_commit(tp, false);
936 }                                                 926 }
937                                                   927 
938 /*                                                928 /*
939  * Unlock all of the transaction's items and f !! 929  * Unlock all of the transaction's items and free the transaction.
940  * transaction is dirty, we must shut down the !! 930  * The transaction must not have modified any of its items, because
941  * way to restore them to their previous state !! 931  * there is no way to restore them to their previous state.
942  *                                             << 
943  * If the transaction has made a log reservati << 
944  * well.                                       << 
945  *                                                932  *
946  * This is a high level function (equivalent t !! 933  * If the transaction has made a log reservation, make sure to release
947  * be called after the transaction has effecti !! 934  * it as well.
948  * being shut down. However, if the mount has  << 
949  * transaction is dirty we will shut the mount << 
950  * guarantees that the log is shut down, too.  << 
951  * careful with shutdown state and dirty items << 
952  * xfs_trans_commit().                         << 
953  */                                               935  */
954 void                                              936 void
955 xfs_trans_cancel(                                 937 xfs_trans_cancel(
956         struct xfs_trans        *tp)              938         struct xfs_trans        *tp)
957 {                                                 939 {
958         struct xfs_mount        *mp = tp->t_mo    940         struct xfs_mount        *mp = tp->t_mountp;
959         struct xlog             *log = mp->m_l << 
960         bool                    dirty = (tp->t    941         bool                    dirty = (tp->t_flags & XFS_TRANS_DIRTY);
961                                                   942 
962         trace_xfs_trans_cancel(tp, _RET_IP_);     943         trace_xfs_trans_cancel(tp, _RET_IP_);
963                                                   944 
964         /*                                     !! 945         if (tp->t_flags & XFS_TRANS_PERM_LOG_RES)
965          * It's never valid to cancel a transa << 
966          * because the transaction is effectiv << 
967          * loudly before freeing the in-memory << 
968          * filesystem.                         << 
969          */                                    << 
970         if (!list_empty(&tp->t_dfops)) {       << 
971                 ASSERT(tp->t_flags & XFS_TRANS << 
972                 dirty = true;                  << 
973                 xfs_defer_cancel(tp);             946                 xfs_defer_cancel(tp);
974         }                                      << 
975                                                   947 
976         /*                                        948         /*
977          * See if the caller is relying on us  !! 949          * See if the caller is relying on us to shut down the
978          * only want an error report if there  !! 950          * filesystem.  This happens in paths where we detect
979          * progress, so we only need to check  !! 951          * corruption and decide to give up.
980          * here.                               << 
981          */                                       952          */
982         if (dirty && !xfs_is_shutdown(mp)) {   !! 953         if (dirty && !XFS_FORCED_SHUTDOWN(mp)) {
983                 XFS_ERROR_REPORT("xfs_trans_ca    954                 XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
984                 xfs_force_shutdown(mp, SHUTDOW    955                 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
985         }                                         956         }
986 #ifdef DEBUG                                      957 #ifdef DEBUG
987         /* Log items need to be consistent unt !! 958         if (!dirty && !XFS_FORCED_SHUTDOWN(mp)) {
988         if (!dirty && !xlog_is_shutdown(log))  << 
989                 struct xfs_log_item *lip;         959                 struct xfs_log_item *lip;
990                                                   960 
991                 list_for_each_entry(lip, &tp->    961                 list_for_each_entry(lip, &tp->t_items, li_trans)
992                         ASSERT(!xlog_item_is_i !! 962                         ASSERT(!(lip->li_type == XFS_LI_EFD));
993         }                                         963         }
994 #endif                                            964 #endif
995         xfs_trans_unreserve_and_mod_sb(tp);       965         xfs_trans_unreserve_and_mod_sb(tp);
996         xfs_trans_unreserve_and_mod_dquots(tp)    966         xfs_trans_unreserve_and_mod_dquots(tp);
997                                                   967 
998         if (tp->t_ticket) {                       968         if (tp->t_ticket) {
999                 xfs_log_ticket_ungrant(log, tp !! 969                 xfs_log_ticket_ungrant(mp->m_log, tp->t_ticket);
1000                 tp->t_ticket = NULL;             970                 tp->t_ticket = NULL;
1001         }                                        971         }
1002                                                  972 
                                                   >> 973         /* mark this thread as no longer being in a transaction */
                                                   >> 974         current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
                                                   >> 975 
1003         xfs_trans_free_items(tp, dirty);         976         xfs_trans_free_items(tp, dirty);
1004         xfs_trans_free(tp);                      977         xfs_trans_free(tp);
1005 }                                                978 }
1006                                                  979 
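The usual road into xfs_trans_cancel() is the error leg of a caller like the one sketched after __xfs_trans_commit(): if nothing has been dirtied yet, the cancel quietly ungrants the log reservation and unlocks the items; once the transaction is dirty, cancelling it forces a shutdown as the comment above explains. A condensed kernel-context sketch (not buildable on its own; blocks and do_modification() are made-up placeholders):

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, blocks, 0, 0, &tp);
	if (error)
		return error;

	error = do_modification(tp);	/* hypothetical helper that may dirty tp */
	if (error) {
		/*
		 * A clean cancel if the helper bailed out before logging
		 * anything; otherwise xfs_trans_cancel() shuts the
		 * filesystem down because dirty items cannot be rolled back.
		 */
		xfs_trans_cancel(tp);
		return error;
	}
	return xfs_trans_commit(tp);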
1007 /*                                               980 /*
1008  * Roll from one trans in the sequence of PER    981  * Roll from one trans in the sequence of PERMANENT transactions to
1009  * the next: permanent transactions are only     982  * the next: permanent transactions are only flushed out when
1010  * committed with xfs_trans_commit(), but we     983  * committed with xfs_trans_commit(), but we still want as soon
1011  * as possible to let chunks of it go to the     984  * as possible to let chunks of it go to the log. So we commit the
1012  * chunk we've been working on and get a new     985  * chunk we've been working on and get a new transaction to continue.
1013  */                                              986  */
1014 int                                              987 int
1015 xfs_trans_roll(                                  988 xfs_trans_roll(
1016         struct xfs_trans        **tpp)           989         struct xfs_trans        **tpp)
1017 {                                                990 {
1018         struct xfs_trans        *trans = *tpp    991         struct xfs_trans        *trans = *tpp;
1019         struct xfs_trans_res    tres;            992         struct xfs_trans_res    tres;
1020         int                     error;           993         int                     error;
1021                                                  994 
1022         trace_xfs_trans_roll(trans, _RET_IP_)    995         trace_xfs_trans_roll(trans, _RET_IP_);
1023                                                  996 
1024         /*                                       997         /*
1025          * Copy the critical parameters from     998          * Copy the critical parameters from one trans to the next.
1026          */                                      999          */
1027         tres.tr_logres = trans->t_log_res;       1000         tres.tr_logres = trans->t_log_res;
1028         tres.tr_logcount = trans->t_log_count    1001         tres.tr_logcount = trans->t_log_count;
1029                                                  1002 
1030         *tpp = xfs_trans_dup(trans);             1003         *tpp = xfs_trans_dup(trans);
1031                                                  1004 
1032         /*                                       1005         /*
1033          * Commit the current transaction.       1006          * Commit the current transaction.
1034          * If this commit failed, then it'd j    1007          * If this commit failed, then it'd just unlock those items that
1035          * are not marked ihold. That also me    1008          * are not marked ihold. That also means that a filesystem shutdown
1036          * is in progress. The caller takes t    1009          * is in progress. The caller takes the responsibility to cancel
1037          * the duplicate transaction that get    1010          * the duplicate transaction that gets returned.
1038          */                                      1011          */
1039         error = __xfs_trans_commit(trans, tru    1012         error = __xfs_trans_commit(trans, true);
1040         if (error)                               1013         if (error)
1041                 return error;                    1014                 return error;
1042                                                  1015 
1043         /*                                       1016         /*
1044          * Reserve space in the log for the n    1017          * Reserve space in the log for the next transaction.
1045          * This also pushes items in the "AIL    1018          * This also pushes items in the "AIL", the list of logged items,
1046          * out to disk if they are taking up     1019          * out to disk if they are taking up space at the tail of the log
1047          * that we want to use.  This require    1020          * that we want to use.  This requires that either nothing be locked
1048          * across this call, or that anything    1021          * across this call, or that anything that is locked be logged in
1049          * the prior and the next transaction    1022          * the prior and the next transactions.
1050          */                                      1023          */
1051         tres.tr_logflags = XFS_TRANS_PERM_LOG    1024         tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
1052         return xfs_trans_reserve(*tpp, &tres,    1025         return xfs_trans_reserve(*tpp, &tres, 0, 0);
1053 }                                             << 
1054                                               << 
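xfs_trans_roll() is what lets a long-running permanent transaction reach the log in chunks: commit what has been done so far while keeping the reservation, then carry on in the duplicate transaction. A condensed kernel-context sketch of that loop (have_more_work and do_one_chunk() are placeholders; any object held locked across the roll has to be re-joined and re-logged in the new transaction):

	/* tp carries a permanent (XFS_TRANS_PERM_LOG_RES) reservation */
	while (have_more_work) {
		error = do_one_chunk(tp);	/* dirties items joined to tp */
		if (error)
			break;

		/* Commit this chunk and continue in the duplicate. */
		error = xfs_trans_roll(&tp);
		if (error)
			break;
	}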
1055 /*                                            << 
1056  * Allocate a transaction, lock and join the  <<
1057  *                                            << 
1058  * The caller must ensure that the on-disk dq << 
1059  * already been allocated and initialized.  T << 
1060  * releasing ILOCK_EXCL if a new transaction  << 
1061  */                                           << 
1062 int                                           << 
1063 xfs_trans_alloc_inode(                        << 
1064         struct xfs_inode        *ip,          << 
1065         struct xfs_trans_res    *resv,        << 
1066         unsigned int            dblocks,      << 
1067         unsigned int            rblocks,      << 
1068         bool                    force,        << 
1069         struct xfs_trans        **tpp)        << 
1070 {                                             << 
1071         struct xfs_trans        *tp;          << 
1072         struct xfs_mount        *mp = ip->i_m << 
1073         bool                    retried = fal << 
1074         int                     error;        << 
1075                                               << 
1076 retry:                                        << 
1077         error = xfs_trans_alloc(mp, resv, dbl << 
1078                         xfs_extlen_to_rtxlen( << 
1079                         force ? XFS_TRANS_RES << 
1080         if (error)                            << 
1081                 return error;                 << 
1082                                               << 
1083         xfs_ilock(ip, XFS_ILOCK_EXCL);        << 
1084         xfs_trans_ijoin(tp, ip, 0);           << 
1085                                               << 
1086         error = xfs_qm_dqattach_locked(ip, fa << 
1087         if (error) {                          << 
1088                 /* Caller should have allocat << 
1089                 ASSERT(error != -ENOENT);     << 
1090                 goto out_cancel;              << 
1091         }                                     << 
1092                                               << 
1093         error = xfs_trans_reserve_quota_nblks << 
1094         if ((error == -EDQUOT || error == -EN << 
1095                 xfs_trans_cancel(tp);         << 
1096                 xfs_iunlock(ip, XFS_ILOCK_EXC << 
1097                 xfs_blockgc_free_quota(ip, 0) << 
1098                 retried = true;               << 
1099                 goto retry;                   << 
1100         }                                     << 
1101         if (error)                            << 
1102                 goto out_cancel;              << 
1103                                               << 
1104         *tpp = tp;                            << 
1105         return 0;                             << 
1106                                               << 
1107 out_cancel:                                   << 
1108         xfs_trans_cancel(tp);                 << 
1109         xfs_iunlock(ip, XFS_ILOCK_EXCL);      << 
1110         return error;                         << 
1111 }                                             << 
1112                                               << 
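The quota handling above is a retry-once-after-reclaiming pattern: on EDQUOT or ENOSPC the transaction is cancelled, block garbage collection is kicked to free speculative preallocations, and the whole allocation is attempted one more time. A standalone model of that shape (invented names, no XFS types):

#include <errno.h>
#include <stdbool.h>

static int try_with_one_retry(int (*attempt)(void), void (*reclaim)(void))
{
	bool retried = false;
	int error;

retry:
	error = attempt();
	if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
		reclaim();	/* xfs_blockgc_free_quota() plays this role above */
		retried = true;
		goto retry;
	}
	return error;
}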
1113 /*                                            << 
1114  * Try to reserve more blocks for a transacti << 
1115  *                                            << 
1116  * This is for callers that need to attach re << 
1117  * those resources to determine the space res << 
1118  * modify the attached resources.  In other w << 
1119  * fail due to ENOSPC, so the caller must be  << 
1120  * without shutting down the fs.              << 
1121  */                                           << 
1122 int                                           << 
1123 xfs_trans_reserve_more(                       << 
1124         struct xfs_trans        *tp,          << 
1125         unsigned int            blocks,       << 
1126         unsigned int            rtextents)    << 
1127 {                                             << 
1128         struct xfs_trans_res    resv = { };   << 
1129                                               << 
1130         return xfs_trans_reserve(tp, &resv, b << 
1131 }                                             << 
1132                                               << 
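Because a failure here must not shut the filesystem down, callers of xfs_trans_reserve_more() have to be able to back out if the extra space is not there. A condensed kernel-context sketch of that obligation (extra_blocks and the fallback helper are hypothetical):

	error = xfs_trans_reserve_more(tp, extra_blocks, 0);
	if (error == -ENOSPC) {
		/*
		 * Nothing has consumed the extra space yet, so fall back to
		 * a smaller operation instead of failing hard.
		 */
		return do_smaller_operation(tp);	/* hypothetical fallback */
	}
	if (error)
		return error;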
1133 /*                                            << 
1134  * Try to reserve more blocks and file quota  << 
1135  * conditions of usage as xfs_trans_reserve_m << 
1136  */                                           << 
1137 int                                           << 
1138 xfs_trans_reserve_more_inode(                 << 
1139         struct xfs_trans        *tp,          << 
1140         struct xfs_inode        *ip,          << 
1141         unsigned int            dblocks,      << 
1142         unsigned int            rblocks,      << 
1143         bool                    force_quota)  << 
1144 {                                             << 
1145         struct xfs_trans_res    resv = { };   << 
1146         struct xfs_mount        *mp = ip->i_m << 
1147         unsigned int            rtx = xfs_ext << 
1148         int                     error;        << 
1149                                               << 
1150         xfs_assert_ilocked(ip, XFS_ILOCK_EXCL << 
1151                                               << 
1152         error = xfs_trans_reserve(tp, &resv,  << 
1153         if (error)                            << 
1154                 return error;                 << 
1155                                               << 
1156         if (!XFS_IS_QUOTA_ON(mp) || xfs_is_qu << 
1157                 return 0;                     << 
1158                                               << 
1159         if (tp->t_flags & XFS_TRANS_RESERVE)  << 
1160                 force_quota = true;           << 
1161                                               << 
1162         error = xfs_trans_reserve_quota_nblks << 
1163                         force_quota);         << 
1164         if (!error)                           << 
1165                 return 0;                     << 
1166                                               << 
1167         /* Quota failed, give back the new re << 
1168         xfs_add_fdblocks(mp, dblocks);        << 
1169         tp->t_blk_res -= dblocks;             << 
1170         xfs_add_frextents(mp, rtx);           << 
1171         tp->t_rtx_res -= rtx;                 << 
1172         return error;                         << 
1173 }                                             << 
1174                                               << 
1175 /*                                            << 
1176  * Allocate a transaction in preparation for  <<
1177  * against the given dquots.  Callers are not << 
1178  */                                           << 
1179 int                                           << 
1180 xfs_trans_alloc_icreate(                      << 
1181         struct xfs_mount        *mp,          << 
1182         struct xfs_trans_res    *resv,        << 
1183         struct xfs_dquot        *udqp,        << 
1184         struct xfs_dquot        *gdqp,        << 
1185         struct xfs_dquot        *pdqp,        << 
1186         unsigned int            dblocks,      << 
1187         struct xfs_trans        **tpp)        << 
1188 {                                             << 
1189         struct xfs_trans        *tp;          << 
1190         bool                    retried = fal << 
1191         int                     error;        << 
1192                                               << 
1193 retry:                                        << 
1194         error = xfs_trans_alloc(mp, resv, dbl << 
1195         if (error)                            << 
1196                 return error;                 << 
1197                                               << 
1198         error = xfs_trans_reserve_quota_icrea << 
1199         if ((error == -EDQUOT || error == -EN << 
1200                 xfs_trans_cancel(tp);         << 
1201                 xfs_blockgc_free_dquots(mp, udqp, gdqp, pdqp, 0); <<
1202                 retried = true;               << 
1203                 goto retry;                   << 
1204         }                                     << 
1205         if (error) {                          << 
1206                 xfs_trans_cancel(tp);         << 
1207                 return error;                 << 
1208         }                                     << 
1209                                               << 
1210         *tpp = tp;                            << 
1211         return 0;                             << 
1212 }                                             << 
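
As a usage illustration (not part of xfs_trans.c): a file-creation path might
call xfs_trans_alloc_icreate roughly as below.  The dquot pointers would
typically come from xfs_qm_vop_dqalloc(); the function name and the block
count are made up, and a real caller would compute the reservation from the
operation instead of hard-coding it.

static int
example_icreate(
	struct xfs_mount	*mp,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp)
{
	struct xfs_trans	*tp;
	int			error;

	/* Reserve space and quota up front; the helper retries after blockgc. */
	error = xfs_trans_alloc_icreate(mp, &M_RES(mp)->tr_create, udqp,
			gdqp, pdqp, 10, &tp);
	if (error)
		return error;

	/* ... allocate and initialize the new inode here ... */

	return xfs_trans_commit(tp);
}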
1213                                               << 
1214 /*                                            << 
1215  * Allocate a transaction, lock and join the inode to it, and reserve quota <<
1216  * in preparation for inode attribute changes that include uid, gid, or prid <<
1217  * changes.                                   << 
1218  *                                            << 
1219  * The caller must ensure that the on-disk dquots attached to this inode have <<
1220  * already been allocated and initialized.  The ILOCK will be dropped when the <<
1221  * transaction is committed or cancelled.     << 
1222  */                                           << 
1223 int                                           << 
1224 xfs_trans_alloc_ichange(                      << 
1225         struct xfs_inode        *ip,          << 
1226         struct xfs_dquot        *new_udqp,    << 
1227         struct xfs_dquot        *new_gdqp,    << 
1228         struct xfs_dquot        *new_pdqp,    << 
1229         bool                    force,        << 
1230         struct xfs_trans        **tpp)        << 
1231 {                                             << 
1232         struct xfs_trans        *tp;          << 
1233         struct xfs_mount        *mp = ip->i_mount; <<
1234         struct xfs_dquot        *udqp;        << 
1235         struct xfs_dquot        *gdqp;        << 
1236         struct xfs_dquot        *pdqp;        << 
1237         bool                    retried = false; <<
1238         int                     error;        << 
1239                                               << 
1240 retry:                                        << 
1241         error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp); <<
1242         if (error)                            << 
1243                 return error;                 << 
1244                                               << 
1245         xfs_ilock(ip, XFS_ILOCK_EXCL);        << 
1246         xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); <<
1247                                               << 
1248         error = xfs_qm_dqattach_locked(ip, false); <<
1249         if (error) {                          << 
1250                 /* Caller should have allocated the dquots! */ <<
1251                 ASSERT(error != -ENOENT);     << 
1252                 goto out_cancel;              << 
1253         }                                     << 
1254                                               << 
1255         /*                                    << 
1256          * For each quota type, skip quota reservations if the inode's dquots <<
1257          * now match the ones that came from the caller, or the caller didn't <<
1258          * pass one in.  The inode's dquots can change if we drop the ILOCK to <<
1259          * perform a blockgc scan, so we must preserve the caller's arguments. <<
1260          */                                   << 
1261         udqp = (new_udqp != ip->i_udquot) ? new_udqp : NULL; <<
1262         gdqp = (new_gdqp != ip->i_gdquot) ? new_gdqp : NULL; <<
1263         pdqp = (new_pdqp != ip->i_pdquot) ? new_pdqp : NULL; <<
1264         if (udqp || gdqp || pdqp) {           << 
1265                 unsigned int    qflags = XFS_QMOPT_RES_REGBLKS; <<
1266                                               << 
1267                 if (force)                    << 
1268                         qflags |= XFS_QMOPT_FORCE_RES; <<
1269                                               << 
1270                 /*                            << 
1271                  * Reserve enough quota to handle blocks on disk and reserved <<
1272                  * for a delayed allocation.  We'll actually transfer the <<
1273                  * delalloc reservation between dquots at chown time, even <<
1274                  * though that part is only semi-transactional. <<
1275                  */                           << 
1276                 error = xfs_trans_reserve_quota_bydquots(tp, mp, udqp, gdqp, <<
1277                                 pdqp, ip->i_nblocks + ip->i_delayed_blks, <<
1278                                 1, qflags);   << 
1279                 if ((error == -EDQUOT || error == -ENOSPC) && !retried) { <<
1280                         xfs_trans_cancel(tp); << 
1281                         xfs_blockgc_free_dquots(mp, udqp, gdqp, pdqp, 0); <<
1282                         retried = true;       << 
1283                         goto retry;           << 
1284                 }                             << 
1285                 if (error)                    << 
1286                         goto out_cancel;      << 
1287         }                                     << 
1288                                               << 
1289         *tpp = tp;                            << 
1290         return 0;                             << 
1291                                               << 
1292 out_cancel:                                   << 
1293         xfs_trans_cancel(tp);                 << 
1294         return error;                         << 
1295 }                                             << 
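
As a usage illustration (not part of xfs_trans.c): an attribute-change path
such as a chown might use xfs_trans_alloc_ichange as sketched below.  On
success the inode is already joined with ILOCK_EXCL held, so the caller only
commits or cancels.  The function name is invented, and the new dquots are
assumed to exist on disk already, as the header comment requires.

static int
example_ichange(
	struct xfs_inode	*ip,
	struct xfs_dquot	*new_udqp,
	struct xfs_dquot	*new_gdqp)
{
	struct xfs_trans	*tp;
	int			error;

	/* No project quota change in this example, no forced reservation. */
	error = xfs_trans_alloc_ichange(ip, new_udqp, new_gdqp, NULL,
			false, &tp);
	if (error)
		return error;

	/* ... update ownership and log the inode core here ... */

	return xfs_trans_commit(tp);
}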
1296                                               << 
1297 /*                                            << 
1298  * Allocate a transaction, lock and join the directory and child inodes to it, <<
1299  * and reserve quota for a directory update.  If there isn't sufficient space, <<
1300  * @dblocks will be set to zero for a reservationless directory update and <<
1301  * @nospace_error will be set to a negative errno describing the space <<
1302  * constraint we hit.                         << 
1303  *                                            << 
1304  * The caller must ensure that the on-disk dquots attached to this inode have <<
1305  * already been allocated and initialized.  The ILOCKs will be dropped when the <<
1306  * transaction is committed or cancelled.     << 
1307  *                                            << 
1308  * Caller is responsible for unlocking the inodes manually upon return <<
1309  */                                           << 
1310 int                                           << 
1311 xfs_trans_alloc_dir(                          << 
1312         struct xfs_inode        *dp,          << 
1313         struct xfs_trans_res    *resv,        << 
1314         struct xfs_inode        *ip,          << 
1315         unsigned int            *dblocks,     << 
1316         struct xfs_trans        **tpp,        << 
1317         int                     *nospace_error) <<
1318 {                                             << 
1319         struct xfs_trans        *tp;          << 
1320         struct xfs_mount        *mp = ip->i_mount; <<
1321         unsigned int            resblks;      << 
1322         bool                    retried = false; <<
1323         int                     error;        << 
1324                                               << 
1325 retry:                                        << 
1326         *nospace_error = 0;                   << 
1327         resblks = *dblocks;                   << 
1328         error = xfs_trans_alloc(mp, resv, resblks, 0, 0, &tp); <<
1329         if (error == -ENOSPC) {               << 
1330                 *nospace_error = error;       << 
1331                 resblks = 0;                  << 
1332                 error = xfs_trans_alloc(mp, resv, resblks, 0, 0, &tp); <<
1333         }                                     << 
1334         if (error)                            << 
1335                 return error;                 << 
1336                                               << 
1337         xfs_lock_two_inodes(dp, XFS_ILOCK_EXCL, ip, XFS_ILOCK_EXCL); <<
1338                                               << 
1339         xfs_trans_ijoin(tp, dp, 0);           << 
1340         xfs_trans_ijoin(tp, ip, 0);           << 
1341                                               << 
1342         error = xfs_qm_dqattach_locked(dp, false); <<
1343         if (error) {                          << 
1344                 /* Caller should have allocated the dquots! */ <<
1345                 ASSERT(error != -ENOENT);     << 
1346                 goto out_cancel;              << 
1347         }                                     << 
1348                                               << 
1349         error = xfs_qm_dqattach_locked(ip, false); <<
1350         if (error) {                          << 
1351                 /* Caller should have allocated the dquots! */ <<
1352                 ASSERT(error != -ENOENT);     << 
1353                 goto out_cancel;              << 
1354         }                                     << 
1355                                               << 
1356         if (resblks == 0)                     << 
1357                 goto done;                    << 
1358                                               << 
1359         error = xfs_trans_reserve_quota_nblks(tp, dp, resblks, 0, false); <<
1360         if (error == -EDQUOT || error == -ENOSPC) { <<
1361                 if (!retried) {               << 
1362                         xfs_trans_cancel(tp); << 
1363                         xfs_iunlock(dp, XFS_ILOCK_EXCL); <<
1364                         if (dp != ip)         << 
1365                                 xfs_iunlock(ip, XFS_ILOCK_EXCL); <<
1366                         xfs_blockgc_free_quota(dp, 0); <<
1367                         retried = true;       << 
1368                         goto retry;           << 
1369                 }                             << 
1370                                               << 
1371                 *nospace_error = error;       << 
1372                 resblks = 0;                  << 
1373                 error = 0;                    << 
1374         }                                     << 
1375         if (error)                            << 
1376                 goto out_cancel;              << 
1377                                               << 
1378 done:                                         << 
1379         *tpp = tp;                            << 
1380         *dblocks = resblks;                   << 
1381         return 0;                             << 
1382                                               << 
1383 out_cancel:                                   << 
1384         xfs_trans_cancel(tp);                 << 
1385         return error;                         << 
1386 }                                                1026 }
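
As a usage illustration (not part of xfs_trans.c): a directory update such as
a link might call xfs_trans_alloc_dir and tolerate a reservationless
transaction when space is tight.  The function name, the choice of tr_link,
and the block count are assumptions for the sketch; the caller must unlock
both inodes itself, and a real caller would report @nospace_error instead of
-ENOSPC if a later step fails for lack of reservation.

static int
example_dir_update(
	struct xfs_inode	*dp,
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_trans	*tp;
	unsigned int		resblks = 20;	/* arbitrary example value */
	int			nospace_error = 0;
	int			error;

	error = xfs_trans_alloc_dir(dp, &M_RES(mp)->tr_link, ip, &resblks,
			&tp, &nospace_error);
	if (error)
		return error;

	/* If resblks came back as 0, the update must not allocate blocks. */

	/* ... perform the directory modification here ... */

	error = xfs_trans_commit(tp);
	xfs_iunlock(dp, XFS_ILOCK_EXCL);
	if (ip != dp)
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}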
1387                                                  1027 
