// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * Copyright (C) 2010 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_extent_busy.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_trace.h"
#include "xfs_error.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_icache.h"
#include "xfs_rtbitmap.h"

struct kmem_cache	*xfs_trans_cache;

#if defined(CONFIG_TRACEPOINTS)
static void
xfs_trans_trace_reservations(
	struct xfs_mount	*mp)
{
	struct xfs_trans_res	*res;
	struct xfs_trans_res	*end_res;
	int			i;

	res = (struct xfs_trans_res *)M_RES(mp);
	end_res = (struct xfs_trans_res *)(M_RES(mp) + 1);
	for (i = 0; res < end_res; i++, res++)
		trace_xfs_trans_resv_calc(mp, i, res);
}
#else
# define xfs_trans_trace_reservations(mp)
#endif

/*
 * Initialize the precomputed transaction reservation values
 * in the mount structure.
 */
void
xfs_trans_init(
	struct xfs_mount	*mp)
{
	xfs_trans_resv_calc(mp, M_RES(mp));
	xfs_trans_trace_reservations(mp);
}
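
/*
 * Illustrative sketch, not part of the original file: the reservations
 * precomputed above are consumed through M_RES(mp).  A typical caller picks
 * one of the precomputed entries and passes it to xfs_trans_alloc(), e.g.:
 *
 *	struct xfs_trans	*tp;
 *	int			error;
 *
 *	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
 *	if (error)
 *		return error;
 */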

/*
 * Free the transaction structure.  If there is more clean up
 * to do when the structure is freed, add it here.
 */
STATIC void
xfs_trans_free(
	struct xfs_trans	*tp)
{
	xfs_extent_busy_sort(&tp->t_busy);
	xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);

	trace_xfs_trans_free(tp, _RET_IP_);
	xfs_trans_clear_context(tp);
	if (!(tp->t_flags & XFS_TRANS_NO_WRITECOUNT))
		sb_end_intwrite(tp->t_mountp->m_super);
	xfs_trans_free_dqinfo(tp);
	kmem_cache_free(xfs_trans_cache, tp);
}

/*
 * This is called to create a new transaction which will share the
 * permanent log reservation of the given transaction.  The remaining
 * unused block and rt extent reservations are also inherited.  This
 * implies that the original transaction is no longer allowed to allocate
 * blocks.  Locks and log items, however, are not inherited.  They must
 * be added to the new transaction explicitly.
 */
STATIC struct xfs_trans *
xfs_trans_dup(
	struct xfs_trans	*tp)
{
	struct xfs_trans	*ntp;

	trace_xfs_trans_dup(tp, _RET_IP_);

	ntp = kmem_cache_zalloc(xfs_trans_cache, GFP_KERNEL | __GFP_NOFAIL);

	/*
	 * Initialize the new transaction structure.
	 */
	ntp->t_magic = XFS_TRANS_HEADER_MAGIC;
	ntp->t_mountp = tp->t_mountp;
	INIT_LIST_HEAD(&ntp->t_items);
	INIT_LIST_HEAD(&ntp->t_busy);
	INIT_LIST_HEAD(&ntp->t_dfops);
	ntp->t_highest_agno = NULLAGNUMBER;

	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(tp->t_ticket != NULL);

	ntp->t_flags = XFS_TRANS_PERM_LOG_RES |
		       (tp->t_flags & XFS_TRANS_RESERVE) |
		       (tp->t_flags & XFS_TRANS_NO_WRITECOUNT) |
		       (tp->t_flags & XFS_TRANS_RES_FDBLKS);
	/* We gave our writer reference to the new transaction */
	tp->t_flags |= XFS_TRANS_NO_WRITECOUNT;
	ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket);

	ASSERT(tp->t_blk_res >= tp->t_blk_res_used);
	ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
	tp->t_blk_res = tp->t_blk_res_used;

	ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
	tp->t_rtx_res = tp->t_rtx_res_used;

	xfs_trans_switch_context(tp, ntp);

	/* move deferred ops over to the new tp */
	xfs_defer_move(ntp, tp);

	xfs_trans_dup_dqinfo(tp, ntp);
	return ntp;
}

/*
 * This is called to reserve free disk blocks and log space for the
 * given transaction.  This must be done before allocating any resources
 * within the transaction.
 *
 * This will return ENOSPC if there are not enough blocks available.
 * It will sleep waiting for available log space.
 * The only valid value for the flags parameter is XFS_RES_LOG_PERM, which
 * is used by long running transactions.  If any one of the reservations
 * fails then they will all be backed out.
 *
 * This does not do quota reservations. That typically is done by the
 * caller afterwards.
 */
static int
xfs_trans_reserve(
	struct xfs_trans	*tp,
	struct xfs_trans_res	*resp,
	uint			blocks,
	uint			rtextents)
{
	struct xfs_mount	*mp = tp->t_mountp;
	int			error = 0;
	bool			rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;

	/*
	 * Attempt to reserve the needed disk blocks by decrementing
	 * the number needed from the number available.  This will
	 * fail if the count would go below zero.
	 */
	if (blocks > 0) {
		error = xfs_dec_fdblocks(mp, blocks, rsvd);
		if (error != 0)
			return -ENOSPC;
		tp->t_blk_res += blocks;
	}

	/*
	 * Reserve the log space needed for this transaction.
	 */
	if (resp->tr_logres > 0) {
		bool	permanent = false;

		ASSERT(tp->t_log_res == 0 ||
		       tp->t_log_res == resp->tr_logres);
		ASSERT(tp->t_log_count == 0 ||
		       tp->t_log_count == resp->tr_logcount);

		if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES) {
			tp->t_flags |= XFS_TRANS_PERM_LOG_RES;
			permanent = true;
		} else {
			ASSERT(tp->t_ticket == NULL);
			ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
		}

		if (tp->t_ticket != NULL) {
			ASSERT(resp->tr_logflags & XFS_TRANS_PERM_LOG_RES);
			error = xfs_log_regrant(mp, tp->t_ticket);
		} else {
			error = xfs_log_reserve(mp, resp->tr_logres,
						resp->tr_logcount,
						&tp->t_ticket, permanent);
		}

		if (error)
			goto undo_blocks;

		tp->t_log_res = resp->tr_logres;
		tp->t_log_count = resp->tr_logcount;
	}

	/*
	 * Attempt to reserve the needed realtime extents by decrementing
	 * the number needed from the number available.  This will
	 * fail if the count would go below zero.
	 */
	if (rtextents > 0) {
		error = xfs_dec_frextents(mp, rtextents);
		if (error) {
			error = -ENOSPC;
			goto undo_log;
		}
		tp->t_rtx_res += rtextents;
	}

	return 0;

	/*
	 * Error cases jump to one of these labels to undo any
	 * reservations which have already been performed.
	 */
undo_log:
	if (resp->tr_logres > 0) {
		xfs_log_ticket_ungrant(mp->m_log, tp->t_ticket);
		tp->t_ticket = NULL;
		tp->t_log_res = 0;
		tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES;
	}

undo_blocks:
	if (blocks > 0) {
		xfs_add_fdblocks(mp, blocks);
		tp->t_blk_res = 0;
	}
	return error;
}

int
xfs_trans_alloc(
	struct xfs_mount	*mp,
	struct xfs_trans_res	*resp,
	uint			blocks,
	uint			rtextents,
	uint			flags,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp;
	bool			want_retry = true;
	int			error;

	/*
	 * Allocate the handle before we do our freeze accounting and setting up
	 * GFP_NOFS allocation context so that we avoid lockdep false positives
	 * by doing GFP_KERNEL allocations inside sb_start_intwrite().
	 */
retry:
	tp = kmem_cache_zalloc(xfs_trans_cache, GFP_KERNEL | __GFP_NOFAIL);
	if (!(flags & XFS_TRANS_NO_WRITECOUNT))
		sb_start_intwrite(mp->m_super);
	xfs_trans_set_context(tp);

	/*
	 * Zero-reservation ("empty") transactions can't modify anything, so
	 * they're allowed to run while we're frozen.
	 */
	WARN_ON(resp->tr_logres > 0 &&
		mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
	ASSERT(!(flags & XFS_TRANS_RES_FDBLKS) ||
	       xfs_has_lazysbcount(mp));

	tp->t_magic = XFS_TRANS_HEADER_MAGIC;
	tp->t_flags = flags;
	tp->t_mountp = mp;
	INIT_LIST_HEAD(&tp->t_items);
	INIT_LIST_HEAD(&tp->t_busy);
	INIT_LIST_HEAD(&tp->t_dfops);
	tp->t_highest_agno = NULLAGNUMBER;

	error = xfs_trans_reserve(tp, resp, blocks, rtextents);
	if (error == -ENOSPC && want_retry) {
		xfs_trans_cancel(tp);

		/*
		 * We weren't able to reserve enough space for the transaction.
		 * Flush the other speculative space allocations to free space.
		 * Do not perform a synchronous scan because callers can hold
		 * other locks.
		 */
		error = xfs_blockgc_flush_all(mp);
		if (error)
			return error;
		want_retry = false;
		goto retry;
	}
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	trace_xfs_trans_alloc(tp, _RET_IP_);

	*tpp = tp;
	return 0;
}
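
/*
 * Illustrative sketch, not part of the original file: the usual life cycle of
 * a transaction allocated above is allocate, lock and join the objects to be
 * modified, dirty and log them, then commit (or cancel on error).  For a
 * simple inode core update it looks roughly like this:
 *
 *	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
 *	if (error)
 *		return error;
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 *	(modify the inode core, e.g. timestamps or flags)
 *	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 *	return xfs_trans_commit(tp);
 *
 * Passing XFS_ILOCK_EXCL to xfs_trans_ijoin() hands lock ownership to the
 * transaction, so commit or cancel drops the ILOCK for the caller.
 */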

/*
 * Create an empty transaction with no reservation.  This is a defensive
 * mechanism for routines that query metadata without actually modifying them --
 * if the metadata being queried is somehow cross-linked (think a btree block
 * pointer that points higher in the tree), we risk deadlock.  However, blocks
 * grabbed as part of a transaction can be re-grabbed.  The verifiers will
 * notice the corrupt block and the operation will fail back to userspace
 * without deadlocking.
 *
 * Note the zero-length reservation; this transaction MUST be cancelled without
 * any dirty data.
 *
 * Callers should obtain freeze protection to avoid a conflict with fs freezing
 * where we can be grabbing buffers at the same time that freeze is trying to
 * drain the buffer LRU list.
 */
int
xfs_trans_alloc_empty(
	struct xfs_mount		*mp,
	struct xfs_trans		**tpp)
{
	struct xfs_trans_res		resv = {0};

	return xfs_trans_alloc(mp, &resv, 0, 0, XFS_TRANS_NO_WRITECOUNT, tpp);
}
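
/*
 * Illustrative sketch, not part of the original file: empty transactions are
 * used by read-only metadata walkers (e.g. scrub or bulkstat-style iteration)
 * to hold buffers consistently.  They carry no reservation and therefore must
 * always end in xfs_trans_cancel():
 *
 *	error = xfs_trans_alloc_empty(mp, &tp);
 *	if (error)
 *		return error;
 *	(read-only metadata lookups using @tp to grab buffers)
 *	xfs_trans_cancel(tp);
 */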

/*
 * Record the indicated change to the given field for application
 * to the file system's superblock when the transaction commits.
 * For now, just store the change in the transaction structure.
 *
 * Mark the transaction structure to indicate that the superblock
 * needs to be updated before committing.
 *
 * Because we may not be keeping track of allocated/free inodes and
 * used filesystem blocks in the superblock, we do not mark the
 * superblock dirty in this transaction if we modify these fields.
 * We still need to update the transaction deltas so that they get
 * applied to the incore superblock, but we don't want them to
 * cause the superblock to get locked and logged if these are the
 * only fields in the superblock that the transaction modifies.
 */
void
xfs_trans_mod_sb(
	xfs_trans_t	*tp,
	uint		field,
	int64_t		delta)
{
	uint32_t	flags = (XFS_TRANS_DIRTY|XFS_TRANS_SB_DIRTY);
	xfs_mount_t	*mp = tp->t_mountp;

	switch (field) {
	case XFS_TRANS_SB_ICOUNT:
		tp->t_icount_delta += delta;
		if (xfs_has_lazysbcount(mp))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_IFREE:
		tp->t_ifree_delta += delta;
		if (xfs_has_lazysbcount(mp))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_FDBLOCKS:
		/*
		 * Track the number of blocks allocated in the transaction.
		 * Make sure it does not exceed the number reserved. If so,
		 * shutdown as this can lead to accounting inconsistency.
		 */
		if (delta < 0) {
			tp->t_blk_res_used += (uint)-delta;
			if (tp->t_blk_res_used > tp->t_blk_res)
				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		} else if (delta > 0 && (tp->t_flags & XFS_TRANS_RES_FDBLKS)) {
			int64_t	blkres_delta;

			/*
			 * Return freed blocks directly to the reservation
			 * instead of the global pool, being careful not to
			 * overflow the trans counter. This is used to preserve
			 * reservation across chains of transaction rolls that
			 * repeatedly free and allocate blocks.
			 */
			blkres_delta = min_t(int64_t, delta,
					     UINT_MAX - tp->t_blk_res);
			tp->t_blk_res += blkres_delta;
			delta -= blkres_delta;
		}
		tp->t_fdblocks_delta += delta;
		if (xfs_has_lazysbcount(mp))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_RES_FDBLOCKS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		tp->t_res_fdblocks_delta += delta;
		if (xfs_has_lazysbcount(mp))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_FREXTENTS:
		/*
		 * Track the number of blocks allocated in the
		 * transaction.  Make sure it does not exceed the
		 * number reserved.
		 */
		if (delta < 0) {
			tp->t_rtx_res_used += (uint)-delta;
			ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res);
		}
		tp->t_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_RES_FREXTENTS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		ASSERT(delta < 0);
		tp->t_res_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_DBLOCKS:
		tp->t_dblocks_delta += delta;
		break;
	case XFS_TRANS_SB_AGCOUNT:
		ASSERT(delta > 0);
		tp->t_agcount_delta += delta;
		break;
	case XFS_TRANS_SB_IMAXPCT:
		tp->t_imaxpct_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSIZE:
		tp->t_rextsize_delta += delta;
		break;
	case XFS_TRANS_SB_RBMBLOCKS:
		tp->t_rbmblocks_delta += delta;
		break;
	case XFS_TRANS_SB_RBLOCKS:
		tp->t_rblocks_delta += delta;
		break;
	case XFS_TRANS_SB_REXTENTS:
		tp->t_rextents_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSLOG:
		tp->t_rextslog_delta += delta;
		break;
	default:
		ASSERT(0);
		return;
	}

	tp->t_flags |= flags;
}
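
/*
 * Illustrative sketch, not part of the original file: allocation and freeing
 * paths feed this function their counter changes, e.g. when @len filesystem
 * blocks are allocated out of the transaction's block reservation:
 *
 *	xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, -((int64_t)len));
 *
 * A positive delta records freed blocks; for XFS_TRANS_RES_FDBLKS
 * transactions those may be routed back into the transaction's own block
 * reservation, as handled in the XFS_TRANS_SB_FDBLOCKS case above.
 */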

/*
 * xfs_trans_apply_sb_deltas() is called from the commit code
 * to bring the superblock buffer into the current transaction
 * and modify it as requested by earlier calls to xfs_trans_mod_sb().
 *
 * For now we just look at each field allowed to change and change
 * it if necessary.
 */
STATIC void
xfs_trans_apply_sb_deltas(
	xfs_trans_t	*tp)
{
	struct xfs_dsb	*sbp;
	struct xfs_buf	*bp;
	int		whole = 0;

	bp = xfs_trans_getsb(tp);
	sbp = bp->b_addr;

	/*
	 * Only update the superblock counters if we are logging them
	 */
	if (!xfs_has_lazysbcount((tp->t_mountp))) {
		if (tp->t_icount_delta)
			be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta);
		if (tp->t_ifree_delta)
			be64_add_cpu(&sbp->sb_ifree, tp->t_ifree_delta);
		if (tp->t_fdblocks_delta)
			be64_add_cpu(&sbp->sb_fdblocks, tp->t_fdblocks_delta);
		if (tp->t_res_fdblocks_delta)
			be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta);
	}

	/*
	 * Updating frextents requires careful handling because it does not
	 * behave like the lazysb counters because we cannot rely on log
	 * recovery in older kernels to recompute the value from the rtbitmap.
	 * This means that the ondisk frextents must be consistent with the
	 * rtbitmap.
	 *
	 * Therefore, log the frextents change to the ondisk superblock and
	 * update the incore superblock so that future calls to xfs_log_sb
	 * write the correct value ondisk.
	 *
	 * Don't touch m_frextents because it includes incore reservations,
	 * and those are handled by the unreserve function.
	 */
	if (tp->t_frextents_delta || tp->t_res_frextents_delta) {
		struct xfs_mount	*mp = tp->t_mountp;
		int64_t			rtxdelta;

		rtxdelta = tp->t_frextents_delta + tp->t_res_frextents_delta;

		spin_lock(&mp->m_sb_lock);
		be64_add_cpu(&sbp->sb_frextents, rtxdelta);
		mp->m_sb.sb_frextents += rtxdelta;
		spin_unlock(&mp->m_sb_lock);
	}

	if (tp->t_dblocks_delta) {
		be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta);
		whole = 1;
	}
	if (tp->t_agcount_delta) {
		be32_add_cpu(&sbp->sb_agcount, tp->t_agcount_delta);
		whole = 1;
	}
	if (tp->t_imaxpct_delta) {
		sbp->sb_imax_pct += tp->t_imaxpct_delta;
		whole = 1;
	}
	if (tp->t_rextsize_delta) {
		be32_add_cpu(&sbp->sb_rextsize, tp->t_rextsize_delta);
		whole = 1;
	}
	if (tp->t_rbmblocks_delta) {
		be32_add_cpu(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta);
		whole = 1;
	}
	if (tp->t_rblocks_delta) {
		be64_add_cpu(&sbp->sb_rblocks, tp->t_rblocks_delta);
		whole = 1;
	}
	if (tp->t_rextents_delta) {
		be64_add_cpu(&sbp->sb_rextents, tp->t_rextents_delta);
		whole = 1;
	}
	if (tp->t_rextslog_delta) {
		sbp->sb_rextslog += tp->t_rextslog_delta;
		whole = 1;
	}

	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
	if (whole)
		/*
		 * Log the whole thing, the fields are noncontiguous.
		 */
		xfs_trans_log_buf(tp, bp, 0, sizeof(struct xfs_dsb) - 1);
	else
		/*
		 * Since all the modifiable fields are contiguous, we
		 * can get away with this.
		 */
		xfs_trans_log_buf(tp, bp, offsetof(struct xfs_dsb, sb_icount),
				  offsetof(struct xfs_dsb, sb_frextents) +
				  sizeof(sbp->sb_frextents) - 1);
}
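
/*
 * Illustrative note, not part of the original file: the "contiguous" logging
 * shortcut above relies on sb_icount, sb_ifree, sb_fdblocks and sb_frextents
 * being adjacent 64-bit fields in struct xfs_dsb.  A compile-time assertion
 * along these lines would document that layout assumption:
 *
 *	BUILD_BUG_ON(offsetof(struct xfs_dsb, sb_frextents) !=
 *		     offsetof(struct xfs_dsb, sb_icount) + 3 * sizeof(__be64));
 */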

/*
 * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations and
 * apply superblock counter changes to the in-core superblock.  The
 * t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT
 * applied to the in-core superblock.  The idea is that that has already been
 * done.
 *
 * If we are not logging superblock counters, then the inode allocated/free and
 * used block counts are not updated in the on disk superblock. In this case,
 * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we
 * still need to update the incore superblock with the changes.
 *
 * Deltas for the inode count are +/-64, hence we use a large batch size of 128
 * so we don't need to take the counter lock on every update.
 */
#define XFS_ICOUNT_BATCH	128

void
xfs_trans_unreserve_and_mod_sb(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	int64_t			blkdelta = tp->t_blk_res;
	int64_t			rtxdelta = tp->t_rtx_res;
	int64_t			idelta = 0;
	int64_t			ifreedelta = 0;

	/*
	 * Calculate the deltas.
	 *
	 * t_fdblocks_delta and t_frextents_delta can be positive or negative:
	 *
	 *  - positive values indicate blocks freed in the transaction.
	 *  - negative values indicate blocks allocated by the transaction
	 *
	 * Negative values can only happen if the transaction has a block
	 * reservation that covers the allocated block.  The end result is
	 * that the calculated delta values must always be positive and we
	 * can only put back previous allocated or reserved blocks here.
	 */
	ASSERT(tp->t_blk_res || tp->t_fdblocks_delta == 0);
	if (xfs_has_lazysbcount(mp) || (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
		blkdelta += tp->t_fdblocks_delta;
		ASSERT(blkdelta >= 0);
	}

	ASSERT(tp->t_rtx_res || tp->t_frextents_delta == 0);
	if (tp->t_flags & XFS_TRANS_SB_DIRTY) {
		rtxdelta += tp->t_frextents_delta;
		ASSERT(rtxdelta >= 0);
	}

	if (xfs_has_lazysbcount(mp) || (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
		idelta = tp->t_icount_delta;
		ifreedelta = tp->t_ifree_delta;
	}

	/* apply the per-cpu counters */
	if (blkdelta)
		xfs_add_fdblocks(mp, blkdelta);

	if (idelta)
		percpu_counter_add_batch(&mp->m_icount, idelta,
					 XFS_ICOUNT_BATCH);

	if (ifreedelta)
		percpu_counter_add(&mp->m_ifree, ifreedelta);

	if (rtxdelta)
		xfs_add_frextents(mp, rtxdelta);

	if (!(tp->t_flags & XFS_TRANS_SB_DIRTY))
		return;

	/* apply remaining deltas */
	spin_lock(&mp->m_sb_lock);
	mp->m_sb.sb_fdblocks += tp->t_fdblocks_delta + tp->t_res_fdblocks_delta;
	mp->m_sb.sb_icount += idelta;
	mp->m_sb.sb_ifree += ifreedelta;
	/*
	 * Do not touch sb_frextents here because we are dealing with incore
	 * reservation.  sb_frextents is not part of the lazy sb counters so it
	 * must be consistent with the ondisk rtbitmap and must never include
	 * incore reservations.
	 */
	mp->m_sb.sb_dblocks += tp->t_dblocks_delta;
	mp->m_sb.sb_agcount += tp->t_agcount_delta;
	mp->m_sb.sb_imax_pct += tp->t_imaxpct_delta;
	mp->m_sb.sb_rextsize += tp->t_rextsize_delta;
	if (tp->t_rextsize_delta) {
		mp->m_rtxblklog = log2_if_power2(mp->m_sb.sb_rextsize);
		mp->m_rtxblkmask = mask64_if_power2(mp->m_sb.sb_rextsize);
	}
	mp->m_sb.sb_rbmblocks += tp->t_rbmblocks_delta;
	mp->m_sb.sb_rblocks += tp->t_rblocks_delta;
	mp->m_sb.sb_rextents += tp->t_rextents_delta;
	mp->m_sb.sb_rextslog += tp->t_rextslog_delta;
	spin_unlock(&mp->m_sb_lock);

	/*
	 * Debug checks outside of the spinlock so they don't lock up the
	 * machine if they fail.
	 */
	ASSERT(mp->m_sb.sb_imax_pct >= 0);
	ASSERT(mp->m_sb.sb_rextslog >= 0);
}
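
/*
 * Worked example, not part of the original file: a transaction that reserved
 * 10 blocks (t_blk_res == 10) and then allocated 6 of them has accumulated
 * t_fdblocks_delta == -6 via xfs_trans_mod_sb().  On a lazy-sb-counter
 * filesystem the function above therefore returns
 *
 *	blkdelta = t_blk_res + t_fdblocks_delta = 10 + (-6) = 4
 *
 * unused blocks to the free space counters through xfs_add_fdblocks(), so the
 * net effect of the whole transaction is a decrease of exactly 6 free blocks.
 */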

/* Add the given log item to the transaction's list of log items. */
void
xfs_trans_add_item(
	struct xfs_trans	*tp,
	struct xfs_log_item	*lip)
{
	ASSERT(lip->li_log == tp->t_mountp->m_log);
	ASSERT(lip->li_ailp == tp->t_mountp->m_ail);
	ASSERT(list_empty(&lip->li_trans));
	ASSERT(!test_bit(XFS_LI_DIRTY, &lip->li_flags));

	list_add_tail(&lip->li_trans, &tp->t_items);
	trace_xfs_trans_add_item(tp, _RET_IP_);
}

/*
 * Unlink the log item from the transaction. The log item is no longer
 * considered dirty in this transaction, as the linked transaction has
 * finished, either by abort or commit completion.
 */
void
xfs_trans_del_item(
	struct xfs_log_item	*lip)
{
	clear_bit(XFS_LI_DIRTY, &lip->li_flags);
	list_del_init(&lip->li_trans);
}

/* Detach and unlock all of the items in a transaction */
static void
xfs_trans_free_items(
	struct xfs_trans	*tp,
	bool			abort)
{
	struct xfs_log_item	*lip, *next;

	trace_xfs_trans_free_items(tp, _RET_IP_);

	list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
		xfs_trans_del_item(lip);
		if (abort)
			set_bit(XFS_LI_ABORTED, &lip->li_flags);
		if (lip->li_ops->iop_release)
			lip->li_ops->iop_release(lip);
	}
}

/*
 * Sort transaction items prior to running precommit operations. This will
 * attempt to order the items such that they will always be locked in the same
 * order. Items that have no sort function are moved to the end of the list
 * and so are locked last.
 *
 * This may need refinement as different types of objects add sort functions.
 *
 * Function is more complex than it needs to be because we are comparing 64 bit
 * values and the function only returns 32 bit values.
 */
static int
xfs_trans_precommit_sort(
	void			*unused_arg,
	const struct list_head	*a,
	const struct list_head	*b)
{
	struct xfs_log_item	*lia = container_of(a,
					struct xfs_log_item, li_trans);
	struct xfs_log_item	*lib = container_of(b,
					struct xfs_log_item, li_trans);
	int64_t			diff;

	/*
	 * If both items are non-sortable, leave them alone. If only one is
	 * sortable, move the non-sortable item towards the end of the list.
	 */
	if (!lia->li_ops->iop_sort && !lib->li_ops->iop_sort)
		return 0;
	if (!lia->li_ops->iop_sort)
		return 1;
	if (!lib->li_ops->iop_sort)
		return -1;

	diff = lia->li_ops->iop_sort(lia) - lib->li_ops->iop_sort(lib);
	if (diff < 0)
		return -1;
	if (diff > 0)
		return 1;
	return 0;
}
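
/*
 * Illustrative sketch, not part of the original file: a log item type opts in
 * to this ordering by supplying ->iop_sort in its xfs_item_ops and returning
 * a 64-bit key that defines the locking order.  For inode log items the
 * natural key is simply the inode number, roughly:
 *
 *	static uint64_t
 *	xfs_example_inode_item_sort(
 *		struct xfs_log_item	*lip)
 *	{
 *		return INODE_ITEM(lip)->ili_inode->i_ino;
 *	}
 *
 * (xfs_example_inode_item_sort is a hypothetical name for illustration.)
 */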

/*
 * Run transaction precommit functions.
 *
 * If there is an error in any of the callouts, then stop immediately and
 * trigger a shutdown to abort the transaction. There is no recovery possible
 * from errors at this point as the transaction is dirty....
 */
static int
xfs_trans_run_precommits(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_log_item	*lip, *n;
	int			error = 0;

	/*
	 * Sort the item list to avoid ABBA deadlocks with other transactions
	 * running precommit operations that lock and unlock objects such as
	 * inode cluster buffers.
	 */
	list_sort(NULL, &tp->t_items, xfs_trans_precommit_sort);

	/*
	 * Precommit operations can remove the log item from the transaction
	 * if the log item exists purely to delay modifications until they
	 * can be ordered against other operations. Hence we have to use
	 * list_for_each_entry_safe() here.
	 */
	list_for_each_entry_safe(lip, n, &tp->t_items, li_trans) {
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;
		if (lip->li_ops->iop_precommit) {
			error = lip->li_ops->iop_precommit(tp, lip);
			if (error)
				break;
		}
	}
	if (error)
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	return error;
}

/*
 * Commit the given transaction to the log.
 *
 * XFS disk error handling mechanism is not based on a typical
 * transaction abort mechanism. Logically after the filesystem
 * gets marked 'SHUTDOWN', we can't let any new transactions
 * be durable - ie. committed to disk - because some metadata might
 * be inconsistent. In such cases, this returns an error, and the
 * caller may assume that all locked objects joined to the transaction
 * have already been unlocked as if the commit had succeeded.
 * Do not reference the transaction structure after this call.
 */
static int
__xfs_trans_commit(
	struct xfs_trans	*tp,
	bool			regrant)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xlog		*log = mp->m_log;
	xfs_csn_t		commit_seq = 0;
	int			error = 0;
	int			sync = tp->t_flags & XFS_TRANS_SYNC;

	trace_xfs_trans_commit(tp, _RET_IP_);

	error = xfs_trans_run_precommits(tp);
	if (error) {
		if (tp->t_flags & XFS_TRANS_PERM_LOG_RES)
			xfs_defer_cancel(tp);
		goto out_unreserve;
	}

	/*
	 * Finish deferred items on final commit. Only permanent transactions
	 * should ever have deferred ops.
	 */
	WARN_ON_ONCE(!list_empty(&tp->t_dfops) &&
		     !(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
	if (!regrant && (tp->t_flags & XFS_TRANS_PERM_LOG_RES)) {
		error = xfs_defer_finish_noroll(&tp);
		if (error)
			goto out_unreserve;

		/* Run precommits from final tx in defer chain */
		error = xfs_trans_run_precommits(tp);
		if (error)
			goto out_unreserve;
	}

	/*
	 * If there is nothing to be logged by the transaction,
	 * then unlock all of the items associated with the
	 * transaction and free the transaction structure.
	 * Also make sure to return any reserved blocks to
	 * the free pool.
	 */
	if (!(tp->t_flags & XFS_TRANS_DIRTY))
		goto out_unreserve;

	/*
	 * We must check against log shutdown here because we cannot abort log
	 * items and leave them dirty, inconsistent and unpinned in memory while
	 * the log is active. This leaves them open to being written back to
	 * disk, and that will lead to on-disk corruption.
	 */
	if (xlog_is_shutdown(log)) {
		error = -EIO;
		goto out_unreserve;
	}

	ASSERT(tp->t_ticket != NULL);

	/*
	 * If we need to update the superblock, then do it now.
	 */
	if (tp->t_flags & XFS_TRANS_SB_DIRTY)
		xfs_trans_apply_sb_deltas(tp);
	xfs_trans_apply_dquot_deltas(tp);

	xlog_cil_commit(log, tp, &commit_seq, regrant);

	xfs_trans_free(tp);

	/*
	 * If the transaction needs to be synchronous, then force the
	 * log out now and wait for it.
	 */
	if (sync) {
		error = xfs_log_force_seq(mp, commit_seq, XFS_LOG_SYNC, NULL);
		XFS_STATS_INC(mp, xs_trans_sync);
	} else {
		XFS_STATS_INC(mp, xs_trans_async);
	}

	return error;

out_unreserve:
	xfs_trans_unreserve_and_mod_sb(tp);

	/*
	 * It is indeed possible for the transaction to be not dirty but
	 * the dqinfo portion to be.  All that means is that we have some
	 * (non-persistent) quota reservations that need to be unreserved.
	 */
	xfs_trans_unreserve_and_mod_dquots(tp);
	if (tp->t_ticket) {
		if (regrant && !xlog_is_shutdown(log))
			xfs_log_ticket_regrant(log, tp->t_ticket);
		else
			xfs_log_ticket_ungrant(log, tp->t_ticket);
		tp->t_ticket = NULL;
	}
	xfs_trans_free_items(tp, !!error);
	xfs_trans_free(tp);

	XFS_STATS_INC(mp, xs_trans_empty);
	return error;
}

int
xfs_trans_commit(
	struct xfs_trans	*tp)
{
	return __xfs_trans_commit(tp, false);
}
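
/*
 * Illustrative sketch, not part of the original file: callers that need the
 * commit to be durable before returning (wsync mounts, O_SYNC style updates)
 * mark the transaction synchronous first, which makes __xfs_trans_commit()
 * force the CIL sequence to disk and wait for it:
 *
 *	xfs_trans_set_sync(tp);
 *	error = xfs_trans_commit(tp);
 */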

/*
 * Unlock all of the transaction's items and free the transaction.  If the
 * transaction is dirty, we must shut down the filesystem because there is no
 * way to restore them to their previous state.
 *
 * If the transaction has made a log reservation, make sure to release it as
 * well.
 *
 * This is a high level function (equivalent to xfs_trans_commit()) and so can
 * be called after the transaction has effectively been aborted due to the mount
 * being shut down. However, if the mount has not been shut down and the
 * transaction is dirty we will shut the mount down and, in doing so, that
 * guarantees that the log is shut down, too. Hence we don't need to be as
 * careful with shutdown state and dirty items here as we do in
 * xfs_trans_commit().
 */
void
xfs_trans_cancel(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xlog		*log = mp->m_log;
	bool			dirty = (tp->t_flags & XFS_TRANS_DIRTY);

	trace_xfs_trans_cancel(tp, _RET_IP_);

	/*
	 * It's never valid to cancel a transaction with deferred ops attached,
	 * because the transaction is effectively dirty.  Complain about this
	 * loudly before freeing the in-memory deferred ops and shutting down the
	 * filesystem.
	 */
	if (!list_empty(&tp->t_dfops)) {
		ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
		dirty = true;
		xfs_defer_cancel(tp);
	}

	/*
	 * See if the caller is relying on us to shut down the filesystem. We
	 * only want an error report if there isn't already a shutdown in
	 * progress, so we only need to check against the mount shutdown state
	 * here.
	 */
	if (dirty && !xfs_is_shutdown(mp)) {
		XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	}
#ifdef DEBUG
	/* Log items need to be consistent until the log is shut down. */
	if (!dirty && !xlog_is_shutdown(log)) {
		struct xfs_log_item *lip;

		list_for_each_entry(lip, &tp->t_items, li_trans)
			ASSERT(!xlog_item_is_intent(lip));
	}
#endif
	xfs_trans_unreserve_and_mod_sb(tp);
	xfs_trans_unreserve_and_mod_dquots(tp);

	if (tp->t_ticket) {
		xfs_log_ticket_ungrant(log, tp->t_ticket);
		tp->t_ticket = NULL;
	}

	xfs_trans_free_items(tp, dirty);
	xfs_trans_free(tp);
}

/*
 * Roll from one trans in the sequence of PERMANENT transactions to
 * the next: permanent transactions are only flushed out when
 * committed with xfs_trans_commit(), but we still want as soon
 * as possible to let chunks of it go to the log. So we commit the
 * chunk we've been working on and get a new transaction to continue.
 */
int
xfs_trans_roll(
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*trans = *tpp;
	struct xfs_trans_res	tres;
	int			error;

	trace_xfs_trans_roll(trans, _RET_IP_);

	/*
	 * Copy the critical parameters from one trans to the next.
	 */
	tres.tr_logres = trans->t_log_res;
	tres.tr_logcount = trans->t_log_count;

	*tpp = xfs_trans_dup(trans);

	/*
	 * Commit the current transaction.
	 * If this commit failed, then it'd just unlock those items that
	 * are not marked ihold. That also means that a filesystem shutdown
	 * is in progress. The caller takes the responsibility to cancel
	 * the duplicate transaction that gets returned.
	 */
	error = __xfs_trans_commit(trans, true);
	if (error)
		return error;

	/*
	 * Reserve space in the log for the next transaction.
	 * This also pushes items in the "AIL", the list of logged items,
	 * out to disk if they are taking up space at the tail of the log
	 * that we want to use.  This requires that either nothing be locked
	 * across this call, or that anything that is locked be logged in
	 * the prior and the next transactions.
	 */
	tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
	return xfs_trans_reserve(*tpp, &tres, 0, 0);
}
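
/*
 * Illustrative sketch, not part of the original file: as xfs_trans_dup() notes,
 * locks and log items do not carry over across a roll, so a caller that keeps
 * an inode locked across the roll must log it beforehand and re-join it to the
 * new transaction afterwards:
 *
 *	xfs_trans_ijoin(tp, ip, 0);
 *	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 *	error = xfs_trans_roll(&tp);
 *	if (error)
 *		return error;
 *	xfs_trans_ijoin(tp, ip, 0);
 */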
/*
 * Allocate a transaction, lock and join the inode to it, and reserve quota.
 *
 * The caller must ensure that the on-disk dquots attached to this inode have
 * already been allocated and initialized.  The caller is responsible for
 * releasing ILOCK_EXCL if a new transaction is returned.
 */
int
xfs_trans_alloc_inode(
	struct xfs_inode	*ip,
	struct xfs_trans_res	*resv,
	unsigned int		dblocks,
	unsigned int		rblocks,
	bool			force,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp;
	struct xfs_mount	*mp = ip->i_mount;
	bool			retried = false;
	int			error;

retry:
	error = xfs_trans_alloc(mp, resv, dblocks,
			xfs_extlen_to_rtxlen(mp, rblocks),
			force ? XFS_TRANS_RESERVE : 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	error = xfs_qm_dqattach_locked(ip, false);
	if (error) {
		/* Caller should have allocated the dquots! */
		ASSERT(error != -ENOENT);
		goto out_cancel;
	}

	error = xfs_trans_reserve_quota_nblks(tp, ip, dblocks, rblocks, force);
	if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
		xfs_trans_cancel(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_blockgc_free_quota(ip, 0);
		retried = true;
		goto retry;
	}
	if (error)
		goto out_cancel;

	*tpp = tp;
	return 0;

out_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * Try to reserve more blocks for a transaction.
 *
 * This is for callers that need to attach resources to a transaction, scan
 * those resources to determine the space reservation, and then
 * modify the attached resources.  In other words, online repair.  This can
 * fail due to ENOSPC, so the caller must be able to cancel the transaction
 * without shutting down the fs.
 */
int
xfs_trans_reserve_more(
	struct xfs_trans	*tp,
	unsigned int		blocks,
	unsigned int		rtextents)
{
	struct xfs_trans_res	resv = { };

	return xfs_trans_reserve(tp, &resv, blocks, rtextents);
}

/*
 * Try to reserve more blocks and file quota for a transaction.  Same
 * conditions of usage as xfs_trans_reserve_more.
 */
int
xfs_trans_reserve_more_inode(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	unsigned int		dblocks,
	unsigned int		rblocks,
	bool			force_quota)
{
	struct xfs_trans_res	resv = { };
	struct xfs_mount	*mp = ip->i_mount;
	unsigned int		rtx = xfs_extlen_to_rtxlen(mp, rblocks);
	int			error;

	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);

	error = xfs_trans_reserve(tp, &resv, dblocks, rtx);
	if (error)
		return error;

	if (!XFS_IS_QUOTA_ON(mp) || xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
		return 0;

	if (tp->t_flags & XFS_TRANS_RESERVE)
		force_quota = true;

	error = xfs_trans_reserve_quota_nblks(tp, ip, dblocks, rblocks,
			force_quota);
	if (!error)
		return 0;

	/* Quota failed, give back the new reservation. */
	xfs_add_fdblocks(mp, dblocks);
	tp->t_blk_res -= dblocks;
	xfs_add_frextents(mp, rtx);
	tp->t_rtx_res -= rtx;
	return error;
}
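/*
 * Editorial illustration (not part of the original xfs_trans.c): a minimal
 * caller of xfs_trans_alloc_inode().  The helper name, the tr_itruncate
 * reservation and the block count are hypothetical; the point shown is that
 * the helper returns with the inode joined and ILOCK_EXCL held, so the
 * caller unlocks it after commit or cancel.  Guarded with #if 0 so it is
 * never compiled.
 */
#if 0
static int
xfs_example_inode_update(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	/* 8 data blocks, no realtime extents, no forced reservation. */
	error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_itruncate, 8, 0,
			false, &tp);
	if (error)
		return error;

	/* ... modify the inode here, then log it ... */
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
#endif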
/*
 * Allocate a transaction in preparation for inode creation by reserving quota
 * against the given dquots.  Callers are not responsible for releasing
 * ILOCK_EXCL.
 */
int
xfs_trans_alloc_icreate(
	struct xfs_mount	*mp,
	struct xfs_trans_res	*resv,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	unsigned int		dblocks,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp;
	bool			retried = false;
	int			error;

retry:
	error = xfs_trans_alloc(mp, resv, dblocks, 0, 0, &tp);
	if (error)
		return error;

	error = xfs_trans_reserve_quota_icreate(tp, udqp, gdqp, pdqp, dblocks);
	if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
		xfs_trans_cancel(tp);
		xfs_blockgc_free_dquots(mp, udqp, gdqp, pdqp, 0);
		retried = true;
		goto retry;
	}
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	*tpp = tp;
	return 0;
}

/*
 * Allocate a transaction, lock and join the inode to it, and reserve quota
 * in preparation for inode attribute changes that include uid, gid, or prid
 * changes.
 *
 * The caller must ensure that the on-disk dquots attached to this inode have
 * already been allocated and initialized.  The ILOCK will be dropped when the
 * transaction is committed or cancelled.
 */
int
xfs_trans_alloc_ichange(
	struct xfs_inode	*ip,
	struct xfs_dquot	*new_udqp,
	struct xfs_dquot	*new_gdqp,
	struct xfs_dquot	*new_pdqp,
	bool			force,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*udqp;
	struct xfs_dquot	*gdqp;
	struct xfs_dquot	*pdqp;
	bool			retried = false;
	int			error;

retry:
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	error = xfs_qm_dqattach_locked(ip, false);
	if (error) {
		/* Caller should have allocated the dquots! */
		ASSERT(error != -ENOENT);
		goto out_cancel;
	}

	/*
	 * For each quota type, skip quota reservations if the inode's dquots
	 * now match the ones that came from the caller, or the caller didn't
	 * pass one in.  The inode's dquots can change if we drop the ILOCK to
	 * perform a blockgc scan, so we must preserve the caller's arguments.
	 */
	udqp = (new_udqp != ip->i_udquot) ? new_udqp : NULL;
	gdqp = (new_gdqp != ip->i_gdquot) ? new_gdqp : NULL;
	pdqp = (new_pdqp != ip->i_pdquot) ? new_pdqp : NULL;
	if (udqp || gdqp || pdqp) {
		unsigned int	qflags = XFS_QMOPT_RES_REGBLKS;

		if (force)
			qflags |= XFS_QMOPT_FORCE_RES;

		/*
		 * Reserve enough quota to handle blocks on disk and reserved
		 * for a delayed allocation.  We'll actually transfer the
		 * delalloc reservation between dquots at chown time, even
		 * though that part is only semi-transactional.
		 */
		error = xfs_trans_reserve_quota_bydquots(tp, mp, udqp, gdqp,
				pdqp, ip->i_nblocks + ip->i_delayed_blks,
				1, qflags);
		if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
			xfs_trans_cancel(tp);
			xfs_blockgc_free_dquots(mp, udqp, gdqp, pdqp, 0);
			retried = true;
			goto retry;
		}
		if (error)
			goto out_cancel;
	}

	*tpp = tp;
	return 0;

out_cancel:
	xfs_trans_cancel(tp);
	return error;
}
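/*
 * Editorial illustration (not part of the original xfs_trans.c): the shape
 * of an ownership-change caller of xfs_trans_alloc_ichange().  Dquot lookup
 * and the actual id changes are elided, and the helper name is hypothetical;
 * because the inode was joined with XFS_ILOCK_EXCL above, committing or
 * cancelling the transaction drops the ILOCK for us.  Guarded with #if 0 so
 * it is never compiled.
 */
#if 0
static int
xfs_example_chown(
	struct xfs_inode	*ip,
	struct xfs_dquot	*udqp,	/* new user dquot, may be NULL */
	struct xfs_dquot	*gdqp)	/* new group dquot, may be NULL */
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc_ichange(ip, udqp, gdqp, NULL, false, &tp);
	if (error)
		return error;

	/* ... switch i_uid/i_gid and transfer quota counts here ... */
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp);
}
#endif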
/*
 * Allocate a transaction, lock and join the directory and child inodes to it,
 * and reserve quota for a directory update.  If there isn't sufficient space,
 * @dblocks will be set to zero for a reservationless directory update and
 * @nospace_error will be set to a negative errno describing the space
 * constraint we hit.
 *
 * The caller must ensure that the on-disk dquots attached to this inode have
 * already been allocated and initialized.  The ILOCKs will be dropped when the
 * transaction is committed or cancelled.
 *
 * Caller is responsible for unlocking the inodes manually upon return
 */
int
xfs_trans_alloc_dir(
	struct xfs_inode	*dp,
	struct xfs_trans_res	*resv,
	struct xfs_inode	*ip,
	unsigned int		*dblocks,
	struct xfs_trans	**tpp,
	int			*nospace_error)
{
	struct xfs_trans	*tp;
	struct xfs_mount	*mp = ip->i_mount;
	unsigned int		resblks;
	bool			retried = false;
	int			error;

retry:
	*nospace_error = 0;
	resblks = *dblocks;
	error = xfs_trans_alloc(mp, resv, resblks, 0, 0, &tp);
	if (error == -ENOSPC) {
		*nospace_error = error;
		resblks = 0;
		error = xfs_trans_alloc(mp, resv, resblks, 0, 0, &tp);
	}
	if (error)
		return error;

	xfs_lock_two_inodes(dp, XFS_ILOCK_EXCL, ip, XFS_ILOCK_EXCL);

	xfs_trans_ijoin(tp, dp, 0);
	xfs_trans_ijoin(tp, ip, 0);

	error = xfs_qm_dqattach_locked(dp, false);
	if (error) {
		/* Caller should have allocated the dquots! */
		ASSERT(error != -ENOENT);
		goto out_cancel;
	}

	error = xfs_qm_dqattach_locked(ip, false);
	if (error) {
		/* Caller should have allocated the dquots! */
		ASSERT(error != -ENOENT);
		goto out_cancel;
	}

	if (resblks == 0)
		goto done;

	error = xfs_trans_reserve_quota_nblks(tp, dp, resblks, 0, false);
	if (error == -EDQUOT || error == -ENOSPC) {
		if (!retried) {
			xfs_trans_cancel(tp);
			xfs_iunlock(dp, XFS_ILOCK_EXCL);
			if (dp != ip)
				xfs_iunlock(ip, XFS_ILOCK_EXCL);
			xfs_blockgc_free_quota(dp, 0);
			retried = true;
			goto retry;
		}

		*nospace_error = error;
		resblks = 0;
		error = 0;
	}
	if (error)
		goto out_cancel;

done:
	*tpp = tp;
	*dblocks = resblks;
	return 0;

out_cancel:
	xfs_trans_cancel(tp);
	return error;
}
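/*
 * Editorial illustration (not part of the original xfs_trans.c): how a
 * directory-update caller might consume the @dblocks/@nospace_error contract
 * of xfs_trans_alloc_dir().  The helper name and the link reservation are
 * hypothetical; the point shown is that a zero block reservation is still
 * usable, the caller unlocks both inodes itself, and the saved space error
 * is only reported if the update ultimately fails for lack of space.
 * Guarded with #if 0 so it is never compiled.
 */
#if 0
static int
xfs_example_dir_update(
	struct xfs_inode	*dp,
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_trans	*tp;
	unsigned int		resblks = XFS_LINK_SPACE_RES(mp, MAXNAMELEN);
	int			nospace_error = 0;
	int			error;

	error = xfs_trans_alloc_dir(dp, &M_RES(mp)->tr_link, ip, &resblks,
			&tp, &nospace_error);
	if (error)
		return error;

	/* ... perform the directory modification here ... */

	error = xfs_trans_commit(tp);
	xfs_iunlock(dp, XFS_ILOCK_EXCL);
	if (ip != dp)
		xfs_iunlock(ip, XFS_ILOCK_EXCL);

	/* If we fell back to resblks == 0, report the saved space error. */
	if (error == -ENOSPC && nospace_error)
		error = nospace_error;
	return error;
}
#endif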