// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * Copyright (C) 2010 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_extent_busy.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_trace.h"
#include "xfs_error.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_icache.h"
#include "xfs_rtbitmap.h"

struct kmem_cache	*xfs_trans_cache;

#if defined(CONFIG_TRACEPOINTS)
static void
xfs_trans_trace_reservations(
	struct xfs_mount	*mp)
{
	struct xfs_trans_res	*res;
	struct xfs_trans_res	*end_res;
	int			i;

	res = (struct xfs_trans_res *)M_RES(mp);
	end_res = (struct xfs_trans_res *)(M_RES(mp) + 1);
	for (i = 0; res < end_res; i++, res++)
		trace_xfs_trans_resv_calc(mp, i, res);
}
#else
# define xfs_trans_trace_reservations(mp)
#endif

/*
 * Initialize the precomputed transaction reservation values
 * in the mount structure.
 */
void
xfs_trans_init(
	struct xfs_mount	*mp)
{
	xfs_trans_resv_calc(mp, M_RES(mp));
	xfs_trans_trace_reservations(mp);
}

/*
 * Free the transaction structure.  If there is more clean up
 * to do when the structure is freed, add it here.
 */
STATIC void
xfs_trans_free(
	struct xfs_trans	*tp)
{
	xfs_extent_busy_sort(&tp->t_busy);
	xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);

	trace_xfs_trans_free(tp, _RET_IP_);
	xfs_trans_clear_context(tp);
	if (!(tp->t_flags & XFS_TRANS_NO_WRITECOUNT))
		sb_end_intwrite(tp->t_mountp->m_super);
	xfs_trans_free_dqinfo(tp);
	kmem_cache_free(xfs_trans_cache, tp);
}

/*
 * This is called to create a new transaction which will share the
 * permanent log reservation of the given transaction.  The remaining
 * unused block and rt extent reservations are also inherited.  This
 * implies that the original transaction is no longer allowed to allocate
 * blocks.  Locks and log items, however, are not inherited.  They must
 * be added to the new transaction explicitly.
 */
STATIC struct xfs_trans *
xfs_trans_dup(
	struct xfs_trans	*tp)
{
	struct xfs_trans	*ntp;

	trace_xfs_trans_dup(tp, _RET_IP_);

	ntp = kmem_cache_zalloc(xfs_trans_cache, GFP_KERNEL | __GFP_NOFAIL);

	/*
	 * Initialize the new transaction structure.
	 */
	ntp->t_magic = XFS_TRANS_HEADER_MAGIC;
	ntp->t_mountp = tp->t_mountp;
	INIT_LIST_HEAD(&ntp->t_items);
	INIT_LIST_HEAD(&ntp->t_busy);
	INIT_LIST_HEAD(&ntp->t_dfops);
	ntp->t_highest_agno = NULLAGNUMBER;

	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(tp->t_ticket != NULL);

	ntp->t_flags = XFS_TRANS_PERM_LOG_RES |
		       (tp->t_flags & XFS_TRANS_RESERVE) |
		       (tp->t_flags & XFS_TRANS_NO_WRITECOUNT) |
		       (tp->t_flags & XFS_TRANS_RES_FDBLKS);
	/* We gave our writer reference to the new transaction */
	tp->t_flags |= XFS_TRANS_NO_WRITECOUNT;
	ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket);

	ASSERT(tp->t_blk_res >= tp->t_blk_res_used);
	ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
	tp->t_blk_res = tp->t_blk_res_used;

	ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
	tp->t_rtx_res = tp->t_rtx_res_used;

	xfs_trans_switch_context(tp, ntp);

	/* move deferred ops over to the new tp */
	xfs_defer_move(ntp, tp);

	xfs_trans_dup_dqinfo(tp, ntp);
	return ntp;
}

/*
 * This is called to reserve free disk blocks and log space for the
 * given transaction.  This must be done before allocating any resources
 * within the transaction.
 *
 * This will return ENOSPC if there are not enough blocks available.
 * It will sleep waiting for available log space.
 * The only valid value for the flags parameter is XFS_RES_LOG_PERM, which
 * is used by long running transactions.  If any one of the reservations
 * fails then they will all be backed out.
 *
 * This does not do quota reservations. That typically is done by the
 * caller afterwards.
 */
static int
xfs_trans_reserve(
	struct xfs_trans	*tp,
	struct xfs_trans_res	*resp,
	uint			blocks,
	uint			rtextents)
{
	struct xfs_mount	*mp = tp->t_mountp;
	int			error = 0;
	bool			rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;

	/*
	 * Attempt to reserve the needed disk blocks by decrementing
	 * the number needed from the number available.  This will
	 * fail if the count would go below zero.
	 */
	if (blocks > 0) {
		error = xfs_dec_fdblocks(mp, blocks, rsvd);
		if (error != 0)
			return -ENOSPC;
		tp->t_blk_res += blocks;
	}

	/*
	 * Reserve the log space needed for this transaction.
	 */
	if (resp->tr_logres > 0) {
		bool	permanent = false;

		ASSERT(tp->t_log_res == 0 ||
		       tp->t_log_res == resp->tr_logres);
		ASSERT(tp->t_log_count == 0 ||
		       tp->t_log_count == resp->tr_logcount);

		if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES) {
			tp->t_flags |= XFS_TRANS_PERM_LOG_RES;
			permanent = true;
		} else {
			ASSERT(tp->t_ticket == NULL);
			ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
		}

		if (tp->t_ticket != NULL) {
			ASSERT(resp->tr_logflags & XFS_TRANS_PERM_LOG_RES);
			error = xfs_log_regrant(mp, tp->t_ticket);
		} else {
			error = xfs_log_reserve(mp, resp->tr_logres,
						resp->tr_logcount,
						&tp->t_ticket, permanent);
		}

		if (error)
			goto undo_blocks;

		tp->t_log_res = resp->tr_logres;
		tp->t_log_count = resp->tr_logcount;
	}

	/*
	 * Attempt to reserve the needed realtime extents by decrementing
	 * the number needed from the number available.  This will
	 * fail if the count would go below zero.
	 */
	if (rtextents > 0) {
		error = xfs_dec_frextents(mp, rtextents);
		if (error) {
			error = -ENOSPC;
			goto undo_log;
		}
		tp->t_rtx_res += rtextents;
	}

	return 0;

	/*
	 * Error cases jump to one of these labels to undo any
	 * reservations which have already been performed.
	 */
undo_log:
	if (resp->tr_logres > 0) {
		xfs_log_ticket_ungrant(mp->m_log, tp->t_ticket);
		tp->t_ticket = NULL;
		tp->t_log_res = 0;
		tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES;
	}

undo_blocks:
	if (blocks > 0) {
		xfs_add_fdblocks(mp, blocks);
		tp->t_blk_res = 0;
	}
	return error;
}

int
xfs_trans_alloc(
	struct xfs_mount	*mp,
	struct xfs_trans_res	*resp,
	uint			blocks,
	uint			rtextents,
	uint			flags,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp;
	bool			want_retry = true;
	int			error;

	/*
	 * Allocate the handle before we do our freeze accounting and setting up
	 * GFP_NOFS allocation context so that we avoid lockdep false positives
	 * by doing GFP_KERNEL allocations inside sb_start_intwrite().
	 */
retry:
	tp = kmem_cache_zalloc(xfs_trans_cache, GFP_KERNEL | __GFP_NOFAIL);
	if (!(flags & XFS_TRANS_NO_WRITECOUNT))
		sb_start_intwrite(mp->m_super);
	xfs_trans_set_context(tp);

	/*
	 * Zero-reservation ("empty") transactions can't modify anything, so
	 * they're allowed to run while we're frozen.
	 */
	WARN_ON(resp->tr_logres > 0 &&
		mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
	ASSERT(!(flags & XFS_TRANS_RES_FDBLKS) ||
	       xfs_has_lazysbcount(mp));

	tp->t_magic = XFS_TRANS_HEADER_MAGIC;
	tp->t_flags = flags;
	tp->t_mountp = mp;
	INIT_LIST_HEAD(&tp->t_items);
	INIT_LIST_HEAD(&tp->t_busy);
	INIT_LIST_HEAD(&tp->t_dfops);
	tp->t_highest_agno = NULLAGNUMBER;

	error = xfs_trans_reserve(tp, resp, blocks, rtextents);
	if (error == -ENOSPC && want_retry) {
		xfs_trans_cancel(tp);

		/*
		 * We weren't able to reserve enough space for the transaction.
		 * Flush the other speculative space allocations to free space.
		 * Do not perform a synchronous scan because callers can hold
		 * other locks.
		 */
		error = xfs_blockgc_flush_all(mp);
		if (error)
			return error;
		want_retry = false;
		goto retry;
	}
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	trace_xfs_trans_alloc(tp, _RET_IP_);

	*tpp = tp;
	return 0;
}
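
/*
 * Editorial usage sketch (not part of the original file): the typical
 * lifecycle around xfs_trans_alloc() is allocate, join and log the objects
 * being modified, then commit (or cancel on error).  The inode "ip" and the
 * choice of the tr_ichange reservation here are illustrative assumptions,
 * not a fixed recipe:
 *
 *	struct xfs_trans	*tp;
 *	int			error;
 *
 *	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
 *	if (error)
 *		return error;
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 *	...modify the inode core...
 *	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 *	return xfs_trans_commit(tp);
 *
 * Passing the lock flags to xfs_trans_ijoin() makes commit/cancel drop the
 * ILOCK for the caller.
 */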

/*
 * Create an empty transaction with no reservation.  This is a defensive
 * mechanism for routines that query metadata without actually modifying them --
 * if the metadata being queried is somehow cross-linked (think a btree block
 * pointer that points higher in the tree), we risk deadlock.  However, blocks
 * grabbed as part of a transaction can be re-grabbed.  The verifiers will
 * notice the corrupt block and the operation will fail back to userspace
 * without deadlocking.
 *
 * Note the zero-length reservation; this transaction MUST be cancelled without
 * any dirty data.
 *
 * Callers should obtain freeze protection to avoid a conflict with fs freezing
 * where we can be grabbing buffers at the same time that freeze is trying to
 * drain the buffer LRU list.
 */
int
xfs_trans_alloc_empty(
	struct xfs_mount	*mp,
	struct xfs_trans	**tpp)
{
	struct xfs_trans_res		resv = {0};

	return xfs_trans_alloc(mp, &resv, 0, 0, XFS_TRANS_NO_WRITECOUNT, tpp);
}
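
/*
 * Editorial usage sketch (not part of the original file): empty transactions
 * back read-only metadata walks.  Since nothing may be dirtied, the only
 * legal way out is cancellation:
 *
 *	struct xfs_trans	*tp;
 *	int			error;
 *
 *	error = xfs_trans_alloc_empty(mp, &tp);
 *	if (error)
 *		return error;
 *	...query metadata through tp without dirtying anything...
 *	xfs_trans_cancel(tp);
 */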

/*
 * Record the indicated change to the given field for application
 * to the file system's superblock when the transaction commits.
 * For now, just store the change in the transaction structure.
 *
 * Mark the transaction structure to indicate that the superblock
 * needs to be updated before committing.
 *
 * Because we may not be keeping track of allocated/free inodes and
 * used filesystem blocks in the superblock, we do not mark the
 * superblock dirty in this transaction if we modify these fields.
 * We still need to update the transaction deltas so that they get
 * applied to the incore superblock, but we don't want them to
 * cause the superblock to get locked and logged if these are the
 * only fields in the superblock that the transaction modifies.
 */
void
xfs_trans_mod_sb(
	xfs_trans_t	*tp,
	uint		field,
	int64_t		delta)
{
	uint32_t	flags = (XFS_TRANS_DIRTY|XFS_TRANS_SB_DIRTY);
	xfs_mount_t	*mp = tp->t_mountp;

	switch (field) {
	case XFS_TRANS_SB_ICOUNT:
		tp->t_icount_delta += delta;
		if (xfs_has_lazysbcount(mp))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_IFREE:
		tp->t_ifree_delta += delta;
		if (xfs_has_lazysbcount(mp))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_FDBLOCKS:
		/*
		 * Track the number of blocks allocated in the transaction.
		 * Make sure it does not exceed the number reserved. If so,
		 * shutdown as this can lead to accounting inconsistency.
		 */
		if (delta < 0) {
			tp->t_blk_res_used += (uint)-delta;
			if (tp->t_blk_res_used > tp->t_blk_res)
				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		} else if (delta > 0 && (tp->t_flags & XFS_TRANS_RES_FDBLKS)) {
			int64_t	blkres_delta;

			/*
			 * Return freed blocks directly to the reservation
			 * instead of the global pool, being careful not to
			 * overflow the trans counter. This is used to preserve
			 * reservation across chains of transaction rolls that
			 * repeatedly free and allocate blocks.
			 */
			blkres_delta = min_t(int64_t, delta,
					     UINT_MAX - tp->t_blk_res);
			tp->t_blk_res += blkres_delta;
			delta -= blkres_delta;
		}
		tp->t_fdblocks_delta += delta;
		if (xfs_has_lazysbcount(mp))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_RES_FDBLOCKS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		tp->t_res_fdblocks_delta += delta;
		if (xfs_has_lazysbcount(mp))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_FREXTENTS:
		/*
		 * Track the number of blocks allocated in the
		 * transaction.  Make sure it does not exceed the
		 * number reserved.
		 */
		if (delta < 0) {
			tp->t_rtx_res_used += (uint)-delta;
			ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res);
		}
		tp->t_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_RES_FREXTENTS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		ASSERT(delta < 0);
		tp->t_res_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_DBLOCKS:
		tp->t_dblocks_delta += delta;
		break;
	case XFS_TRANS_SB_AGCOUNT:
		ASSERT(delta > 0);
		tp->t_agcount_delta += delta;
		break;
	case XFS_TRANS_SB_IMAXPCT:
		tp->t_imaxpct_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSIZE:
		tp->t_rextsize_delta += delta;
		break;
	case XFS_TRANS_SB_RBMBLOCKS:
		tp->t_rbmblocks_delta += delta;
		break;
	case XFS_TRANS_SB_RBLOCKS:
		tp->t_rblocks_delta += delta;
		break;
	case XFS_TRANS_SB_REXTENTS:
		tp->t_rextents_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSLOG:
		tp->t_rextslog_delta += delta;
		break;
	default:
		ASSERT(0);
		return;
	}

	tp->t_flags |= flags;
}
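
/*
 * Editorial worked example (not part of the original file): a caller that
 * allocates four blocks against its reservation accounts for them with
 *
 *	xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, -4);
 *
 * which bumps t_blk_res_used by 4 and t_fdblocks_delta by -4.  On a
 * lazysbcount filesystem XFS_TRANS_SB_DIRTY is masked off above, so this
 * change alone does not cause the superblock buffer to be logged.
 */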

/*
 * xfs_trans_apply_sb_deltas() is called from the commit code
 * to bring the superblock buffer into the current transaction
 * and modify it as requested by earlier calls to xfs_trans_mod_sb().
 *
 * For now we just look at each field allowed to change and change
 * it if necessary.
 */
STATIC void
xfs_trans_apply_sb_deltas(
	xfs_trans_t	*tp)
{
	struct xfs_dsb	*sbp;
	struct xfs_buf	*bp;
	int		whole = 0;

	bp = xfs_trans_getsb(tp);
	sbp = bp->b_addr;

	/*
	 * Only update the superblock counters if we are logging them
	 */
	if (!xfs_has_lazysbcount(tp->t_mountp)) {
		if (tp->t_icount_delta)
			be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta);
		if (tp->t_ifree_delta)
			be64_add_cpu(&sbp->sb_ifree, tp->t_ifree_delta);
		if (tp->t_fdblocks_delta)
			be64_add_cpu(&sbp->sb_fdblocks, tp->t_fdblocks_delta);
		if (tp->t_res_fdblocks_delta)
			be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta);
	}

	/*
	 * Updating frextents requires careful handling because it does not
	 * behave like the lazysb counters: we cannot rely on log recovery in
	 * older kernels to recompute the value from the rtbitmap.  This means
	 * that the ondisk frextents must be consistent with the rtbitmap.
	 *
	 * Therefore, log the frextents change to the ondisk superblock and
	 * update the incore superblock so that future calls to xfs_log_sb
	 * write the correct value ondisk.
	 *
	 * Don't touch m_frextents because it includes incore reservations,
	 * and those are handled by the unreserve function.
	 */
	if (tp->t_frextents_delta || tp->t_res_frextents_delta) {
		struct xfs_mount	*mp = tp->t_mountp;
		int64_t			rtxdelta;

		rtxdelta = tp->t_frextents_delta + tp->t_res_frextents_delta;

		spin_lock(&mp->m_sb_lock);
		be64_add_cpu(&sbp->sb_frextents, rtxdelta);
		mp->m_sb.sb_frextents += rtxdelta;
		spin_unlock(&mp->m_sb_lock);
	}

	if (tp->t_dblocks_delta) {
		be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta);
		whole = 1;
	}
	if (tp->t_agcount_delta) {
		be32_add_cpu(&sbp->sb_agcount, tp->t_agcount_delta);
		whole = 1;
	}
	if (tp->t_imaxpct_delta) {
		sbp->sb_imax_pct += tp->t_imaxpct_delta;
		whole = 1;
	}
	if (tp->t_rextsize_delta) {
		be32_add_cpu(&sbp->sb_rextsize, tp->t_rextsize_delta);
		whole = 1;
	}
	if (tp->t_rbmblocks_delta) {
		be32_add_cpu(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta);
		whole = 1;
	}
	if (tp->t_rblocks_delta) {
		be64_add_cpu(&sbp->sb_rblocks, tp->t_rblocks_delta);
		whole = 1;
	}
	if (tp->t_rextents_delta) {
		be64_add_cpu(&sbp->sb_rextents, tp->t_rextents_delta);
		whole = 1;
	}
	if (tp->t_rextslog_delta) {
		sbp->sb_rextslog += tp->t_rextslog_delta;
		whole = 1;
	}

	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
	if (whole)
		/*
		 * Log the whole thing, the fields are noncontiguous.
		 */
		xfs_trans_log_buf(tp, bp, 0, sizeof(struct xfs_dsb) - 1);
	else
		/*
		 * Since all the modifiable fields are contiguous, we
		 * can get away with this.
		 */
		xfs_trans_log_buf(tp, bp, offsetof(struct xfs_dsb, sb_icount),
				  offsetof(struct xfs_dsb, sb_frextents) +
				  sizeof(sbp->sb_frextents) - 1);
}

/*
 * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations
 * and apply superblock counter changes to the in-core superblock.  The
 * t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT
 * applied to the in-core superblock -- that has already been done.
 *
 * If we are not logging superblock counters, then the inode allocated/free and
 * used block counts are not updated in the on disk superblock. In this case,
 * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we
 * still need to update the incore superblock with the changes.
 *
 * Deltas for the inode count are +/-64, hence we use a large batch size of 128
 * so we don't need to take the counter lock on every update.
 */
#define XFS_ICOUNT_BATCH	128

void
xfs_trans_unreserve_and_mod_sb(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	int64_t			blkdelta = tp->t_blk_res;
	int64_t			rtxdelta = tp->t_rtx_res;
	int64_t			idelta = 0;
	int64_t			ifreedelta = 0;

	/*
	 * Calculate the deltas.
	 *
	 * t_fdblocks_delta and t_frextents_delta can be positive or negative:
	 *
	 *  - positive values indicate blocks freed in the transaction.
	 *  - negative values indicate blocks allocated in the transaction
	 *
	 * Negative values can only happen if the transaction has a block
	 * reservation that covers the allocated block.  The end result is
	 * that the calculated delta values must always be positive and we
	 * can only put back previous allocated or reserved blocks here.
	 */
	ASSERT(tp->t_blk_res || tp->t_fdblocks_delta == 0);
	if (xfs_has_lazysbcount(mp) || (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
		blkdelta += tp->t_fdblocks_delta;
		ASSERT(blkdelta >= 0);
	}

	ASSERT(tp->t_rtx_res || tp->t_frextents_delta == 0);
	if (tp->t_flags & XFS_TRANS_SB_DIRTY) {
		rtxdelta += tp->t_frextents_delta;
		ASSERT(rtxdelta >= 0);
	}

	if (xfs_has_lazysbcount(mp) || (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
		idelta = tp->t_icount_delta;
		ifreedelta = tp->t_ifree_delta;
	}

	/* apply the per-cpu counters */
	if (blkdelta)
		xfs_add_fdblocks(mp, blkdelta);

	if (idelta)
		percpu_counter_add_batch(&mp->m_icount, idelta,
					 XFS_ICOUNT_BATCH);

	if (ifreedelta)
		percpu_counter_add(&mp->m_ifree, ifreedelta);

	if (rtxdelta)
		xfs_add_frextents(mp, rtxdelta);

	if (!(tp->t_flags & XFS_TRANS_SB_DIRTY))
		return;

	/* apply remaining deltas */
	spin_lock(&mp->m_sb_lock);
	mp->m_sb.sb_fdblocks += tp->t_fdblocks_delta + tp->t_res_fdblocks_delta;
	mp->m_sb.sb_icount += idelta;
	mp->m_sb.sb_ifree += ifreedelta;
	/*
	 * Do not touch sb_frextents here because we are dealing with incore
	 * reservation.  sb_frextents is not part of the lazy sb counters so it
	 * must be consistent with the ondisk rtbitmap and must never include
	 * incore reservations.
	 */
	mp->m_sb.sb_dblocks += tp->t_dblocks_delta;
	mp->m_sb.sb_agcount += tp->t_agcount_delta;
	mp->m_sb.sb_imax_pct += tp->t_imaxpct_delta;
	mp->m_sb.sb_rextsize += tp->t_rextsize_delta;
	if (tp->t_rextsize_delta) {
		mp->m_rtxblklog = log2_if_power2(mp->m_sb.sb_rextsize);
		mp->m_rtxblkmask = mask64_if_power2(mp->m_sb.sb_rextsize);
	}
	mp->m_sb.sb_rbmblocks += tp->t_rbmblocks_delta;
	mp->m_sb.sb_rblocks += tp->t_rblocks_delta;
	mp->m_sb.sb_rextents += tp->t_rextents_delta;
	mp->m_sb.sb_rextslog += tp->t_rextslog_delta;
	spin_unlock(&mp->m_sb_lock);

	/*
	 * Debug checks outside of the spinlock so they don't lock up the
	 * machine if they fail.
	 */
	ASSERT(mp->m_sb.sb_imax_pct >= 0);
	ASSERT(mp->m_sb.sb_rextslog >= 0);
}
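
/*
 * Editorial worked example (not part of the original file): a transaction
 * that reserved ten blocks (t_blk_res = 10) and allocated six of them has
 * t_fdblocks_delta = -6, so on a lazysbcount filesystem the code above
 * computes blkdelta = 10 + (-6) = 4 and returns exactly the four unused
 * blocks via xfs_add_fdblocks().  The six allocated blocks were already
 * deducted from the free space counter when the reservation was taken in
 * xfs_trans_reserve().
 */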

/* Add the given log item to the transaction's list of log items. */
void
xfs_trans_add_item(
	struct xfs_trans	*tp,
	struct xfs_log_item	*lip)
{
	ASSERT(lip->li_log == tp->t_mountp->m_log);
	ASSERT(lip->li_ailp == tp->t_mountp->m_ail);
	ASSERT(list_empty(&lip->li_trans));
	ASSERT(!test_bit(XFS_LI_DIRTY, &lip->li_flags));

	list_add_tail(&lip->li_trans, &tp->t_items);
	trace_xfs_trans_add_item(tp, _RET_IP_);
}

/*
 * Unlink the log item from the transaction.  The log item is no longer
 * considered dirty in this transaction, as the linked transaction has
 * finished, either by abort or commit completion.
 */
void
xfs_trans_del_item(
	struct xfs_log_item	*lip)
{
	clear_bit(XFS_LI_DIRTY, &lip->li_flags);
	list_del_init(&lip->li_trans);
}

/* Detach and unlock all of the items in a transaction */
static void
xfs_trans_free_items(
	struct xfs_trans	*tp,
	bool			abort)
{
	struct xfs_log_item	*lip, *next;

	trace_xfs_trans_free_items(tp, _RET_IP_);

	list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
		xfs_trans_del_item(lip);
		if (abort)
			set_bit(XFS_LI_ABORTED, &lip->li_flags);
		if (lip->li_ops->iop_release)
			lip->li_ops->iop_release(lip);
	}
}

/*
 * Sort transaction items prior to running precommit operations. This will
 * attempt to order the items such that they will always be locked in the same
 * order. Items that have no sort function are moved to the end of the list
 * and so are locked last.
 *
 * This may need refinement as different types of objects add sort functions.
 *
 * Function is more complex than it needs to be because we are comparing 64 bit
 * values and the function only returns 32 bit values.
 */
static int
xfs_trans_precommit_sort(
	void			*unused_arg,
	const struct list_head	*a,
	const struct list_head	*b)
{
	struct xfs_log_item	*lia = container_of(a,
					struct xfs_log_item, li_trans);
	struct xfs_log_item	*lib = container_of(b,
					struct xfs_log_item, li_trans);
	int64_t			diff;

	/*
	 * If both items are non-sortable, leave them alone. If only one is
	 * sortable, move the non-sortable item towards the end of the list.
	 */
	if (!lia->li_ops->iop_sort && !lib->li_ops->iop_sort)
		return 0;
	if (!lia->li_ops->iop_sort)
		return 1;
	if (!lib->li_ops->iop_sort)
		return -1;

	diff = lia->li_ops->iop_sort(lia) - lib->li_ops->iop_sort(lib);
	if (diff < 0)
		return -1;
	if (diff > 0)
		return 1;
	return 0;
}

/*
 * Run transaction precommit functions.
 *
 * If there is an error in any of the callouts, then stop immediately and
 * trigger a shutdown to abort the transaction. There is no recovery possible
 * from errors at this point as the transaction is dirty....
 */
static int
xfs_trans_run_precommits(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_log_item	*lip, *n;
	int			error = 0;

	/*
	 * Sort the item list to avoid ABBA deadlocks with other transactions
	 * running precommit operations that lock and unlock items, like
	 * inode cluster buffers.
	 */
	list_sort(NULL, &tp->t_items, xfs_trans_precommit_sort);

	/*
	 * Precommit operations can remove the log item from the transaction
	 * if the log item exists purely to delay modifications until they
	 * can be ordered against other operations. Hence we have to use
	 * list_for_each_entry_safe() here.
	 */
	list_for_each_entry_safe(lip, n, &tp->t_items, li_trans) {
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;
		if (lip->li_ops->iop_precommit) {
			error = lip->li_ops->iop_precommit(tp, lip);
			if (error)
				break;
		}
	}
	if (error)
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	return error;
}

/*
 * Commit the given transaction to the log.
 *
 * XFS disk error handling mechanism is not based on a typical
 * transaction abort mechanism. Logically after the filesystem
 * gets marked 'SHUTDOWN', we can't let any new transactions
 * be durable - ie. committed to disk - because some metadata might
 * be inconsistent. In such cases, this returns an error, and the
 * caller may assume that all locked objects joined to the transaction
 * have already been unlocked as if the commit had succeeded.
 * Do not reference the transaction structure after this call.
 */
static int
__xfs_trans_commit(
	struct xfs_trans	*tp,
	bool			regrant)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xlog		*log = mp->m_log;
	xfs_csn_t		commit_seq = 0;
	int			error = 0;
	int			sync = tp->t_flags & XFS_TRANS_SYNC;

	trace_xfs_trans_commit(tp, _RET_IP_);

	error = xfs_trans_run_precommits(tp);
	if (error) {
		if (tp->t_flags & XFS_TRANS_PERM_LOG_RES)
			xfs_defer_cancel(tp);
		goto out_unreserve;
	}

	/*
	 * Finish deferred items on final commit. Only permanent transactions
	 * should ever have deferred ops.
	 */
	WARN_ON_ONCE(!list_empty(&tp->t_dfops) &&
		     !(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
	if (!regrant && (tp->t_flags & XFS_TRANS_PERM_LOG_RES)) {
		error = xfs_defer_finish_noroll(&tp);
		if (error)
			goto out_unreserve;

		/* Run precommits from final tx in defer chain */
		error = xfs_trans_run_precommits(tp);
		if (error)
			goto out_unreserve;
	}

	/*
	 * If there is nothing to be logged by the transaction,
	 * then unlock all of the items associated with the
	 * transaction and free the transaction structure.
	 * Also make sure to return any reserved blocks to
	 * the free pool.
	 */
	if (!(tp->t_flags & XFS_TRANS_DIRTY))
		goto out_unreserve;

	/*
	 * We must check against log shutdown here because we cannot abort log
	 * items and leave them dirty, inconsistent and unpinned in memory while
	 * the log is active. This leaves them open to being written back to
	 * disk, and that will lead to on-disk corruption.
	 */
	if (xlog_is_shutdown(log)) {
		error = -EIO;
		goto out_unreserve;
	}

	ASSERT(tp->t_ticket != NULL);

	/*
	 * If we need to update the superblock, then do it now.
	 */
	if (tp->t_flags & XFS_TRANS_SB_DIRTY)
		xfs_trans_apply_sb_deltas(tp);
	xfs_trans_apply_dquot_deltas(tp);

	xlog_cil_commit(log, tp, &commit_seq, regrant);

	xfs_trans_free(tp);

	/*
	 * If the transaction needs to be synchronous, then force the
	 * log out now and wait for it.
	 */
	if (sync) {
		error = xfs_log_force_seq(mp, commit_seq, XFS_LOG_SYNC, NULL);
		XFS_STATS_INC(mp, xs_trans_sync);
	} else {
		XFS_STATS_INC(mp, xs_trans_async);
	}

	return error;

out_unreserve:
	xfs_trans_unreserve_and_mod_sb(tp);

	/*
	 * It is indeed possible for the transaction to be not dirty but
	 * the dqinfo portion to be.  All that means is that we have some
	 * (non-persistent) quota reservations that need to be unreserved.
	 */
	xfs_trans_unreserve_and_mod_dquots(tp);
	if (tp->t_ticket) {
		if (regrant && !xlog_is_shutdown(log))
			xfs_log_ticket_regrant(log, tp->t_ticket);
		else
			xfs_log_ticket_ungrant(log, tp->t_ticket);
		tp->t_ticket = NULL;
	}
	xfs_trans_free_items(tp, !!error);
	xfs_trans_free(tp);

	XFS_STATS_INC(mp, xs_trans_empty);
	return error;
}

int
xfs_trans_commit(
	struct xfs_trans	*tp)
{
	return __xfs_trans_commit(tp, false);
}

/*
 * Unlock all of the transaction's items and free the transaction.  If the
 * transaction is dirty, we must shut down the filesystem because there is no
 * way to restore them to their previous state.
 *
 * If the transaction has made a log reservation, make sure to release it as
 * well.
 *
 * This is a high level function (equivalent to calling trans_commit on a
 * transaction with no modifications made) and so can be called after the
 * transaction has effectively been aborted due to the mount being shut down.
 * However, if the mount has not been shut down and the transaction is dirty we
 * will shut the mount down and, in doing so, that guarantees that the log is
 * shut down, too. Hence we don't need to be as careful with shutdown state and
 * dirty items here as we do in xfs_trans_commit().
 */
void
xfs_trans_cancel(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xlog		*log = mp->m_log;
	bool			dirty = (tp->t_flags & XFS_TRANS_DIRTY);

	trace_xfs_trans_cancel(tp, _RET_IP_);

	/*
	 * It's never valid to cancel a transaction with deferred ops attached,
	 * because the transaction is effectively dirty.  Complain about this
	 * loudly before freeing the in-memory deferred work state and shutting
	 * down the filesystem.
	 */
	if (!list_empty(&tp->t_dfops)) {
		ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
		dirty = true;
		xfs_defer_cancel(tp);
	}

	/*
	 * See if the caller is relying on us to shut down the filesystem. We
	 * only want an error report if there isn't already a shutdown in
	 * progress, so we only need to check against the mount shutdown state
	 * here.
	 */
	if (dirty && !xfs_is_shutdown(mp)) {
		XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	}
#ifdef DEBUG
	/* Log items need to be consistent until the log is shut down. */
	if (!dirty && !xlog_is_shutdown(log)) {
		struct xfs_log_item *lip;

		list_for_each_entry(lip, &tp->t_items, li_trans)
			ASSERT(!xlog_item_is_intent(lip));
	}
#endif
	xfs_trans_unreserve_and_mod_sb(tp);
	xfs_trans_unreserve_and_mod_dquots(tp);

	if (tp->t_ticket) {
		xfs_log_ticket_ungrant(log, tp->t_ticket);
		tp->t_ticket = NULL;
	}

	xfs_trans_free_items(tp, dirty);
	xfs_trans_free(tp);
}

/*
 * Roll from one trans in the sequence of PERMANENT transactions to
 * the next: permanent transactions are only flushed out when
 * committed with xfs_trans_commit(), but we still want as soon
 * as possible to let chunks of it go to the log. So we commit the
 * chunk we've been working on and get a new transaction to continue.
 */
int
xfs_trans_roll(
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*trans = *tpp;
	struct xfs_trans_res	tres;
	int			error;

	trace_xfs_trans_roll(trans, _RET_IP_);

	/*
	 * Copy the critical parameters from one trans to the next.
	 */
	tres.tr_logres = trans->t_log_res;
	tres.tr_logcount = trans->t_log_count;

	*tpp = xfs_trans_dup(trans);

	/*
	 * Commit the current transaction.
	 * If this commit failed, then it'd just unlock those items that
	 * are not marked ihold. That also means that a filesystem shutdown
	 * is in progress. The caller takes the responsibility to cancel
	 * the duplicate transaction that gets returned.
	 */
	error = __xfs_trans_commit(trans, true);
	if (error)
		return error;

	/*
	 * Reserve space in the log for the next transaction.
	 * This also pushes items in the "AIL", the list of logged items,
	 * out to disk if they are taking up space at the tail of the log
	 * that we want to use. This requires that either nothing be locked
	 * across this call, or that anything that is locked be logged in
	 * the prior and the next transactions.
	 */
	tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
	return xfs_trans_reserve(*tpp, &tres, 0, 0);
}
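
/*
 * Editorial usage sketch (not part of the original file): a long-running
 * operation under a permanent log reservation commits bounded chunks and
 * rolls to a fresh transaction.  Joined objects must be re-joined after
 * each roll (xfs_trans_roll_inode() does this for a single locked inode);
 * the loop shape here is an illustrative assumption:
 *
 *	while (more work to do) {
 *		...log a bounded chunk of changes...
 *		error = xfs_trans_roll_inode(&tp, ip);
 *		if (error)
 *			break;		(the caller must still cancel tp)
 *	}
 */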
/*
 * Allocate a transaction, lock and join the inode to it, and reserve quota.
 *
 * The caller must ensure that the on-disk dquots attached to this inode have
 * already been allocated and initialized.  The caller is responsible for
 * releasing ILOCK_EXCL if a new transaction is returned.
 */
int
xfs_trans_alloc_inode(
	struct xfs_inode	*ip,
	struct xfs_trans_res	*resv,
	unsigned int		dblocks,
	unsigned int		rblocks,
	bool			force,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp;
	struct xfs_mount	*mp = ip->i_mount;
	bool			retried = false;
	int			error;

retry:
	error = xfs_trans_alloc(mp, resv, dblocks,
			xfs_extlen_to_rtxlen(mp, rblocks),
			force ? XFS_TRANS_RESERVE : 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	error = xfs_qm_dqattach_locked(ip, false);
	if (error) {
		/* Caller should have allocated the dquots! */
		ASSERT(error != -ENOENT);
		goto out_cancel;
	}

	error = xfs_trans_reserve_quota_nblks(tp, ip, dblocks, rblocks, force);
	if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
		xfs_trans_cancel(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_blockgc_free_quota(ip, 0);
		retried = true;
		goto retry;
	}
	if (error)
		goto out_cancel;

	*tpp = tp;
	return 0;

out_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * Try to reserve more blocks for a transaction.
 *
 * This is for callers that need to attach resources to a transaction, scan
 * those resources to determine the space reservation requirements, and then
 * modify the attached resources.  In other words, online repair.  This can
 * fail due to ENOSPC, so the caller must be able to cancel the transaction
 * without shutting down the fs.
 */
int
xfs_trans_reserve_more(
	struct xfs_trans	*tp,
	unsigned int		blocks,
	unsigned int		rtextents)
{
	struct xfs_trans_res	resv = { };

	return xfs_trans_reserve(tp, &resv, blocks, rtextents);
}
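/*
 * Assumed-caller sketch for xfs_trans_alloc_inode() above: on success the
 * inode comes back joined and ILOCK_EXCL'd, but not set to unlock at
 * commit, so the caller drops the lock itself.  The tr_write reservation
 * is an illustrative choice.
 */
STATIC int
xfs_example_write_setup(
	struct xfs_inode	*ip,
	unsigned int		dblocks)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc_inode(ip, &M_RES(ip->i_mount)->tr_write,
			dblocks, 0, false, &tp);
	if (error)
		return error;

	/* ... make the inode changes here, then log them ... */
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}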
/*
 * Try to reserve more blocks and file quota for a transaction.  Same
 * conditions of usage as xfs_trans_reserve_more.
 */
int
xfs_trans_reserve_more_inode(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	unsigned int		dblocks,
	unsigned int		rblocks,
	bool			force_quota)
{
	struct xfs_trans_res	resv = { };
	struct xfs_mount	*mp = ip->i_mount;
	unsigned int		rtx = xfs_extlen_to_rtxlen(mp, rblocks);
	int			error;

	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);

	error = xfs_trans_reserve(tp, &resv, dblocks, rtx);
	if (error)
		return error;

	if (!XFS_IS_QUOTA_ON(mp) || xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
		return 0;

	if (tp->t_flags & XFS_TRANS_RESERVE)
		force_quota = true;

	error = xfs_trans_reserve_quota_nblks(tp, ip, dblocks, rblocks,
			force_quota);
	if (!error)
		return 0;

	/* Quota failed, give back the new reservation. */
	xfs_add_fdblocks(mp, dblocks);
	tp->t_blk_res -= dblocks;
	xfs_add_frextents(mp, rtx);
	tp->t_rtx_res -= rtx;
	return error;
}

/*
 * Allocate a transaction in preparation for inode creation by reserving
 * quota against the given dquots.  Callers are not required to hold any
 * inode locks.
 */
int
xfs_trans_alloc_icreate(
	struct xfs_mount	*mp,
	struct xfs_trans_res	*resv,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	unsigned int		dblocks,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp;
	bool			retried = false;
	int			error;

retry:
	error = xfs_trans_alloc(mp, resv, dblocks, 0, 0, &tp);
	if (error)
		return error;

	error = xfs_trans_reserve_quota_icreate(tp, udqp, gdqp, pdqp, dblocks);
	if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
		xfs_trans_cancel(tp);
		xfs_blockgc_free_dquots(mp, udqp, gdqp, pdqp, 0);
		retried = true;
		goto retry;
	}
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	*tpp = tp;
	return 0;
}
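/*
 * Assumed-caller sketch for xfs_trans_alloc_icreate() above: in a create
 * path the dquots would typically come from xfs_qm_vop_dqalloc(), and the
 * block count from the inode allocation space reservation.  Both choices
 * here are illustrative.
 */
STATIC int
xfs_example_icreate_setup(
	struct xfs_mount	*mp,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	struct xfs_trans	**tpp)
{
	unsigned int		dblocks = XFS_IALLOC_SPACE_RES(mp);

	/* One call reserves log space, disk blocks, and icreate quota. */
	return xfs_trans_alloc_icreate(mp, &M_RES(mp)->tr_create, udqp,
			gdqp, pdqp, dblocks, tpp);
}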
/*
 * Allocate a transaction, lock and join the inode to it, and reserve quota
 * in preparation for inode attribute changes that include uid, gid, or prid
 * changes.
 *
 * The caller must ensure that the on-disk dquots attached to this inode have
 * already been allocated and initialized.  The ILOCK will be dropped when
 * the transaction is committed or cancelled.
 */
int
xfs_trans_alloc_ichange(
	struct xfs_inode	*ip,
	struct xfs_dquot	*new_udqp,
	struct xfs_dquot	*new_gdqp,
	struct xfs_dquot	*new_pdqp,
	bool			force,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*udqp;
	struct xfs_dquot	*gdqp;
	struct xfs_dquot	*pdqp;
	bool			retried = false;
	int			error;

retry:
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	error = xfs_qm_dqattach_locked(ip, false);
	if (error) {
		/* Caller should have allocated the dquots! */
		ASSERT(error != -ENOENT);
		goto out_cancel;
	}

	/*
	 * For each quota type, skip quota reservations if the inode's dquots
	 * now match the ones that came from the caller, or the caller didn't
	 * pass one in.  The inode's dquots can change if we drop the ILOCK to
	 * perform a blockgc scan, so we must preserve the caller's arguments.
	 */
	udqp = (new_udqp != ip->i_udquot) ? new_udqp : NULL;
	gdqp = (new_gdqp != ip->i_gdquot) ? new_gdqp : NULL;
	pdqp = (new_pdqp != ip->i_pdquot) ? new_pdqp : NULL;
	if (udqp || gdqp || pdqp) {
		unsigned int	qflags = XFS_QMOPT_RES_REGBLKS;

		if (force)
			qflags |= XFS_QMOPT_FORCE_RES;

		/*
		 * Reserve enough quota to handle blocks on disk and reserved
		 * for a delayed allocation.  We'll actually transfer the
		 * delalloc reservation between dquots at chown time, even
		 * though that part is only semi-transactional.
		 */
		error = xfs_trans_reserve_quota_bydquots(tp, mp, udqp, gdqp,
				pdqp, ip->i_nblocks + ip->i_delayed_blks,
				1, qflags);
		if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
			xfs_trans_cancel(tp);
			xfs_blockgc_free_dquots(mp, udqp, gdqp, pdqp, 0);
			retried = true;
			goto retry;
		}
		if (error)
			goto out_cancel;
	}

	*tpp = tp;
	return 0;

out_cancel:
	xfs_trans_cancel(tp);
	return error;
}
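/*
 * Assumed-caller sketch for xfs_trans_alloc_ichange() above: a chown-style
 * update hands in the prospective dquots and, because the inode was joined
 * with ILOCK_EXCL, the lock is released automatically at commit or cancel.
 * The ownership-switch step is elided.
 */
STATIC int
xfs_example_chown(
	struct xfs_inode	*ip,
	struct xfs_dquot	*new_udqp,
	struct xfs_dquot	*new_gdqp,
	struct xfs_dquot	*new_pdqp)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc_ichange(ip, new_udqp, new_gdqp, new_pdqp,
			false, &tp);
	if (error)
		return error;

	/* ... swap i_udquot/i_gdquot/i_pdquot and log the inode here ... */

	return xfs_trans_commit(tp);
}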
/*
 * Allocate a transaction, lock and join the directory and child inodes to
 * it, and reserve quota for a directory update.  If there isn't sufficient
 * space, @dblocks will be set to zero for a reservationless directory update
 * and @nospace_error will be set to a negative errno describing the space
 * constraint we hit.
 *
 * The caller must ensure that the on-disk dquots attached to this inode have
 * already been allocated and initialized.  The ILOCKs will be dropped when
 * the transaction is committed or cancelled.
 *
 * Caller is responsible for unlocking the inodes manually upon return.
 */
int
xfs_trans_alloc_dir(
	struct xfs_inode	*dp,
	struct xfs_trans_res	*resv,
	struct xfs_inode	*ip,
	unsigned int		*dblocks,
	struct xfs_trans	**tpp,
	int			*nospace_error)
{
	struct xfs_trans	*tp;
	struct xfs_mount	*mp = ip->i_mount;
	unsigned int		resblks;
	bool			retried = false;
	int			error;

retry:
	*nospace_error = 0;
	resblks = *dblocks;
	error = xfs_trans_alloc(mp, resv, resblks, 0, 0, &tp);
	if (error == -ENOSPC) {
		*nospace_error = error;
		resblks = 0;
		error = xfs_trans_alloc(mp, resv, resblks, 0, 0, &tp);
	}
	if (error)
		return error;

	xfs_lock_two_inodes(dp, XFS_ILOCK_EXCL, ip, XFS_ILOCK_EXCL);

	xfs_trans_ijoin(tp, dp, 0);
	xfs_trans_ijoin(tp, ip, 0);

	error = xfs_qm_dqattach_locked(dp, false);
	if (error) {
		/* Caller should have allocated the dquots! */
		ASSERT(error != -ENOENT);
		goto out_cancel;
	}

	error = xfs_qm_dqattach_locked(ip, false);
	if (error) {
		/* Caller should have allocated the dquots! */
		ASSERT(error != -ENOENT);
		goto out_cancel;
	}

	if (resblks == 0)
		goto done;

	error = xfs_trans_reserve_quota_nblks(tp, dp, resblks, 0, false);
	if (error == -EDQUOT || error == -ENOSPC) {
		if (!retried) {
			xfs_trans_cancel(tp);
			xfs_iunlock(dp, XFS_ILOCK_EXCL);
			if (dp != ip)
				xfs_iunlock(ip, XFS_ILOCK_EXCL);
			xfs_blockgc_free_quota(dp, 0);
			retried = true;
			goto retry;
		}

		*nospace_error = error;
		resblks = 0;
		error = 0;
	}
	if (error)
		goto out_cancel;

done:
	*tpp = tp;
	*dblocks = resblks;
	return 0;

out_cancel:
	xfs_trans_cancel(tp);
	return error;
}
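/*
 * Assumed-caller sketch for xfs_trans_alloc_dir() above: a link-style
 * update falls back to the saved @nospace_error only if the
 * reservationless retry also runs out of space.  The reservation macro
 * choice and xfs_example_add_entry() are illustrative assumptions.
 */
STATIC int
xfs_example_dir_update(
	struct xfs_inode	*dp,
	struct xfs_inode	*ip)
{
	struct xfs_trans	*tp;
	unsigned int		resblks;
	int			nospace_error = 0;
	int			error;

	resblks = XFS_LINK_SPACE_RES(dp->i_mount, MAXNAMELEN);
	error = xfs_trans_alloc_dir(dp, &M_RES(dp->i_mount)->tr_link, ip,
			&resblks, &tp, &nospace_error);
	if (error)
		return error;

	error = xfs_example_add_entry(tp, dp, ip, resblks);	/* hypothetical */
	if (error == -ENOSPC && nospace_error)
		error = nospace_error;
	if (error)
		xfs_trans_cancel(tp);
	else
		error = xfs_trans_commit(tp);

	/* Per the comment above, the caller unlocks the inodes itself. */
	xfs_iunlock(dp, XFS_ILOCK_EXCL);
	if (ip != dp)
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}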