// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
#include "xfs_trans_priv.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_bmap_btree.h"
#include "xfs_error.h"
#include "xfs_health.h"

/*
 * Lock order:
 *
 * ip->i_lock
 *   qi->qi_tree_lock
 *     dquot->q_qlock (xfs_dqlock() and friends)
 *       dquot->q_flush (xfs_dqflock() and friends)
 *       qi->qi_lru_lock
 *
 * If two dquots need to be locked the order is user before group/project,
 * otherwise by the lowest id first, see xfs_dqlock2.
 */

struct kmem_cache		*xfs_dqtrx_cache;
static struct kmem_cache	*xfs_dquot_cache;

static struct lock_class_key xfs_dquot_group_class;
static struct lock_class_key xfs_dquot_project_class;

/* Record observations of quota corruption with the health tracking system. */
static void
xfs_dquot_mark_sick(
	struct xfs_dquot	*dqp)
{
	struct xfs_mount	*mp = dqp->q_mount;

	switch (dqp->q_type) {
	case XFS_DQTYPE_USER:
		xfs_fs_mark_sick(mp, XFS_SICK_FS_UQUOTA);
		break;
	case XFS_DQTYPE_GROUP:
		xfs_fs_mark_sick(mp, XFS_SICK_FS_GQUOTA);
		break;
	case XFS_DQTYPE_PROJ:
		xfs_fs_mark_sick(mp, XFS_SICK_FS_PQUOTA);
		break;
	default:
		ASSERT(0);
		break;
	}
}

/*
 * This is called to free all the memory associated with a dquot.
 */
void
xfs_qm_dqdestroy(
	struct xfs_dquot	*dqp)
{
	ASSERT(list_empty(&dqp->q_lru));

	kvfree(dqp->q_logitem.qli_item.li_lv_shadow);
	mutex_destroy(&dqp->q_qlock);

	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot);
	kmem_cache_free(xfs_dquot_cache, dqp);
}

/*
 * If default limits are in force, push them into the dquot now.
 * We overwrite the dquot limits only if they are zero and this
 * is not the root dquot.
 */
void
xfs_qm_adjust_dqlimits(
	struct xfs_dquot	*dq)
{
	struct xfs_mount	*mp = dq->q_mount;
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_def_quota	*defq;
	int			prealloc = 0;

	ASSERT(dq->q_id);
	defq = xfs_get_defquota(q, xfs_dquot_type(dq));

	if (!dq->q_blk.softlimit) {
		dq->q_blk.softlimit = defq->blk.soft;
		prealloc = 1;
	}
	if (!dq->q_blk.hardlimit) {
		dq->q_blk.hardlimit = defq->blk.hard;
		prealloc = 1;
	}
	if (!dq->q_ino.softlimit)
		dq->q_ino.softlimit = defq->ino.soft;
	if (!dq->q_ino.hardlimit)
		dq->q_ino.hardlimit = defq->ino.hard;
	if (!dq->q_rtb.softlimit)
		dq->q_rtb.softlimit = defq->rtb.soft;
	if (!dq->q_rtb.hardlimit)
		dq->q_rtb.hardlimit = defq->rtb.hard;

	if (prealloc)
		xfs_dquot_set_prealloc_limits(dq);
}

/* Set the expiration time of a quota's grace period. */
time64_t
xfs_dquot_set_timeout(
	struct xfs_mount	*mp,
	time64_t		timeout)
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;

	return clamp_t(time64_t, timeout, qi->qi_expiry_min,
					  qi->qi_expiry_max);
}

/* Set the length of the default grace period. */
time64_t
xfs_dquot_set_grace_period(
	time64_t		grace)
{
	return clamp_t(time64_t, grace, XFS_DQ_GRACE_MIN, XFS_DQ_GRACE_MAX);
}

/*
 * Determine if this quota counter is over either limit and set the quota
 * timers as appropriate.
 */
static inline void
xfs_qm_adjust_res_timer(
	struct xfs_mount	*mp,
	struct xfs_dquot_res	*res,
	struct xfs_quota_limits	*qlim)
{
	ASSERT(res->hardlimit == 0 || res->softlimit <= res->hardlimit);

	if ((res->softlimit && res->count > res->softlimit) ||
	    (res->hardlimit && res->count > res->hardlimit)) {
		if (res->timer == 0)
			res->timer = xfs_dquot_set_timeout(mp,
					ktime_get_real_seconds() + qlim->time);
	} else {
		res->timer = 0;
	}
}

/*
 * Check the limits and timers of a dquot and start or reset timers
 * if necessary.
 * This gets called even when quota enforcement is OFF, which makes our
 * life a little less complicated.  (We just don't reject any quota
 * reservations in that case.)
 * We also return 0 as the values of the timers in Q_GETQUOTA calls, when
 * enforcement is off.
 * In contrast, warnings are a little different in that they don't
 * 'automatically' get started when limits get exceeded.  They do
 * get reset to zero, however, when we find the count to be under
 * the soft limit (they are only ever set non-zero via userspace).
 */
void
xfs_qm_adjust_dqtimers(
	struct xfs_dquot	*dq)
{
	struct xfs_mount	*mp = dq->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct xfs_def_quota	*defq;

	ASSERT(dq->q_id);
	defq = xfs_get_defquota(qi, xfs_dquot_type(dq));

	xfs_qm_adjust_res_timer(dq->q_mount, &dq->q_blk, &defq->blk);
	xfs_qm_adjust_res_timer(dq->q_mount, &dq->q_ino, &defq->ino);
	xfs_qm_adjust_res_timer(dq->q_mount, &dq->q_rtb, &defq->rtb);
}
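
/*
 * Illustrative example of the timer logic above (all values hypothetical):
 * if a user's block soft limit is 100 blocks and the configured grace time
 * is one week, the first adjustment that sees q_blk.count at 120 arms
 * q_blk.timer at "now + one week" (clamped by xfs_dquot_set_timeout()).
 * If usage later falls back to 90 blocks, the next call through
 * xfs_qm_adjust_dqtimers() clears the timer to zero again.
 */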

/*
 * Initialize a buffer full of dquots and log the whole thing.
 */
void
xfs_qm_init_dquot_blk(
	struct xfs_trans	*tp,
	xfs_dqid_t		id,
	xfs_dqtype_t		type,
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_dqblk	*d;
	xfs_dqid_t		curid;
	unsigned int		qflag;
	unsigned int		blftype;
	int			i;

	ASSERT(tp);
	ASSERT(xfs_buf_islocked(bp));

	switch (type) {
	case XFS_DQTYPE_USER:
		qflag = XFS_UQUOTA_CHKD;
		blftype = XFS_BLF_UDQUOT_BUF;
		break;
	case XFS_DQTYPE_PROJ:
		qflag = XFS_PQUOTA_CHKD;
		blftype = XFS_BLF_PDQUOT_BUF;
		break;
	case XFS_DQTYPE_GROUP:
		qflag = XFS_GQUOTA_CHKD;
		blftype = XFS_BLF_GDQUOT_BUF;
		break;
	default:
		ASSERT(0);
		return;
	}

	d = bp->b_addr;

	/*
	 * ID of the first dquot in the block - IDs are zero-based.
	 */
	curid = id - (id % q->qi_dqperchunk);
	memset(d, 0, BBTOB(q->qi_dqchunklen));
	for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) {
		d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
		d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
		d->dd_diskdq.d_id = cpu_to_be32(curid);
		d->dd_diskdq.d_type = type;
		if (curid > 0 && xfs_has_bigtime(mp))
			d->dd_diskdq.d_type |= XFS_DQTYPE_BIGTIME;
		if (xfs_has_crc(mp)) {
			uuid_copy(&d->dd_uuid, &mp->m_sb.sb_meta_uuid);
			xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
					 XFS_DQUOT_CRC_OFF);
		}
	}

	xfs_trans_dquot_buf(tp, bp, blftype);

	/*
	 * quotacheck uses delayed writes to update all the dquots on disk in an
	 * efficient manner instead of logging the individual dquot changes as
	 * they are made. However if we log the buffer allocated here and crash
	 * after quotacheck while the logged initialisation is still in the
	 * active region of the log, log recovery can replay the dquot buffer
	 * initialisation over the top of the checked dquots and corrupt quota
	 * accounting.
	 *
	 * To avoid this problem, quotacheck cannot log the initialised buffer.
	 * We must still dirty the buffer and write it back before the
	 * allocation transaction clears the log. Therefore, mark the buffer as
	 * ordered instead of logging it directly. This is safe for quotacheck
	 * because it detects and repairs allocated but uninitialized dquot
	 * blocks in the quota inodes.
	 */
	if (!(mp->m_qflags & qflag))
		xfs_trans_ordered_buf(tp, bp);
	else
		xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
}

/*
 * Initialize the dynamic speculative preallocation thresholds. The lo/hi
 * watermarks correspond to the soft and hard limits by default. If a soft limit
 * is not specified, we use 95% of the hard limit.
 */
void
xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
{
	uint64_t		space;

	dqp->q_prealloc_hi_wmark = dqp->q_blk.hardlimit;
	dqp->q_prealloc_lo_wmark = dqp->q_blk.softlimit;
	if (!dqp->q_prealloc_lo_wmark) {
		dqp->q_prealloc_lo_wmark = dqp->q_prealloc_hi_wmark;
		do_div(dqp->q_prealloc_lo_wmark, 100);
		dqp->q_prealloc_lo_wmark *= 95;
	}

	space = dqp->q_prealloc_hi_wmark;

	do_div(space, 100);
	dqp->q_low_space[XFS_QLOWSP_1_PCNT] = space;
	dqp->q_low_space[XFS_QLOWSP_3_PCNT] = space * 3;
	dqp->q_low_space[XFS_QLOWSP_5_PCNT] = space * 5;
}
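
/*
 * Worked example for the watermark arithmetic above (numbers are
 * illustrative only): with a block hard limit of 1000 and no soft limit,
 * the low watermark becomes 1000 / 100 * 95 = 950 blocks, and
 * q_low_space[] holds 10, 30 and 50 blocks for the 1%, 3% and 5%
 * thresholds respectively.
 */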

/*
 * Ensure that the given in-core dquot has a buffer on disk backing it, and
 * return the buffer locked and held. This is called when the bmapi finds a
 * hole.
 */
STATIC int
xfs_dquot_disk_alloc(
	struct xfs_dquot	*dqp,
	struct xfs_buf		**bpp)
{
	struct xfs_bmbt_irec	map;
	struct xfs_trans	*tp;
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_buf		*bp;
	xfs_dqtype_t		qtype = xfs_dquot_type(dqp);
	struct xfs_inode	*quotip = xfs_quota_inode(mp, qtype);
	int			nmaps = 1;
	int			error;

	trace_xfs_dqalloc(dqp);

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_dqalloc,
			XFS_QM_DQALLOC_SPACE_RES(mp), 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(quotip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, quotip, 0);

	if (!xfs_this_quota_on(dqp->q_mount, qtype)) {
		/*
		 * Return if this type of quota was turned off while we didn't
		 * have the inode lock.
		 */
		error = -ESRCH;
		goto err_cancel;
	}

	error = xfs_iext_count_extend(tp, quotip, XFS_DATA_FORK,
			XFS_IEXT_ADD_NOSPLIT_CNT);
	if (error)
		goto err_cancel;

	/* Create the block mapping. */
	error = xfs_bmapi_write(tp, quotip, dqp->q_fileoffset,
			XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA, 0, &map,
			&nmaps);
	if (error)
		goto err_cancel;

	ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
	ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
	       (map.br_startblock != HOLESTARTBLOCK));

	/*
	 * Keep track of the blkno to save a lookup later
	 */
	dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

	/* now we can just get the buffer (there's nothing to read yet) */
	error = xfs_trans_get_buf(tp, mp->m_ddev_targp, dqp->q_blkno,
			mp->m_quotainfo->qi_dqchunklen, 0, &bp);
	if (error)
		goto err_cancel;
	bp->b_ops = &xfs_dquot_buf_ops;

	/*
	 * Make a chunk of dquots out of this buffer and log
	 * the entire thing.
	 */
	xfs_qm_init_dquot_blk(tp, dqp->q_id, qtype, bp);
	xfs_buf_set_ref(bp, XFS_DQUOT_REF);

	/*
	 * Hold the buffer and join it to the dfops so that we'll still own
	 * the buffer when we return to the caller.  The buffer disposal on
	 * error must be paid attention to very carefully, as it has been
	 * broken since commit efa092f3d4c6 "[XFS] Fixes a bug in the quota
	 * code when allocating a new dquot record" in 2005, and the later
	 * conversion to xfs_defer_ops in commit 310a75a3c6c747 failed to keep
	 * the buffer locked across the _defer_finish call.  We can now do
	 * this correctly with xfs_defer_bjoin.
	 *
	 * Above, we allocated a disk block for the dquot information and used
	 * get_buf to initialize the dquot.  If the _defer_finish fails, the
	 * old transaction is gone but the new buffer is not joined or held to
	 * any transaction, so we must _buf_relse it.
	 *
	 * If everything succeeds, the caller of this function is returned a
	 * buffer that is locked and held to the transaction.  The caller
	 * is responsible for unlocking any buffer passed back, either
	 * manually or by committing the transaction.  On error, the buffer is
	 * released and not passed back.
	 *
	 * Keep the quota inode ILOCKed until after the transaction commit to
	 * maintain the atomicity of bmap/rmap updates.
	 */
	xfs_trans_bhold(tp, bp);
	error = xfs_trans_commit(tp);
	xfs_iunlock(quotip, XFS_ILOCK_EXCL);
	if (error) {
		xfs_buf_relse(bp);
		return error;
	}

	*bpp = bp;
	return 0;

err_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(quotip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * Read in the in-core dquot's on-disk metadata and return the buffer.
 * Returns ENOENT to signal a hole.
 */
STATIC int
xfs_dquot_disk_read(
	struct xfs_mount	*mp,
	struct xfs_dquot	*dqp,
	struct xfs_buf		**bpp)
{
	struct xfs_bmbt_irec	map;
	struct xfs_buf		*bp;
	xfs_dqtype_t		qtype = xfs_dquot_type(dqp);
	struct xfs_inode	*quotip = xfs_quota_inode(mp, qtype);
	uint			lock_mode;
	int			nmaps = 1;
	int			error;

	lock_mode = xfs_ilock_data_map_shared(quotip);
	if (!xfs_this_quota_on(mp, qtype)) {
		/*
		 * Return if this type of quota was turned off while we
		 * didn't have the quota inode lock.
		 */
		xfs_iunlock(quotip, lock_mode);
		return -ESRCH;
	}

	/*
	 * Find the block map; no allocations yet
	 */
	error = xfs_bmapi_read(quotip, dqp->q_fileoffset,
			XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0);
	xfs_iunlock(quotip, lock_mode);
	if (error)
		return error;

	ASSERT(nmaps == 1);
	ASSERT(map.br_blockcount >= 1);
	ASSERT(map.br_startblock != DELAYSTARTBLOCK);
	if (map.br_startblock == HOLESTARTBLOCK)
		return -ENOENT;

	trace_xfs_dqtobp_read(dqp);

	/*
	 * store the blkno etc so that we don't have to do the
	 * mapping all the time
	 */
	dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
			mp->m_quotainfo->qi_dqchunklen, 0, &bp,
			&xfs_dquot_buf_ops);
	if (xfs_metadata_is_sick(error))
		xfs_dquot_mark_sick(dqp);
	if (error) {
		ASSERT(bp == NULL);
		return error;
	}

	ASSERT(xfs_buf_islocked(bp));
	xfs_buf_set_ref(bp, XFS_DQUOT_REF);
	*bpp = bp;

	return 0;
}
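
/*
 * A minimal sketch of the contract between the two helpers above, as used
 * by xfs_qm_dqread() further down: -ENOENT from the read path means "hole",
 * which the caller may convert into an allocation if it is allowed to.
 *
 *	error = xfs_dquot_disk_read(mp, dqp, &bp);
 *	if (error == -ENOENT && can_alloc)
 *		error = xfs_dquot_disk_alloc(dqp, &bp);
 */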

/* Allocate and initialize everything we need for an incore dquot. */
STATIC struct xfs_dquot *
xfs_dquot_alloc(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	xfs_dqtype_t		type)
{
	struct xfs_dquot	*dqp;

	dqp = kmem_cache_zalloc(xfs_dquot_cache, GFP_KERNEL | __GFP_NOFAIL);

	dqp->q_type = type;
	dqp->q_id = id;
	dqp->q_mount = mp;
	INIT_LIST_HEAD(&dqp->q_lru);
	mutex_init(&dqp->q_qlock);
	init_waitqueue_head(&dqp->q_pinwait);
	dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;
	/*
	 * Offset of dquot in the (fixed sized) dquot chunk.
	 */
	dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
			sizeof(struct xfs_dqblk);

	/*
	 * Because we want to use a counting completion, complete
	 * the flush completion once to allow a single access to
	 * the flush completion without blocking.
	 */
	init_completion(&dqp->q_flush);
	complete(&dqp->q_flush);

	/*
	 * Make sure group quotas have a different lock class than user
	 * quotas.
	 */
	switch (type) {
	case XFS_DQTYPE_USER:
		/* uses the default lock class */
		break;
	case XFS_DQTYPE_GROUP:
		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_group_class);
		break;
	case XFS_DQTYPE_PROJ:
		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_project_class);
		break;
	default:
		ASSERT(0);
		break;
	}

	xfs_qm_dquot_logitem_init(dqp);

	XFS_STATS_INC(mp, xs_qm_dquot);
	return dqp;
}

/* Check the ondisk dquot's id and type match what the incore dquot expects. */
static bool
xfs_dquot_check_type(
	struct xfs_dquot	*dqp,
	struct xfs_disk_dquot	*ddqp)
{
	uint8_t			ddqp_type;
	uint8_t			dqp_type;

	ddqp_type = ddqp->d_type & XFS_DQTYPE_REC_MASK;
	dqp_type = xfs_dquot_type(dqp);

	if (be32_to_cpu(ddqp->d_id) != dqp->q_id)
		return false;

	/*
	 * V5 filesystems always expect an exact type match.  V4 filesystems
	 * expect an exact match for user dquots and for non-root group and
	 * project dquots.
	 */
	if (xfs_has_crc(dqp->q_mount) ||
	    dqp_type == XFS_DQTYPE_USER || dqp->q_id != 0)
		return ddqp_type == dqp_type;

	/*
	 * V4 filesystems support either group or project quotas, but not both
	 * at the same time.  The non-user quota file can be switched between
	 * group and project quota uses depending on the mount options, which
	 * means that we can encounter the other type when we try to load quota
	 * defaults.  Quotacheck will soon reset the entire quota file
	 * (including the root dquot) anyway, but don't log scary corruption
	 * reports to dmesg.
	 */
	return ddqp_type == XFS_DQTYPE_GROUP || ddqp_type == XFS_DQTYPE_PROJ;
}
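
/*
 * Illustrative scenario for the V4 special case above (the history is
 * hypothetical): a V4 filesystem that once ran with group quota and is now
 * mounted with project quota still stores XFS_DQTYPE_GROUP in the on-disk
 * default (id 0) non-user dquot, while the incore dquot being filled is
 * XFS_DQTYPE_PROJ.  xfs_dquot_check_type() accepts that combination rather
 * than flagging corruption, since quotacheck will rewrite the file anyway.
 */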

/* Copy the in-core quota fields in from the on-disk buffer. */
STATIC int
xfs_dquot_from_disk(
	struct xfs_dquot	*dqp,
	struct xfs_buf		*bp)
{
	struct xfs_dqblk	*dqb = xfs_buf_offset(bp, dqp->q_bufoffset);
	struct xfs_disk_dquot	*ddqp = &dqb->dd_diskdq;

	/*
	 * Ensure that we got the type and ID we were looking for.
	 * Everything else was checked by the dquot buffer verifier.
	 */
	if (!xfs_dquot_check_type(dqp, ddqp)) {
		xfs_alert_tag(bp->b_mount, XFS_PTAG_VERIFIER_ERROR,
			  "Metadata corruption detected at %pS, quota %u",
			  __this_address, dqp->q_id);
		xfs_alert(bp->b_mount, "Unmount and run xfs_repair");
		xfs_dquot_mark_sick(dqp);
		return -EFSCORRUPTED;
	}

	/* copy everything from disk dquot to the incore dquot */
	dqp->q_type = ddqp->d_type;
	dqp->q_blk.hardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
	dqp->q_blk.softlimit = be64_to_cpu(ddqp->d_blk_softlimit);
	dqp->q_ino.hardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
	dqp->q_ino.softlimit = be64_to_cpu(ddqp->d_ino_softlimit);
	dqp->q_rtb.hardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
	dqp->q_rtb.softlimit = be64_to_cpu(ddqp->d_rtb_softlimit);

	dqp->q_blk.count = be64_to_cpu(ddqp->d_bcount);
	dqp->q_ino.count = be64_to_cpu(ddqp->d_icount);
	dqp->q_rtb.count = be64_to_cpu(ddqp->d_rtbcount);

	dqp->q_blk.timer = xfs_dquot_from_disk_ts(ddqp, ddqp->d_btimer);
	dqp->q_ino.timer = xfs_dquot_from_disk_ts(ddqp, ddqp->d_itimer);
	dqp->q_rtb.timer = xfs_dquot_from_disk_ts(ddqp, ddqp->d_rtbtimer);

	/*
	 * Reservation counters are defined as reservation plus current usage
	 * to avoid having to add every time.
	 */
	dqp->q_blk.reserved = dqp->q_blk.count;
	dqp->q_ino.reserved = dqp->q_ino.count;
	dqp->q_rtb.reserved = dqp->q_rtb.count;

	/* initialize the dquot speculative prealloc thresholds */
	xfs_dquot_set_prealloc_limits(dqp);
	return 0;
}

/* Copy the in-core quota fields into the on-disk buffer. */
void
xfs_dquot_to_disk(
	struct xfs_disk_dquot	*ddqp,
	struct xfs_dquot	*dqp)
{
	ddqp->d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
	ddqp->d_version = XFS_DQUOT_VERSION;
	ddqp->d_type = dqp->q_type;
	ddqp->d_id = cpu_to_be32(dqp->q_id);
	ddqp->d_pad0 = 0;
	ddqp->d_pad = 0;

	ddqp->d_blk_hardlimit = cpu_to_be64(dqp->q_blk.hardlimit);
	ddqp->d_blk_softlimit = cpu_to_be64(dqp->q_blk.softlimit);
	ddqp->d_ino_hardlimit = cpu_to_be64(dqp->q_ino.hardlimit);
	ddqp->d_ino_softlimit = cpu_to_be64(dqp->q_ino.softlimit);
	ddqp->d_rtb_hardlimit = cpu_to_be64(dqp->q_rtb.hardlimit);
	ddqp->d_rtb_softlimit = cpu_to_be64(dqp->q_rtb.softlimit);

	ddqp->d_bcount = cpu_to_be64(dqp->q_blk.count);
	ddqp->d_icount = cpu_to_be64(dqp->q_ino.count);
	ddqp->d_rtbcount = cpu_to_be64(dqp->q_rtb.count);

	ddqp->d_bwarns = 0;
	ddqp->d_iwarns = 0;
	ddqp->d_rtbwarns = 0;

	ddqp->d_btimer = xfs_dquot_to_disk_ts(dqp, dqp->q_blk.timer);
	ddqp->d_itimer = xfs_dquot_to_disk_ts(dqp, dqp->q_ino.timer);
	ddqp->d_rtbtimer = xfs_dquot_to_disk_ts(dqp, dqp->q_rtb.timer);
}

/*
 * Read in the ondisk dquot, copy it to an incore version, and release the
 * buffer immediately.  If @can_alloc is true, fill any holes in the on-disk
 * metadata.
 */
static int
xfs_qm_dqread(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	xfs_dqtype_t		type,
	bool			can_alloc,
	struct xfs_dquot	**dqpp)
{
	struct xfs_dquot	*dqp;
	struct xfs_buf		*bp;
	int			error;

	dqp = xfs_dquot_alloc(mp, id, type);
	trace_xfs_dqread(dqp);

	/* Try to read the buffer, allocating if necessary. */
	error = xfs_dquot_disk_read(mp, dqp, &bp);
	if (error == -ENOENT && can_alloc)
		error = xfs_dquot_disk_alloc(dqp, &bp);
	if (error)
		goto err;

	/*
	 * At this point we should have a clean locked buffer.  Copy the data
	 * to the incore dquot and release the buffer since the incore dquot
	 * has its own locking protocol so we needn't tie up the buffer any
	 * further.
	 */
	ASSERT(xfs_buf_islocked(bp));
	error = xfs_dquot_from_disk(dqp, bp);
	xfs_buf_relse(bp);
	if (error)
		goto err;

	*dqpp = dqp;
	return error;

err:
	trace_xfs_dqread_fail(dqp);
	xfs_qm_dqdestroy(dqp);
	*dqpp = NULL;
	return error;
}

/*
 * Advance to the next id in the current chunk, or if at the
 * end of the chunk, skip ahead to first id in next allocated chunk
 * using the SEEK_DATA interface.
 */
static int
xfs_dq_get_next_id(
	struct xfs_mount	*mp,
	xfs_dqtype_t		type,
	xfs_dqid_t		*id)
{
	struct xfs_inode	*quotip = xfs_quota_inode(mp, type);
	xfs_dqid_t		next_id = *id + 1; /* simple advance */
	uint			lock_flags;
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	cur;
	xfs_fsblock_t		start;
	int			error = 0;

	/* If we'd wrap past the max ID, stop */
	if (next_id < *id)
		return -ENOENT;

	/* If new ID is within the current chunk, advancing it sufficed */
	if (next_id % mp->m_quotainfo->qi_dqperchunk) {
		*id = next_id;
		return 0;
	}

	/* Nope, next_id is now past the current chunk, so find the next one */
	start = (xfs_fsblock_t)next_id / mp->m_quotainfo->qi_dqperchunk;

	lock_flags = xfs_ilock_data_map_shared(quotip);
	error = xfs_iread_extents(NULL, quotip, XFS_DATA_FORK);
	if (error) {
		/* don't leak the ILOCK on a failed extent read */
		xfs_iunlock(quotip, lock_flags);
		return error;
	}

	if (xfs_iext_lookup_extent(quotip, &quotip->i_df, start, &cur, &got)) {
		/* contiguous chunk, bump startoff for the id calculation */
		if (got.br_startoff < start)
			got.br_startoff = start;
		*id = got.br_startoff * mp->m_quotainfo->qi_dqperchunk;
	} else {
		error = -ENOENT;
	}

	xfs_iunlock(quotip, lock_flags);

	return error;
}
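
/*
 * Worked example (illustrative; the real qi_dqperchunk depends on the
 * filesystem geometry): with 30 dquots per chunk, advancing from id 29
 * yields next_id 30, the first id of chunk 1, so the extent lookup starts
 * at file block 30 / 30 = 1.  Advancing from id 30 to 31 stays within
 * chunk 1 and returns immediately without touching the extent map.
 */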

/*
 * Look up the dquot in the in-core cache.  If found, the dquot is returned
 * locked and ready to go.
 */
static struct xfs_dquot *
xfs_qm_dqget_cache_lookup(
	struct xfs_mount	*mp,
	struct xfs_quotainfo	*qi,
	struct radix_tree_root	*tree,
	xfs_dqid_t		id)
{
	struct xfs_dquot	*dqp;

restart:
	mutex_lock(&qi->qi_tree_lock);
	dqp = radix_tree_lookup(tree, id);
	if (!dqp) {
		mutex_unlock(&qi->qi_tree_lock);
		XFS_STATS_INC(mp, xs_qm_dqcachemisses);
		return NULL;
	}

	xfs_dqlock(dqp);
	if (dqp->q_flags & XFS_DQFLAG_FREEING) {
		xfs_dqunlock(dqp);
		mutex_unlock(&qi->qi_tree_lock);
		trace_xfs_dqget_freeing(dqp);
		delay(1);
		goto restart;
	}

	dqp->q_nrefs++;
	mutex_unlock(&qi->qi_tree_lock);

	trace_xfs_dqget_hit(dqp);
	XFS_STATS_INC(mp, xs_qm_dqcachehits);
	return dqp;
}

/*
 * Try to insert a new dquot into the in-core cache.  If an error occurs the
 * caller should throw away the dquot and start over.  Otherwise, the dquot
 * is returned locked (and held by the cache) as if there had been a cache
 * hit.
 *
 * The insert needs to be done under memalloc_nofs context because the radix
 * tree can do memory allocation during insert.  The qi->qi_tree_lock is taken
 * in memory reclaim when freeing unused dquots, so we cannot have the radix
 * tree node allocation recursing into filesystem reclaim whilst we hold the
 * qi_tree_lock.
 */
static int
xfs_qm_dqget_cache_insert(
	struct xfs_mount	*mp,
	struct xfs_quotainfo	*qi,
	struct radix_tree_root	*tree,
	xfs_dqid_t		id,
	struct xfs_dquot	*dqp)
{
	unsigned int		nofs_flags;
	int			error;

	nofs_flags = memalloc_nofs_save();
	mutex_lock(&qi->qi_tree_lock);
	error = radix_tree_insert(tree, id, dqp);
	if (unlikely(error)) {
		/* Duplicate found!  Caller must try again. */
		trace_xfs_dqget_dup(dqp);
		goto out_unlock;
	}

	/* Return a locked dquot to the caller, with a reference taken. */
	xfs_dqlock(dqp);
	dqp->q_nrefs = 1;
	qi->qi_dquots++;

out_unlock:
	mutex_unlock(&qi->qi_tree_lock);
	memalloc_nofs_restore(nofs_flags);
	return error;
}

/* Check our input parameters. */
static int
xfs_qm_dqget_checks(
	struct xfs_mount	*mp,
	xfs_dqtype_t		type)
{
	switch (type) {
	case XFS_DQTYPE_USER:
		if (!XFS_IS_UQUOTA_ON(mp))
			return -ESRCH;
		return 0;
	case XFS_DQTYPE_GROUP:
		if (!XFS_IS_GQUOTA_ON(mp))
			return -ESRCH;
		return 0;
	case XFS_DQTYPE_PROJ:
		if (!XFS_IS_PQUOTA_ON(mp))
			return -ESRCH;
		return 0;
	default:
		WARN_ON_ONCE(0);
		return -EINVAL;
	}
}

/*
 * Given the file system, id, and type (UDQUOT/GDQUOT/PDQUOT), return a
 * locked dquot, doing an allocation (if requested) as needed.
 */
int
xfs_qm_dqget(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	xfs_dqtype_t		type,
	bool			can_alloc,
	struct xfs_dquot	**O_dqpp)
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
	struct xfs_dquot	*dqp;
	int			error;

	error = xfs_qm_dqget_checks(mp, type);
	if (error)
		return error;

restart:
	dqp = xfs_qm_dqget_cache_lookup(mp, qi, tree, id);
	if (dqp) {
		*O_dqpp = dqp;
		return 0;
	}

	error = xfs_qm_dqread(mp, id, type, can_alloc, &dqp);
	if (error)
		return error;

	error = xfs_qm_dqget_cache_insert(mp, qi, tree, id, dqp);
	if (error) {
		/*
		 * Duplicate found. Just throw away the new dquot and start
		 * over.
		 */
		xfs_qm_dqdestroy(dqp);
		XFS_STATS_INC(mp, xs_qm_dquot_dups);
		goto restart;
	}

	trace_xfs_dqget_miss(dqp);
	*O_dqpp = dqp;
	return 0;
}
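
/*
 * A minimal usage sketch (not taken from this file; the id and error
 * handling are hypothetical): look up a user dquot, inspect it under the
 * dquot lock, then drop the lock and the reference in one call:
 *
 *	struct xfs_dquot	*dqp;
 *
 *	error = xfs_qm_dqget(mp, id, XFS_DQTYPE_USER, false, &dqp);
 *	if (!error) {
 *		... dqp is locked; examine dqp->q_blk.count here ...
 *		xfs_qm_dqput(dqp);
 *	}
 */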

/*
 * Given a dquot id and type, read and initialize a dquot from the on-disk
 * metadata.  This function is only for use during quota initialization so
 * it ignores the dquot cache assuming that the dquot shrinker isn't set up.
 * The caller is responsible for _qm_dqdestroy'ing the returned dquot.
 */
int
xfs_qm_dqget_uncached(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	xfs_dqtype_t		type,
	struct xfs_dquot	**dqpp)
{
	int			error;

	error = xfs_qm_dqget_checks(mp, type);
	if (error)
		return error;

	return xfs_qm_dqread(mp, id, type, false, dqpp);
}

/* Return the quota id for a given inode and type. */
xfs_dqid_t
xfs_qm_id_for_quotatype(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type)
{
	switch (type) {
	case XFS_DQTYPE_USER:
		return i_uid_read(VFS_I(ip));
	case XFS_DQTYPE_GROUP:
		return i_gid_read(VFS_I(ip));
	case XFS_DQTYPE_PROJ:
		return ip->i_projid;
	}
	ASSERT(0);
	return 0;
}

/*
 * Return the dquot for a given inode and type.  If @can_alloc is true, then
 * allocate blocks if needed.  The inode's ILOCK must be held and it must not
 * have already had a dquot of this type attached.
 */
int
xfs_qm_dqget_inode(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type,
	bool			can_alloc,
	struct xfs_dquot	**O_dqpp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
	struct xfs_dquot	*dqp;
	xfs_dqid_t		id;
	int			error;

	error = xfs_qm_dqget_checks(mp, type);
	if (error)
		return error;

	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
	ASSERT(xfs_inode_dquot(ip, type) == NULL);

	id = xfs_qm_id_for_quotatype(ip, type);

restart:
	dqp = xfs_qm_dqget_cache_lookup(mp, qi, tree, id);
	if (dqp) {
		*O_dqpp = dqp;
		return 0;
	}

	/*
	 * Dquot cache miss. We don't want to keep the inode lock across
	 * a (potential) disk read.  Also we don't want to deal with the lock
	 * ordering between quotainode and this inode.  OTOH, dropping the
	 * inode lock here means dealing with a chown that can happen before
	 * we re-acquire the lock.
	 */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	error = xfs_qm_dqread(mp, id, type, can_alloc, &dqp);
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (error)
		return error;

	/*
	 * A dquot could be attached to this inode by now, since we had
	 * dropped the ilock.
	 */
	if (xfs_this_quota_on(mp, type)) {
		struct xfs_dquot	*dqp1;

		dqp1 = xfs_inode_dquot(ip, type);
		if (dqp1) {
			xfs_qm_dqdestroy(dqp);
			dqp = dqp1;
			xfs_dqlock(dqp);
			goto dqret;
		}
	} else {
		/* inode stays locked on return */
		xfs_qm_dqdestroy(dqp);
		return -ESRCH;
	}

	error = xfs_qm_dqget_cache_insert(mp, qi, tree, id, dqp);
	if (error) {
		/*
		 * Duplicate found. Just throw away the new dquot and start
		 * over.
		 */
		xfs_qm_dqdestroy(dqp);
		XFS_STATS_INC(mp, xs_qm_dquot_dups);
		goto restart;
	}

dqret:
	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
	trace_xfs_dqget_miss(dqp);
	*O_dqpp = dqp;
	return 0;
}

/*
 * Starting at @id and progressing upwards, look for an initialized incore
 * dquot, lock it, and return it.
 */
int
xfs_qm_dqget_next(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	xfs_dqtype_t		type,
	struct xfs_dquot	**dqpp)
{
	struct xfs_dquot	*dqp;
	int			error = 0;

	*dqpp = NULL;
	for (; !error; error = xfs_dq_get_next_id(mp, type, &id)) {
		error = xfs_qm_dqget(mp, id, type, false, &dqp);
		if (error == -ENOENT)
			continue;
		else if (error != 0)
			break;

		if (!XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
			*dqpp = dqp;
			return 0;
		}

		xfs_qm_dqput(dqp);
	}

	return error;
}

/*
 * Release a reference to the dquot (decrement ref-count) and unlock it.
 *
 * If there is a group quota attached to this dquot, carefully release that
 * too without tripping over deadlocks'n'stuff.
 */
void
xfs_qm_dqput(
	struct xfs_dquot	*dqp)
{
	ASSERT(dqp->q_nrefs > 0);
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	trace_xfs_dqput(dqp);

	if (--dqp->q_nrefs == 0) {
		struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
		trace_xfs_dqput_free(dqp);

		if (list_lru_add_obj(&qi->qi_lru, &dqp->q_lru))
			XFS_STATS_INC(dqp->q_mount, xs_qm_dquot_unused);
	}
	xfs_dqunlock(dqp);
}
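
/*
 * Note on the two release paths: xfs_qm_dqput() above requires the caller
 * to already hold the dquot lock, while xfs_qm_dqrele() below accepts a
 * NULL or unlocked dquot, takes the lock itself, and then delegates to
 * xfs_qm_dqput().
 */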

/*
 * Release a dquot.  Flush it if dirty, then dqput() it.
 * dquot must not be locked.
 */
void
xfs_qm_dqrele(
	struct xfs_dquot	*dqp)
{
	if (!dqp)
		return;

	trace_xfs_dqrele(dqp);

	xfs_dqlock(dqp);
	/*
	 * We don't care to flush it if the dquot is dirty here.
	 * That will create stutters that we want to avoid.
	 * Instead we do a delayed write when we try to reclaim
	 * a dirty dquot.  Also xfs_sync will take part of the burden...
	 */
	xfs_qm_dqput(dqp);
}

/*
 * This is the dquot flushing I/O completion routine.  It is called
 * from interrupt level when the buffer containing the dquot is
 * flushed to disk.  It is responsible for removing the dquot logitem
 * from the AIL if it has not been re-logged, and unlocking the dquot's
 * flush lock.  This behavior is very similar to that of inodes.
 */
static void
xfs_qm_dqflush_done(
	struct xfs_log_item	*lip)
{
	struct xfs_dq_logitem	*qip = (struct xfs_dq_logitem *)lip;
	struct xfs_dquot	*dqp = qip->qli_dquot;
	struct xfs_ail		*ailp = lip->li_ailp;
	xfs_lsn_t		tail_lsn;

	/*
	 * We only want to pull the item from the AIL if its
	 * location in the log has not changed since we started the flush.
	 * Thus, we only bother if the dquot's lsn has
	 * not changed.  First we check the lsn outside the lock
	 * since it's cheaper, and then we recheck while
	 * holding the lock before removing the dquot from the AIL.
	 */
	if (test_bit(XFS_LI_IN_AIL, &lip->li_flags) &&
	    ((lip->li_lsn == qip->qli_flush_lsn) ||
	     test_bit(XFS_LI_FAILED, &lip->li_flags))) {

		spin_lock(&ailp->ail_lock);
		xfs_clear_li_failed(lip);
		if (lip->li_lsn == qip->qli_flush_lsn) {
			/* xfs_ail_update_finish() drops the AIL lock */
			tail_lsn = xfs_ail_delete_one(ailp, lip);
			xfs_ail_update_finish(ailp, tail_lsn);
		} else {
			spin_unlock(&ailp->ail_lock);
		}
	}

	/*
	 * Release the dq's flush lock since we're done with it.
	 */
	xfs_dqfunlock(dqp);
}

void
xfs_buf_dquot_iodone(
	struct xfs_buf		*bp)
{
	struct xfs_log_item	*lip, *n;

	list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
		list_del_init(&lip->li_bio_list);
		xfs_qm_dqflush_done(lip);
	}
}

void
xfs_buf_dquot_io_fail(
	struct xfs_buf		*bp)
{
	struct xfs_log_item	*lip;

	spin_lock(&bp->b_mount->m_ail->ail_lock);
	list_for_each_entry(lip, &bp->b_li_list, li_bio_list)
		xfs_set_li_failed(lip, bp);
	spin_unlock(&bp->b_mount->m_ail->ail_lock);
}
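
/*
 * Note (a summary of the behavior above, not a new mechanism):
 * xfs_set_li_failed() leaves the failed log items in the AIL with
 * XFS_LI_FAILED set so that a later AIL push can resubmit the buffer,
 * while xfs_qm_dqflush_done() clears the failed state via
 * xfs_clear_li_failed() once a subsequent write completes successfully.
 */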

/* Check incore dquot for errors before we flush. */
static xfs_failaddr_t
xfs_qm_dqflush_check(
	struct xfs_dquot	*dqp)
{
	xfs_dqtype_t		type = xfs_dquot_type(dqp);

	if (type != XFS_DQTYPE_USER &&
	    type != XFS_DQTYPE_GROUP &&
	    type != XFS_DQTYPE_PROJ)
		return __this_address;

	if (dqp->q_id == 0)
		return NULL;

	if (dqp->q_blk.softlimit && dqp->q_blk.count > dqp->q_blk.softlimit &&
	    !dqp->q_blk.timer)
		return __this_address;

	if (dqp->q_ino.softlimit && dqp->q_ino.count > dqp->q_ino.softlimit &&
	    !dqp->q_ino.timer)
		return __this_address;

	if (dqp->q_rtb.softlimit && dqp->q_rtb.count > dqp->q_rtb.softlimit &&
	    !dqp->q_rtb.timer)
		return __this_address;

	/* bigtime flag should never be set on root dquots */
	if (dqp->q_type & XFS_DQTYPE_BIGTIME) {
		if (!xfs_has_bigtime(dqp->q_mount))
			return __this_address;
		if (dqp->q_id == 0)
			return __this_address;
	}

	return NULL;
}
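
/*
 * For example (illustrative values): a user dquot with q_blk.softlimit of
 * 100, q_blk.count of 150 and q_blk.timer still zero fails the check
 * above, because xfs_qm_adjust_dqtimers() should have started the grace
 * timer when usage crossed the soft limit.  The root dquot (q_id == 0) is
 * exempt since it carries the default limits and is not enforced.
 */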

/*
 * Write a modified dquot to disk.
 * The dquot must be locked and the flush lock held by the caller.
 * The flush lock will not be unlocked until the dquot reaches the disk,
 * but the dquot is free to be unlocked and modified by the caller
 * in the interim.  Dquot is still locked on return.  This behavior is
 * identical to that of inodes.
 */
int
xfs_qm_dqflush(
	struct xfs_dquot	*dqp,
	struct xfs_buf		**bpp)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_log_item	*lip = &dqp->q_logitem.qli_item;
	struct xfs_buf		*bp;
	struct xfs_dqblk	*dqblk;
	xfs_failaddr_t		fa;
	int			error;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	ASSERT(!completion_done(&dqp->q_flush));

	trace_xfs_dqflush(dqp);

	*bpp = NULL;

	xfs_qm_dqunpin_wait(dqp);

	/*
	 * Get the buffer containing the on-disk dquot
	 */
	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
				   mp->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK,
				   &bp, &xfs_dquot_buf_ops);
	if (error == -EAGAIN)
		goto out_unlock;
	if (xfs_metadata_is_sick(error))
		xfs_dquot_mark_sick(dqp);
	if (error)
		goto out_abort;

	fa = xfs_qm_dqflush_check(dqp);
	if (fa) {
		xfs_alert(mp, "corrupt dquot ID 0x%x in memory at %pS",
				dqp->q_id, fa);
		xfs_buf_relse(bp);
		xfs_dquot_mark_sick(dqp);
		error = -EFSCORRUPTED;
		goto out_abort;
	}

	/* Flush the incore dquot to the ondisk buffer. */
	dqblk = xfs_buf_offset(bp, dqp->q_bufoffset);
	xfs_dquot_to_disk(&dqblk->dd_diskdq, dqp);

	/*
	 * Clear the dirty field and remember the flush lsn for later use.
	 */
	dqp->q_flags &= ~XFS_DQFLAG_DIRTY;

	xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
					&dqp->q_logitem.qli_item.li_lsn);

	/*
	 * copy the lsn into the on-disk dquot now while we have the in memory
	 * dquot here.  This can't be done later in the write verifier as we
	 * can't get access to the log item at that point in time.
	 *
	 * We also calculate the CRC here so that the on-disk dquot in the
	 * buffer always has a valid CRC.  This ensures there is no possibility
	 * of a dquot without an up-to-date CRC getting to disk.
	 */
	if (xfs_has_crc(mp)) {
		dqblk->dd_lsn = cpu_to_be64(dqp->q_logitem.qli_item.li_lsn);
		xfs_update_cksum((char *)dqblk, sizeof(struct xfs_dqblk),
				 XFS_DQUOT_CRC_OFF);
	}

	/*
	 * Attach the dquot to the buffer so that we can remove this dquot from
	 * the AIL and release the flush lock once the dquot is synced to disk.
	 */
	bp->b_flags |= _XBF_DQUOTS;
	list_add_tail(&dqp->q_logitem.qli_item.li_bio_list, &bp->b_li_list);

	/*
	 * If the buffer is pinned then push on the log so we won't
	 * get stuck waiting in the write for too long.
	 */
	if (xfs_buf_ispinned(bp)) {
		trace_xfs_dqflush_force(dqp);
		xfs_log_force(mp, 0);
	}

	trace_xfs_dqflush_done(dqp);
	*bpp = bp;
	return 0;

out_abort:
	dqp->q_flags &= ~XFS_DQFLAG_DIRTY;
	xfs_trans_ail_delete(lip, 0);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
out_unlock:
	xfs_dqfunlock(dqp);
	return error;
}

/*
 * Lock two xfs_dquot structures.
 *
 * To avoid deadlocks we always lock the quota structure with
 * the lowest id first.
 */
void
xfs_dqlock2(
	struct xfs_dquot	*d1,
	struct xfs_dquot	*d2)
{
	if (d1 && d2) {
		ASSERT(d1 != d2);
		if (d1->q_id > d2->q_id) {
			mutex_lock(&d2->q_qlock);
			mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED);
		} else {
			mutex_lock(&d1->q_qlock);
			mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED);
		}
	} else if (d1) {
		mutex_lock(&d1->q_qlock);
	} else if (d2) {
		mutex_lock(&d2->q_qlock);
	}
}

static int
xfs_dqtrx_cmp(
	const void		*a,
	const void		*b)
{
	const struct xfs_dqtrx	*qa = a;
	const struct xfs_dqtrx	*qb = b;

	if (qa->qt_dquot->q_id > qb->qt_dquot->q_id)
		return 1;
	if (qa->qt_dquot->q_id < qb->qt_dquot->q_id)
		return -1;
	return 0;
}

void
xfs_dqlockn(
	struct xfs_dqtrx	*q)
{
	unsigned int		i;

	BUILD_BUG_ON(XFS_QM_TRANS_MAXDQS > MAX_LOCKDEP_SUBCLASSES);

	/* Sort in order of dquot id, do not allow duplicates */
	for (i = 0; i < XFS_QM_TRANS_MAXDQS && q[i].qt_dquot != NULL; i++) {
		unsigned int	j;

		for (j = 0; j < i; j++)
			ASSERT(q[i].qt_dquot != q[j].qt_dquot);
	}
	if (i == 0)
		return;

	sort(q, i, sizeof(struct xfs_dqtrx), xfs_dqtrx_cmp, NULL);

	mutex_lock(&q[0].qt_dquot->q_qlock);
	for (i = 1; i < XFS_QM_TRANS_MAXDQS && q[i].qt_dquot != NULL; i++)
		mutex_lock_nested(&q[i].qt_dquot->q_qlock,
				XFS_QLOCK_NESTED + i - 1);
}

int __init
xfs_qm_init(void)
{
	xfs_dquot_cache = kmem_cache_create("xfs_dquot",
					    sizeof(struct xfs_dquot),
					    0, 0, NULL);
	if (!xfs_dquot_cache)
		goto out;

	xfs_dqtrx_cache = kmem_cache_create("xfs_dqtrx",
					    sizeof(struct xfs_dquot_acct),
					    0, 0, NULL);
	if (!xfs_dqtrx_cache)
		goto out_free_dquot_cache;

	return 0;

out_free_dquot_cache:
	kmem_cache_destroy(xfs_dquot_cache);
out:
	return -ENOMEM;
}

void
xfs_qm_exit(void)
{
	kmem_cache_destroy(xfs_dqtrx_cache);
	kmem_cache_destroy(xfs_dquot_cache);
}