// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * resize.c
 *
 * volume resize.
 * Inspired by ext3/resize.c.
 *
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/fs.h>
#include <linux/types.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "dlmglue.h"
#include "inode.h"
#include "journal.h"
#include "super.h"
#include "sysfile.h"
#include "uptodate.h"
#include "ocfs2_trace.h"

#include "buffer_head_io.h"
#include "suballoc.h"
#include "resize.h"

/*
 * Check whether any new backup superblocks exist in the last group.
 * If so, mark or clear them in the bitmap.
 *
 * Return how many backups we find in the last group.
 */
static u16 ocfs2_calc_new_backup_super(struct inode *inode,
				       struct ocfs2_group_desc *gd,
				       u16 cl_cpg,
				       u16 old_bg_clusters,
				       int set)
{
	int i;
	u16 backups = 0;
	u32 cluster, lgd_cluster;
	u64 blkno, gd_blkno, lgd_blkno = le64_to_cpu(gd->bg_blkno);

	for (i = 0; i < OCFS2_MAX_BACKUP_SUPERBLOCKS; i++) {
		blkno = ocfs2_backup_super_blkno(inode->i_sb, i);
		cluster = ocfs2_blocks_to_clusters(inode->i_sb, blkno);

		gd_blkno = ocfs2_which_cluster_group(inode, cluster);
		if (gd_blkno < lgd_blkno)
			continue;
		else if (gd_blkno > lgd_blkno)
			break;

		/* skip backup supers already covered by the old group size */
		lgd_cluster = ocfs2_blocks_to_clusters(inode->i_sb, lgd_blkno);
		lgd_cluster += old_bg_clusters;
		if (lgd_cluster >= cluster)
			continue;

		if (set)
			ocfs2_set_bit(cluster % cl_cpg,
				      (unsigned long *)gd->bg_bitmap);
		else
			ocfs2_clear_bit(cluster % cl_cpg,
					(unsigned long *)gd->bg_bitmap);
		backups++;
	}

	return backups;
}

/*
 * Grow the last cluster group by new_clusters and update the global
 * bitmap inode (chain record, bit totals and i_size) to match.
 */
static int ocfs2_update_last_group_and_inode(handle_t *handle,
					     struct inode *bm_inode,
					     struct buffer_head *bm_bh,
					     struct buffer_head *group_bh,
					     u32 first_new_cluster,
					     int new_clusters)
{
	int ret = 0;
	struct ocfs2_super *osb = OCFS2_SB(bm_inode->i_sb);
	struct ocfs2_dinode *fe = (struct ocfs2_dinode *) bm_bh->b_data;
	struct ocfs2_chain_list *cl = &fe->id2.i_chain;
	struct ocfs2_chain_rec *cr;
	struct ocfs2_group_desc *group;
	u16 chain, num_bits, backups = 0;
	u16 cl_bpc = le16_to_cpu(cl->cl_bpc);
	u16 cl_cpg = le16_to_cpu(cl->cl_cpg);
	u16 old_bg_clusters;
	u16 contig_bits;
	__le16 old_bg_contig_free_bits;

	trace_ocfs2_update_last_group_and_inode(new_clusters,
						first_new_cluster);

	ret = ocfs2_journal_access_gd(handle, INODE_CACHE(bm_inode),
				      group_bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	group = (struct ocfs2_group_desc *)group_bh->b_data;

	old_bg_clusters = le16_to_cpu(group->bg_bits) / cl_bpc;
	/* update the group first. */
	num_bits = new_clusters * cl_bpc;
	le16_add_cpu(&group->bg_bits, num_bits);
	le16_add_cpu(&group->bg_free_bits_count, num_bits);

	/*
	 * check whether any new backup superblocks exist in
	 * this group and update the group bitmap accordingly.
	 */
	if (OCFS2_HAS_COMPAT_FEATURE(osb->sb,
				     OCFS2_FEATURE_COMPAT_BACKUP_SB)) {
		backups = ocfs2_calc_new_backup_super(bm_inode,
						      group,
						      cl_cpg, old_bg_clusters, 1);
		le16_add_cpu(&group->bg_free_bits_count, -1 * backups);
	}

	contig_bits = ocfs2_find_max_contig_free_bits(group->bg_bitmap,
				le16_to_cpu(group->bg_bits), 0);
	old_bg_contig_free_bits = group->bg_contig_free_bits;
	group->bg_contig_free_bits = cpu_to_le16(contig_bits);

	ocfs2_journal_dirty(handle, group_bh);

	/* update the inode accordingly. */
	ret = ocfs2_journal_access_di(handle, INODE_CACHE(bm_inode), bm_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_rollback;
	}

	chain = le16_to_cpu(group->bg_chain);
	cr = (&cl->cl_recs[chain]);
	le32_add_cpu(&cr->c_total, num_bits);
	le32_add_cpu(&cr->c_free, num_bits);
	le32_add_cpu(&fe->id1.bitmap1.i_total, num_bits);
	le32_add_cpu(&fe->i_clusters, new_clusters);

	if (backups) {
		le32_add_cpu(&cr->c_free, -1 * backups);
		le32_add_cpu(&fe->id1.bitmap1.i_used, backups);
	}

	spin_lock(&OCFS2_I(bm_inode)->ip_lock);
	OCFS2_I(bm_inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
	le64_add_cpu(&fe->i_size, (u64)new_clusters << osb->s_clustersize_bits);
	spin_unlock(&OCFS2_I(bm_inode)->ip_lock);
	i_size_write(bm_inode, le64_to_cpu(fe->i_size));

	ocfs2_journal_dirty(handle, bm_bh);

out_rollback:
	if (ret < 0) {
		ocfs2_calc_new_backup_super(bm_inode,
					    group,
					    cl_cpg, old_bg_clusters, 0);
		le16_add_cpu(&group->bg_free_bits_count, backups);
		le16_add_cpu(&group->bg_bits, -1 * num_bits);
		le16_add_cpu(&group->bg_free_bits_count, -1 * num_bits);
		group->bg_contig_free_bits = old_bg_contig_free_bits;
	}
out:
	if (ret)
		mlog_errno(ret);
	return ret;
}

/*
 * Write the updated superblock image in @data to every backup
 * superblock that lies within the first @clusters clusters.
 */
static int update_backups(struct inode *inode, u32 clusters, char *data)
{
	int i, ret = 0;
	u32 cluster;
	u64 blkno;
	struct buffer_head *backup = NULL;
	struct ocfs2_dinode *backup_di = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	/* calculate the real backups we need to update. */
	for (i = 0; i < OCFS2_MAX_BACKUP_SUPERBLOCKS; i++) {
		blkno = ocfs2_backup_super_blkno(inode->i_sb, i);
		cluster = ocfs2_blocks_to_clusters(inode->i_sb, blkno);
		if (cluster >= clusters)
			break;

		ret = ocfs2_read_blocks_sync(osb, blkno, 1, &backup);
		if (ret < 0) {
			mlog_errno(ret);
			break;
		}

		memcpy(backup->b_data, data, inode->i_sb->s_blocksize);

		backup_di = (struct ocfs2_dinode *)backup->b_data;
		backup_di->i_blkno = cpu_to_le64(blkno);

		ret = ocfs2_write_super_or_backup(osb, backup);
		brelse(backup);
		backup = NULL;
		if (ret < 0) {
			mlog_errno(ret);
			break;
		}
	}

	return ret;
}

/*
 * Bump i_clusters in the on-disk superblock and, if the backup
 * superblock feature is enabled, propagate the change to the backups.
 */
static void ocfs2_update_super_and_backups(struct inode *inode,
					   int new_clusters)
{
	int ret;
	u32 clusters = 0;
	struct buffer_head *super_bh = NULL;
	struct ocfs2_dinode *super_di = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	/*
	 * update the superblock last.
	 * It doesn't matter if the write fails.
	 */
	ret = ocfs2_read_blocks_sync(osb, OCFS2_SUPER_BLOCK_BLKNO, 1,
				     &super_bh);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	super_di = (struct ocfs2_dinode *)super_bh->b_data;
	le32_add_cpu(&super_di->i_clusters, new_clusters);
	clusters = le32_to_cpu(super_di->i_clusters);

	ret = ocfs2_write_super_or_backup(osb, super_bh);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	if (OCFS2_HAS_COMPAT_FEATURE(osb->sb, OCFS2_FEATURE_COMPAT_BACKUP_SB))
		ret = update_backups(inode, clusters, super_bh->b_data);

out:
	brelse(super_bh);
	if (ret)
		printk(KERN_WARNING "ocfs2: Failed to update super blocks on %s"
			" during fs resize. This condition is not fatal,"
			" but fsck.ocfs2 should be run to fix it\n",
			osb->dev_str);
	return;
}

/*
 * Extend the filesystem to the new number of clusters specified. This entry
 * point is only used to extend the current filesystem to the end of the last
 * existing group.
 */
int ocfs2_group_extend(struct inode *inode, int new_clusters)
{
	int ret;
	handle_t *handle;
	struct buffer_head *main_bm_bh = NULL;
	struct buffer_head *group_bh = NULL;
	struct inode *main_bm_inode = NULL;
	struct ocfs2_dinode *fe = NULL;
	struct ocfs2_group_desc *group = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	u16 cl_bpc;
	u32 first_new_cluster;
	u64 lgd_blkno;

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return -EROFS;

	if (new_clusters < 0)
		return -EINVAL;
	else if (new_clusters == 0)
		return 0;

	main_bm_inode = ocfs2_get_system_file_inode(osb,
						    GLOBAL_BITMAP_SYSTEM_INODE,
						    OCFS2_INVALID_SLOT);
	if (!main_bm_inode) {
		ret = -EINVAL;
		mlog_errno(ret);
		goto out;
	}

	inode_lock(main_bm_inode);

	ret = ocfs2_inode_lock(main_bm_inode, &main_bm_bh, 1);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_mutex;
	}

	fe = (struct ocfs2_dinode *)main_bm_bh->b_data;

	/* main_bm_bh is validated by inode read inside ocfs2_inode_lock(),
	 * so any corruption is a code bug. */
	BUG_ON(!OCFS2_IS_VALID_DINODE(fe));

	if (le16_to_cpu(fe->id2.i_chain.cl_cpg) !=
	    ocfs2_group_bitmap_size(osb->sb, 0,
				    osb->s_feature_incompat) * 8) {
		mlog(ML_ERROR, "The disk is too old and small. "
		     "Force to do offline resize.");
		ret = -EINVAL;
		goto out_unlock;
	}

	first_new_cluster = le32_to_cpu(fe->i_clusters);
	lgd_blkno = ocfs2_which_cluster_group(main_bm_inode,
					      first_new_cluster - 1);

	ret = ocfs2_read_group_descriptor(main_bm_inode, fe, lgd_blkno,
					  &group_bh);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_unlock;
	}
	group = (struct ocfs2_group_desc *)group_bh->b_data;

	cl_bpc = le16_to_cpu(fe->id2.i_chain.cl_bpc);
	if (le16_to_cpu(group->bg_bits) / cl_bpc + new_clusters >
	    le16_to_cpu(fe->id2.i_chain.cl_cpg)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	trace_ocfs2_group_extend(
	     (unsigned long long)le64_to_cpu(group->bg_blkno), new_clusters);

	handle = ocfs2_start_trans(osb, OCFS2_GROUP_EXTEND_CREDITS);
	if (IS_ERR(handle)) {
		mlog_errno(PTR_ERR(handle));
		ret = -EINVAL;
		goto out_unlock;
	}

	/* update the last group descriptor and inode. */
	ret = ocfs2_update_last_group_and_inode(handle, main_bm_inode,
						main_bm_bh, group_bh,
						first_new_cluster,
						new_clusters);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	ocfs2_update_super_and_backups(main_bm_inode, new_clusters);

out_commit:
	ocfs2_commit_trans(osb, handle);
out_unlock:
	brelse(group_bh);
	brelse(main_bm_bh);

	ocfs2_inode_unlock(main_bm_inode, 1);

out_mutex:
	inode_unlock(main_bm_inode);
	iput(main_bm_inode);

out:
	return ret;
}

/*
 * Validate the group descriptor read from disk against the
 * caller-supplied input.
 */
static int ocfs2_check_new_group(struct inode *inode,
				 struct ocfs2_dinode *di,
				 struct ocfs2_new_group_input *input,
				 struct buffer_head *group_bh)
{
	int ret;
	struct ocfs2_group_desc *gd =
		(struct ocfs2_group_desc *)group_bh->b_data;
	u16 cl_bpc = le16_to_cpu(di->id2.i_chain.cl_bpc);

	ret = ocfs2_check_group_descriptor(inode->i_sb, di, group_bh);
	if (ret)
		goto out;

	ret = -EINVAL;
	if (le16_to_cpu(gd->bg_chain) != input->chain)
		mlog(ML_ERROR, "Group descriptor # %llu has bad chain %u "
		     "while input has %u set.\n",
		     (unsigned long long)le64_to_cpu(gd->bg_blkno),
		     le16_to_cpu(gd->bg_chain), input->chain);
	else if (le16_to_cpu(gd->bg_bits) != input->clusters * cl_bpc)
		mlog(ML_ERROR, "Group descriptor # %llu has bit count %u but "
		     "input has %u clusters set\n",
		     (unsigned long long)le64_to_cpu(gd->bg_blkno),
		     le16_to_cpu(gd->bg_bits), input->clusters);
	else if (le16_to_cpu(gd->bg_free_bits_count) != input->frees * cl_bpc)
		mlog(ML_ERROR, "Group descriptor # %llu has free bit count %u "
		     "but it should have %u set\n",
		     (unsigned long long)le64_to_cpu(gd->bg_blkno),
		     le16_to_cpu(gd->bg_bits),
		     input->frees * cl_bpc);
	else
		ret = 0;

out:
	return ret;
}

/*
 * Sanity-check the location and geometry of the group being added
 * before it is linked into the global bitmap.
 */
static int ocfs2_verify_group_and_input(struct inode *inode,
					struct ocfs2_dinode *di,
					struct ocfs2_new_group_input *input,
					struct buffer_head *group_bh)
{
	u16 cl_count = le16_to_cpu(di->id2.i_chain.cl_count);
	u16 cl_cpg = le16_to_cpu(di->id2.i_chain.cl_cpg);
	u16 next_free = le16_to_cpu(di->id2.i_chain.cl_next_free_rec);
	u32 cluster = ocfs2_blocks_to_clusters(inode->i_sb, input->group);
	u32 total_clusters = le32_to_cpu(di->i_clusters);
	int ret = -EINVAL;

	if (cluster < total_clusters)
		mlog(ML_ERROR, "add a group which is in the current volume.\n");
	else if (input->chain >= cl_count)
		mlog(ML_ERROR, "input chain exceeds the limit.\n");
	else if (next_free != cl_count && next_free != input->chain)
		mlog(ML_ERROR,
		     "the add group should be in chain %u\n", next_free);
	else if (total_clusters + input->clusters < total_clusters)
		mlog(ML_ERROR, "add group's clusters overflow.\n");
	else if (input->clusters > cl_cpg)
		mlog(ML_ERROR, "the cluster exceeds the maximum of a group\n");
	else if (input->frees > input->clusters)
		mlog(ML_ERROR, "the free cluster exceeds the total clusters\n");
	else if (total_clusters % cl_cpg != 0)
		mlog(ML_ERROR,
		     "the last group isn't full. Use group extend first.\n");
	else if (input->group != ocfs2_which_cluster_group(inode, cluster))
		mlog(ML_ERROR, "group blkno is invalid\n");
	else if ((ret = ocfs2_check_new_group(inode, di, input, group_bh)))
		mlog(ML_ERROR, "group descriptor check failed.\n");
	else
		ret = 0;

	return ret;
}

/* Add a new group descriptor to global_bitmap. */
int ocfs2_group_add(struct inode *inode, struct ocfs2_new_group_input *input)
{
	int ret;
	handle_t *handle;
	struct buffer_head *main_bm_bh = NULL;
	struct inode *main_bm_inode = NULL;
	struct ocfs2_dinode *fe = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct buffer_head *group_bh = NULL;
	struct ocfs2_group_desc *group = NULL;
	struct ocfs2_chain_list *cl;
	struct ocfs2_chain_rec *cr;
	u16 cl_bpc;
	u64 bg_ptr;

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return -EROFS;

	main_bm_inode = ocfs2_get_system_file_inode(osb,
						    GLOBAL_BITMAP_SYSTEM_INODE,
						    OCFS2_INVALID_SLOT);
	if (!main_bm_inode) {
		ret = -EINVAL;
		mlog_errno(ret);
		goto out;
	}

	inode_lock(main_bm_inode);

	ret = ocfs2_inode_lock(main_bm_inode, &main_bm_bh, 1);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_mutex;
	}

	fe = (struct ocfs2_dinode *)main_bm_bh->b_data;

	if (le16_to_cpu(fe->id2.i_chain.cl_cpg) !=
	    ocfs2_group_bitmap_size(osb->sb, 0,
				    osb->s_feature_incompat) * 8) {
		mlog(ML_ERROR, "The disk is too old and small."
		     " Force to do offline resize.");
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = ocfs2_read_blocks_sync(osb, input->group, 1, &group_bh);
	if (ret < 0) {
		mlog(ML_ERROR, "Can't read the group descriptor # %llu "
		     "from the device.", (unsigned long long)input->group);
		goto out_unlock;
	}

	ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode), group_bh);

	ret = ocfs2_verify_group_and_input(main_bm_inode, fe, input, group_bh);
	if (ret) {
		mlog_errno(ret);
		goto out_free_group_bh;
	}

	trace_ocfs2_group_add((unsigned long long)input->group,
			       input->chain, input->clusters, input->frees);

	handle = ocfs2_start_trans(osb, OCFS2_GROUP_ADD_CREDITS);
	if (IS_ERR(handle)) {
		mlog_errno(PTR_ERR(handle));
		ret = -EINVAL;
		goto out_free_group_bh;
	}

	cl_bpc = le16_to_cpu(fe->id2.i_chain.cl_bpc);
	cl = &fe->id2.i_chain;
	cr = &cl->cl_recs[input->chain];

	ret = ocfs2_journal_access_gd(handle, INODE_CACHE(main_bm_inode),
				      group_bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_commit;
	}

	group = (struct ocfs2_group_desc *)group_bh->b_data;
	bg_ptr = le64_to_cpu(group->bg_next_group);
	group->bg_next_group = cr->c_blkno;
	ocfs2_journal_dirty(handle, group_bh);

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(main_bm_inode),
				      main_bm_bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		group->bg_next_group = cpu_to_le64(bg_ptr);
		mlog_errno(ret);
		goto out_commit;
	}

	if (input->chain == le16_to_cpu(cl->cl_next_free_rec)) {
		le16_add_cpu(&cl->cl_next_free_rec, 1);
		memset(cr, 0, sizeof(struct ocfs2_chain_rec));
	}

	cr->c_blkno = cpu_to_le64(input->group);
	le32_add_cpu(&cr->c_total, input->clusters * cl_bpc);
	le32_add_cpu(&cr->c_free, input->frees * cl_bpc);

	le32_add_cpu(&fe->id1.bitmap1.i_total, input->clusters * cl_bpc);
	le32_add_cpu(&fe->id1.bitmap1.i_used,
		     (input->clusters - input->frees) * cl_bpc);
	le32_add_cpu(&fe->i_clusters, input->clusters);

	ocfs2_journal_dirty(handle, main_bm_bh);

	spin_lock(&OCFS2_I(main_bm_inode)->ip_lock);
	OCFS2_I(main_bm_inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
	le64_add_cpu(&fe->i_size, (u64)input->clusters << osb->s_clustersize_bits);
	spin_unlock(&OCFS2_I(main_bm_inode)->ip_lock);
	i_size_write(main_bm_inode, le64_to_cpu(fe->i_size));

	ocfs2_update_super_and_backups(main_bm_inode, input->clusters);

out_commit:
	ocfs2_commit_trans(osb, handle);

out_free_group_bh:
	brelse(group_bh);

out_unlock:
	brelse(main_bm_bh);

	ocfs2_inode_unlock(main_bm_inode, 1);

out_mutex:
	inode_unlock(main_bm_inode);
	iput(main_bm_inode);

out:
	return ret;
}