// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 *
 * Authors: Adrian Hunter
 *          Artem Bityutskiy (Битюцкий Артём)
 */

/*
 * This file implements functions that manage the running of the commit process.
 * Each affected module has its own functions to accomplish its part in the
 * commit and those functions are called here.
 *
 * The commit is the process whereby all updates to the index and LEB properties
 * are written out together and the journal becomes empty. This keeps the
 * file system consistent - at all times the state can be recreated by reading
 * the index and LEB properties and then replaying the journal.
 *
 * The commit is split into two parts named "commit start" and "commit end".
 * During commit start, the commit process has exclusive access to the journal
 * by holding the commit semaphore down for writing. As few I/O operations as
 * possible are performed during commit start; instead, the nodes that are to be
 * written are merely identified. During commit end, the commit semaphore is no
 * longer held and the journal is again in operation, allowing users to continue
 * to use the file system while the bulk of the commit I/O is performed. The
 * purpose of this two-step approach is to prevent the commit from causing any
 * latency blips. Note that in any case, the commit does not prevent lookups
 * (as permitted by the TNC mutex), or access to VFS data structures e.g. page
 * cache.
 */
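
/*
 * The current commit state is kept in @c->cmt_state, protected by @c->cs_lock,
 * and moves between COMMIT_RESTING, COMMIT_BACKGROUND, COMMIT_REQUIRED,
 * COMMIT_RUNNING_BACKGROUND, COMMIT_RUNNING_REQUIRED and COMMIT_BROKEN as the
 * functions below request, start and finish the commit.
 */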

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include "ubifs.h"

/*
 * nothing_to_commit - check if there is nothing to commit.
 * @c: UBIFS file-system description object
 *
 * This is a helper function which checks if there is anything to commit. It is
 * used as an optimization to avoid starting the commit if it is not really
 * necessary. Indeed, the commit operation always assumes flash I/O (e.g.,
 * writing the commit start node to the log), and it is better to avoid doing
 * this unnecessarily. E.g., 'ubifs_sync_fs()' runs the commit, but if there is
 * nothing to commit, it is more optimal to avoid any flash I/O.
 *
 * This function has to be called with @c->commit_sem locked for writing -
 * this function does not take LPT/TNC locks because the @c->commit_sem
 * guarantees that we have exclusive access to the TNC and LPT data structures.
 *
 * This function returns %1 if there is nothing to commit and %0 otherwise.
 */
static int nothing_to_commit(struct ubifs_info *c)
{
	/*
	 * During mounting or remounting from R/O mode to R/W mode we may
	 * commit for various recovery-related reasons.
	 */
	if (c->mounting || c->remounting_rw)
		return 0;

	/*
	 * If the root TNC node is dirty, we definitely have something to
	 * commit.
	 */
	if (c->zroot.znode && ubifs_zn_dirty(c->zroot.znode))
		return 0;

	/*
	 * Increasing @c->dirty_pn_cnt/@c->dirty_nn_cnt and marking
	 * nnodes/pnodes as dirty in run_gc() could race with the following
	 * checks, which would lead to inconsistent states between @c->nroot
	 * and @c->dirty_pn_cnt/@c->dirty_nn_cnt, so hold @c->lp_mutex to
	 * avoid that.
	 */
	mutex_lock(&c->lp_mutex);
	/*
	 * Even though the TNC is clean, the LPT tree may have dirty nodes. For
	 * example, this may happen if the budgeting subsystem invoked GC to
	 * make some free space, and the GC found an LEB with only dirty and
	 * free space. In this case GC would just change the lprops of this
	 * LEB (by turning all space into free space) and unmap it.
	 */
	if (c->nroot && test_bit(DIRTY_CNODE, &c->nroot->flags)) {
		mutex_unlock(&c->lp_mutex);
		return 0;
	}

	ubifs_assert(c, atomic_long_read(&c->dirty_zn_cnt) == 0);
	ubifs_assert(c, c->dirty_pn_cnt == 0);
	ubifs_assert(c, c->dirty_nn_cnt == 0);
	mutex_unlock(&c->lp_mutex);

	return 1;
}

/**
 * do_commit - commit the journal.
 * @c: UBIFS file-system description object
 *
 * This function implements UBIFS commit. It has to be called with the commit
 * lock held. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int do_commit(struct ubifs_info *c)
{
	int err, new_ltail_lnum, old_ltail_lnum, i;
	struct ubifs_zbranch zroot;
	struct ubifs_lp_stats lst;

	dbg_cmt("start");
	ubifs_assert(c, !c->ro_media && !c->ro_mount);

	if (c->ro_error) {
		err = -EROFS;
		goto out_up;
	}

	if (nothing_to_commit(c)) {
		up_write(&c->commit_sem);
		err = 0;
		goto out_cancel;
	}

	/* Sync all write buffers (necessary for recovery) */
	for (i = 0; i < c->jhead_cnt; i++) {
		err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
		if (err)
			goto out_up;
	}

	c->cmt_no += 1;
	err = ubifs_gc_start_commit(c);
	if (err)
		goto out_up;
	err = dbg_check_lprops(c);
	if (err)
		goto out_up;
	err = ubifs_log_start_commit(c, &new_ltail_lnum);
	if (err)
		goto out_up;
	err = ubifs_tnc_start_commit(c, &zroot);
	if (err)
		goto out_up;
	err = ubifs_lpt_start_commit(c);
	if (err)
		goto out_up;
	err = ubifs_orphan_start_commit(c);
	if (err)
		goto out_up;

	ubifs_get_lp_stats(c, &lst);

	up_write(&c->commit_sem);

	err = ubifs_tnc_end_commit(c);
	if (err)
		goto out;
	err = ubifs_lpt_end_commit(c);
	if (err)
		goto out;
	err = ubifs_orphan_end_commit(c);
	if (err)
		goto out;
	err = dbg_check_old_index(c, &zroot);
	if (err)
		goto out;
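
	/*
	 * The new index, LPT and orphan information are now on the flash, so
	 * update the master node to refer to them and to record the new
	 * lprops statistics.
	 */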
	c->mst_node->cmt_no      = cpu_to_le64(c->cmt_no);
	c->mst_node->log_lnum    = cpu_to_le32(new_ltail_lnum);
	c->mst_node->root_lnum   = cpu_to_le32(zroot.lnum);
	c->mst_node->root_offs   = cpu_to_le32(zroot.offs);
	c->mst_node->root_len    = cpu_to_le32(zroot.len);
	c->mst_node->ihead_lnum  = cpu_to_le32(c->ihead_lnum);
	c->mst_node->ihead_offs  = cpu_to_le32(c->ihead_offs);
	c->mst_node->index_size  = cpu_to_le64(c->bi.old_idx_sz);
	c->mst_node->lpt_lnum    = cpu_to_le32(c->lpt_lnum);
	c->mst_node->lpt_offs    = cpu_to_le32(c->lpt_offs);
	c->mst_node->nhead_lnum  = cpu_to_le32(c->nhead_lnum);
	c->mst_node->nhead_offs  = cpu_to_le32(c->nhead_offs);
	c->mst_node->ltab_lnum   = cpu_to_le32(c->ltab_lnum);
	c->mst_node->ltab_offs   = cpu_to_le32(c->ltab_offs);
	c->mst_node->lsave_lnum  = cpu_to_le32(c->lsave_lnum);
	c->mst_node->lsave_offs  = cpu_to_le32(c->lsave_offs);
	c->mst_node->lscan_lnum  = cpu_to_le32(c->lscan_lnum);
	c->mst_node->empty_lebs  = cpu_to_le32(lst.empty_lebs);
	c->mst_node->idx_lebs    = cpu_to_le32(lst.idx_lebs);
	c->mst_node->total_free  = cpu_to_le64(lst.total_free);
	c->mst_node->total_dirty = cpu_to_le64(lst.total_dirty);
	c->mst_node->total_used  = cpu_to_le64(lst.total_used);
	c->mst_node->total_dead  = cpu_to_le64(lst.total_dead);
	c->mst_node->total_dark  = cpu_to_le64(lst.total_dark);
	if (c->no_orphs)
		c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS);
	else
		c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_NO_ORPHS);

	old_ltail_lnum = c->ltail_lnum;
	err = ubifs_log_end_commit(c, new_ltail_lnum);
	if (err)
		goto out;

	err = ubifs_log_post_commit(c, old_ltail_lnum);
	if (err)
		goto out;
	err = ubifs_gc_end_commit(c);
	if (err)
		goto out;
	err = ubifs_lpt_post_commit(c);
	if (err)
		goto out;

out_cancel:
	spin_lock(&c->cs_lock);
	c->cmt_state = COMMIT_RESTING;
	wake_up(&c->cmt_wq);
	dbg_cmt("commit end");
	spin_unlock(&c->cs_lock);
	return 0;

out_up:
	up_write(&c->commit_sem);
out:
	ubifs_err(c, "commit failed, error %d", err);
	spin_lock(&c->cs_lock);
	c->cmt_state = COMMIT_BROKEN;
	wake_up(&c->cmt_wq);
	spin_unlock(&c->cs_lock);
	ubifs_ro_mode(c, err);
	return err;
}

/**
 * run_bg_commit - run background commit if it is needed.
 * @c: UBIFS file-system description object
 *
 * This function runs background commit if it is needed. Returns zero in case
 * of success and a negative error code in case of failure.
 */
static int run_bg_commit(struct ubifs_info *c)
{
	spin_lock(&c->cs_lock);
	/*
	 * Run background commit only if background commit was requested or if
	 * commit is required.
	 */
	if (c->cmt_state != COMMIT_BACKGROUND &&
	    c->cmt_state != COMMIT_REQUIRED)
		goto out;
	spin_unlock(&c->cs_lock);
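
	/*
	 * Take the commit semaphore and then re-check the commit state under
	 * @c->cs_lock - it may have changed while we did not hold the
	 * spinlock, in which case a commit may no longer be needed and we
	 * just back out.
	 */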
	down_write(&c->commit_sem);
	spin_lock(&c->cs_lock);
	if (c->cmt_state == COMMIT_REQUIRED)
		c->cmt_state = COMMIT_RUNNING_REQUIRED;
	else if (c->cmt_state == COMMIT_BACKGROUND)
		c->cmt_state = COMMIT_RUNNING_BACKGROUND;
	else
		goto out_cmt_unlock;
	spin_unlock(&c->cs_lock);

	return do_commit(c);

out_cmt_unlock:
	up_write(&c->commit_sem);
out:
	spin_unlock(&c->cs_lock);
	return 0;
}

/**
 * ubifs_bg_thread - UBIFS background thread function.
 * @info: points to the file-system description object
 *
 * This function implements various file-system background activities:
 * o when a write-buffer timer expires it synchronizes the appropriate
 *   write-buffer;
 * o when the journal is about to be full, it starts an in-advance commit.
 *
 * Note, other stuff like background garbage collection may be added here in
 * the future.
 */
int ubifs_bg_thread(void *info)
{
	int err;
	struct ubifs_info *c = info;

	ubifs_msg(c, "background thread \"%s\" started, PID %d",
		  c->bgt_name, current->pid);
	set_freezable();

	while (1) {
		if (kthread_should_stop())
			break;

		if (try_to_freeze())
			continue;

		set_current_state(TASK_INTERRUPTIBLE);
		/* Check if there is something to do */
		if (!c->need_bgt) {
			/*
			 * Nothing prevents us from going to sleep now and
			 * never being woken up, which would block a task
			 * waiting in 'kthread_stop()' forever.
			 */
			if (kthread_should_stop())
				break;
			schedule();
			continue;
		} else
			__set_current_state(TASK_RUNNING);

		c->need_bgt = 0;
		err = ubifs_bg_wbufs_sync(c);
		if (err)
			ubifs_ro_mode(c, err);

		run_bg_commit(c);
		cond_resched();
	}

	ubifs_msg(c, "background thread \"%s\" stops", c->bgt_name);
	return 0;
}

/**
 * ubifs_commit_required - set commit state to "required".
 * @c: UBIFS file-system description object
 *
 * This function is called if a commit is required but cannot be done from the
 * calling function, so it is just flagged instead.
 */
void ubifs_commit_required(struct ubifs_info *c)
{
	spin_lock(&c->cs_lock);
	switch (c->cmt_state) {
	case COMMIT_RESTING:
	case COMMIT_BACKGROUND:
		dbg_cmt("old: %s, new: %s", dbg_cstate(c->cmt_state),
			dbg_cstate(COMMIT_REQUIRED));
		c->cmt_state = COMMIT_REQUIRED;
		break;
	case COMMIT_RUNNING_BACKGROUND:
		dbg_cmt("old: %s, new: %s", dbg_cstate(c->cmt_state),
			dbg_cstate(COMMIT_RUNNING_REQUIRED));
		c->cmt_state = COMMIT_RUNNING_REQUIRED;
		break;
	case COMMIT_REQUIRED:
	case COMMIT_RUNNING_REQUIRED:
	case COMMIT_BROKEN:
		break;
	}
	spin_unlock(&c->cs_lock);
}

/**
 * ubifs_request_bg_commit - notify the background thread to do a commit.
 * @c: UBIFS file-system description object
 *
 * This function is called if the journal is full enough to make a commit
 * worthwhile, so the background thread is kicked to start it.
 */
void ubifs_request_bg_commit(struct ubifs_info *c)
{
	spin_lock(&c->cs_lock);
	if (c->cmt_state == COMMIT_RESTING) {
		dbg_cmt("old: %s, new: %s", dbg_cstate(c->cmt_state),
			dbg_cstate(COMMIT_BACKGROUND));
		c->cmt_state = COMMIT_BACKGROUND;
		spin_unlock(&c->cs_lock);
		ubifs_wake_up_bgt(c);
	} else
		spin_unlock(&c->cs_lock);
}

/**
 * wait_for_commit - wait for commit.
 * @c: UBIFS file-system description object
 *
 * This function sleeps until the commit operation is no longer running.
 */
static int wait_for_commit(struct ubifs_info *c)
{
	dbg_cmt("pid %d goes sleep", current->pid);

	/*
	 * The following sleeps if the condition is false, and will be woken
	 * when the commit ends. It is possible, although very unlikely, that we
	 * will wake up and see the subsequent commit running, rather than the
	 * one we were waiting for, and go back to sleep. However, we will be
	 * woken again, so there is no danger of sleeping forever.
	 */
	wait_event(c->cmt_wq, c->cmt_state != COMMIT_RUNNING_BACKGROUND &&
			      c->cmt_state != COMMIT_RUNNING_REQUIRED);
	dbg_cmt("commit finished, pid %d woke up", current->pid);
	return 0;
}

/**
 * ubifs_run_commit - run or wait for commit.
 * @c: UBIFS file-system description object
 *
 * This function runs commit and returns zero in case of success and a negative
 * error code in case of failure.
 */
int ubifs_run_commit(struct ubifs_info *c)
{
	int err = 0;

	spin_lock(&c->cs_lock);
	if (c->cmt_state == COMMIT_BROKEN) {
		err = -EROFS;
		goto out;
	}

	if (c->cmt_state == COMMIT_RUNNING_BACKGROUND)
		/*
		 * We set the commit state to 'running required' to indicate
		 * that we want it to complete as quickly as possible.
		 */
		c->cmt_state = COMMIT_RUNNING_REQUIRED;

	if (c->cmt_state == COMMIT_RUNNING_REQUIRED) {
		spin_unlock(&c->cs_lock);
		return wait_for_commit(c);
	}
	spin_unlock(&c->cs_lock);

	/* Ok, the commit is indeed needed */

	down_write(&c->commit_sem);
	spin_lock(&c->cs_lock);
	/*
	 * Since we unlocked 'c->cs_lock', the state may have changed, so
	 * re-check it.
	 */
	if (c->cmt_state == COMMIT_BROKEN) {
		err = -EROFS;
		goto out_cmt_unlock;
	}

	if (c->cmt_state == COMMIT_RUNNING_BACKGROUND)
		c->cmt_state = COMMIT_RUNNING_REQUIRED;

	if (c->cmt_state == COMMIT_RUNNING_REQUIRED) {
		up_write(&c->commit_sem);
		spin_unlock(&c->cs_lock);
		return wait_for_commit(c);
	}
	c->cmt_state = COMMIT_RUNNING_REQUIRED;
	spin_unlock(&c->cs_lock);

	err = do_commit(c);
	return err;

out_cmt_unlock:
	up_write(&c->commit_sem);
out:
	spin_unlock(&c->cs_lock);
	return err;
}

/**
 * ubifs_gc_should_commit - determine if it is time for GC to run commit.
 * @c: UBIFS file-system description object
 *
 * This function is called by garbage collection to determine if commit should
 * be run. If commit state is @COMMIT_BACKGROUND, which means that the journal
 * is full enough to start commit, this function returns true. It is not
 * absolutely necessary to commit yet, but it is probably better to commit now
 * than to keep doing GC. This function returns %1 if GC has to initiate commit
 * and %0 if not.
 */
int ubifs_gc_should_commit(struct ubifs_info *c)
{
	int ret = 0;

	spin_lock(&c->cs_lock);
	if (c->cmt_state == COMMIT_BACKGROUND) {
		dbg_cmt("commit required now");
		c->cmt_state = COMMIT_REQUIRED;
	} else
		dbg_cmt("commit not requested");
	if (c->cmt_state == COMMIT_REQUIRED)
		ret = 1;
	spin_unlock(&c->cs_lock);
	return ret;
}

/*
 * Everything below is related to debugging.
 */

/**
 * struct idx_node - hold index nodes during index tree traversal.
 * @list: list
 * @iip: index in parent (slot number of this indexing node in the parent
 *       indexing node)
 * @upper_key: all keys in this indexing node have to be less than or equal to
 *             this key
 * @idx: index node (8-byte aligned because all node structures must be 8-byte
 *       aligned)
 */
struct idx_node {
	struct list_head list;
	int iip;
	union ubifs_key upper_key;
	struct ubifs_idx_node idx __aligned(8);
};

/**
 * dbg_old_index_check_init - get information for the next old index check.
 * @c: UBIFS file-system description object
 * @zroot: root of the index
 *
 * This function records information about the index that will be needed for the
 * next old index check, i.e. 'dbg_check_old_index()'.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int dbg_old_index_check_init(struct ubifs_info *c, struct ubifs_zbranch *zroot)
{
	struct ubifs_idx_node *idx;
	int lnum, offs, len, err = 0;
	struct ubifs_debug_info *d = c->dbg;

	d->old_zroot = *zroot;
	lnum = d->old_zroot.lnum;
	offs = d->old_zroot.offs;
	len = d->old_zroot.len;

	idx = kmalloc(c->max_idx_node_sz, GFP_NOFS);
	if (!idx)
		return -ENOMEM;

	err = ubifs_read_node(c, idx, UBIFS_IDX_NODE, len, lnum, offs);
	if (err)
		goto out;

	d->old_zroot_level = le16_to_cpu(idx->level);
	d->old_zroot_sqnum = le64_to_cpu(idx->ch.sqnum);
out:
	kfree(idx);
	return err;
}

/**
 * dbg_check_old_index - check the old copy of the index.
 * @c: UBIFS file-system description object
 * @zroot: root of the new index
 *
 * In order to be able to recover from an unclean unmount, a complete copy of
 * the index must exist on flash. This is the "old" index. The commit process
 * must write the "new" index to flash without overwriting or destroying any
 * part of the old index. This function is run at commit end in order to check
 * that the old index does indeed exist completely intact.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int dbg_check_old_index(struct ubifs_info *c, struct ubifs_zbranch *zroot)
{
	int lnum, offs, len, err = 0, last_level, child_cnt;
	int first = 1, iip;
	struct ubifs_debug_info *d = c->dbg;
	union ubifs_key lower_key, upper_key, l_key, u_key;
	unsigned long long last_sqnum;
	struct ubifs_idx_node *idx;
	struct list_head list;
	struct idx_node *i;
	size_t sz;

	if (!dbg_is_chk_index(c))
		return 0;

	INIT_LIST_HEAD(&list);

	sz = sizeof(struct idx_node) + ubifs_idx_node_sz(c, c->fanout) -
	     UBIFS_IDX_NODE_SZ;

	/* Start at the old zroot */
	lnum = d->old_zroot.lnum;
	offs = d->old_zroot.offs;
	len = d->old_zroot.len;
	iip = 0;

	/*
	 * Traverse the index tree preorder depth-first, i.e. do a node and then
	 * its subtrees from left to right.
	 */
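	/*
	 * While traversing, @lower_key and @upper_key bound the keys that are
	 * allowed to appear in the child node about to be read: @lower_key is
	 * the key of the branch that points to it, and @upper_key is the key
	 * of the next branch in the parent (or the parent's own upper bound
	 * for the last branch).
	 */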
	while (1) {
		struct ubifs_branch *br;

		/* Get the next index node */
		i = kmalloc(sz, GFP_NOFS);
		if (!i) {
			err = -ENOMEM;
			goto out_free;
		}
		i->iip = iip;
		/* Keep the index nodes on our path in a linked list */
		list_add_tail(&i->list, &list);
		/* Read the index node */
		idx = &i->idx;
		err = ubifs_read_node(c, idx, UBIFS_IDX_NODE, len, lnum, offs);
		if (err)
			goto out_free;
		/* Validate index node */
		child_cnt = le16_to_cpu(idx->child_cnt);
		if (child_cnt < 1 || child_cnt > c->fanout) {
			err = 1;
			goto out_dump;
		}
		if (first) {
			first = 0;
			/* Check root level and sqnum */
			if (le16_to_cpu(idx->level) != d->old_zroot_level) {
				err = 2;
				goto out_dump;
			}
			if (le64_to_cpu(idx->ch.sqnum) != d->old_zroot_sqnum) {
				err = 3;
				goto out_dump;
			}
			/* Set last values as though root had a parent */
			last_level = le16_to_cpu(idx->level) + 1;
			last_sqnum = le64_to_cpu(idx->ch.sqnum) + 1;
			key_read(c, ubifs_idx_key(c, idx), &lower_key);
			highest_ino_key(c, &upper_key, INUM_WATERMARK);
		}
		key_copy(c, &upper_key, &i->upper_key);
		if (le16_to_cpu(idx->level) != last_level - 1) {
			err = 3;
			goto out_dump;
		}
		/*
		 * The index is always written bottom up, hence a child's sqnum
		 * is always less than the parent's.
		 */
		if (le64_to_cpu(idx->ch.sqnum) >= last_sqnum) {
			err = 4;
			goto out_dump;
		}
		/* Check key range */
		key_read(c, ubifs_idx_key(c, idx), &l_key);
		br = ubifs_idx_branch(c, idx, child_cnt - 1);
		key_read(c, &br->key, &u_key);
		if (keys_cmp(c, &lower_key, &l_key) > 0) {
			err = 5;
			goto out_dump;
		}
		if (keys_cmp(c, &upper_key, &u_key) < 0) {
			err = 6;
			goto out_dump;
		}
		if (keys_cmp(c, &upper_key, &u_key) == 0)
			if (!is_hash_key(c, &u_key)) {
				err = 7;
				goto out_dump;
			}
		/* Go to next index node */
		if (le16_to_cpu(idx->level) == 0) {
			/* At the bottom, so go up until we can go right */
			while (1) {
				/* Drop the bottom of the list */
				list_del(&i->list);
				kfree(i);
				/* No more list means we are done */
				if (list_empty(&list))
					goto out;
				/* Look at the new bottom */
				i = list_entry(list.prev, struct idx_node,
					       list);
				idx = &i->idx;
				/* Can we go right? */
				if (iip + 1 < le16_to_cpu(idx->child_cnt)) {
					iip = iip + 1;
					break;
				} else
					/* Nope, so go up again */
					iip = i->iip;
			}
		} else
			/* Go down left */
			iip = 0;
		/*
		 * We have the parent in 'idx' and now we set up for reading the
		 * child pointed to by slot 'iip'.
		 */
		last_level = le16_to_cpu(idx->level);
		last_sqnum = le64_to_cpu(idx->ch.sqnum);
		br = ubifs_idx_branch(c, idx, iip);
		lnum = le32_to_cpu(br->lnum);
		offs = le32_to_cpu(br->offs);
		len = le32_to_cpu(br->len);
		key_read(c, &br->key, &lower_key);
		if (iip + 1 < le16_to_cpu(idx->child_cnt)) {
			br = ubifs_idx_branch(c, idx, iip + 1);
			key_read(c, &br->key, &upper_key);
		} else
			key_copy(c, &i->upper_key, &upper_key);
	}
out:
	err = dbg_old_index_check_init(c, zroot);
	if (err)
		goto out_free;

	return 0;

out_dump:
	ubifs_err(c, "dumping index node (iip=%d)", i->iip);
	ubifs_dump_node(c, idx, ubifs_idx_node_sz(c, c->fanout));
	list_del(&i->list);
	kfree(i);
	if (!list_empty(&list)) {
		i = list_entry(list.prev, struct idx_node, list);
		ubifs_err(c, "dumping parent index node");
		ubifs_dump_node(c, &i->idx, ubifs_idx_node_sz(c, c->fanout));
	}
out_free:
	while (!list_empty(&list)) {
		i = list_entry(list.next, struct idx_node, list);
		list_del(&i->list);
		kfree(i);
	}
	ubifs_err(c, "failed, error %d", err);
	if (err > 0)
		err = -EINVAL;
	return err;
}