// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 */


/*
 * mballoc.c contains the multiblocks allocation routines
 */

#include "ext4_jbd2.h"
#include "mballoc.h"
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>
#include <trace/events/ext4.h>
#include <kunit/static_stub.h>

/*
 * MUSTDO:
 *   - test ext4_ext_search_left() and ext4_ext_search_right()
 *   - search for metadata in few groups
 *
 * TODO v4:
 *   - normalization should take into account whether file is still open
 *   - discard preallocations if no free space left (policy?)
 *   - don't normalize tails
 *   - quota
 *   - reservation for superuser
 *
 * TODO v3:
 *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
 *   - track min/max extents in each group for better group selection
 *   - mb_mark_used() may allocate chunk right after splitting buddy
 *   - tree of groups sorted by number of free blocks
 *   - error handling
 */

/*
 * An allocation request involves a request for multiple blocks near to the
 * specified goal block.
 *
 * During the initialization phase of the allocator we decide to use the
 * group preallocation or inode preallocation depending on the size of
 * the file. The size of the file could be the resulting file size we
 * would have after allocation, or the current file size, whichever
 * is larger. If the size is less than sbi->s_mb_stream_request we
 * select to use the group preallocation. The default value of
 * s_mb_stream_request is 16 blocks. This can also be tuned via
 * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
 * terms of number of blocks.
 *
 * The main motivation for having small files use group preallocation is to
 * ensure that we have small files closer together on the disk.
 *
 * In the first stage the allocator looks at the inode prealloc list,
 * ext4_inode_info->i_prealloc_list, which contains the list of prealloc
 * spaces for this particular inode. The inode prealloc space is
 * represented as:
 *
 * pa_lstart -> the logical start block for this prealloc space
 * pa_pstart -> the physical start block for this prealloc space
 * pa_len    -> length for this prealloc space (in clusters)
 * pa_free   -> free space available in this prealloc space (in clusters)
 *
 * The inode preallocation space is used looking at the _logical_ start
 * block. Only if the logical file block falls within the range of the
 * prealloc space do we consume that particular prealloc space. This makes
 * sure that we have contiguous physical blocks representing the file blocks.
 *
 * The important thing to be noted in case of inode prealloc space is that
 * we don't modify the values associated with the inode prealloc space
 * except pa_free.
 *
 * If we are not able to find blocks in the inode prealloc space and if we
 * have the group allocation flag set then we look at the locality group
 * prealloc space. These are per-CPU prealloc lists represented as
 *
 * ext4_sb_info.s_locality_groups[smp_processor_id()]
 *
 * The reason for having a per-CPU locality group is to reduce the contention
 * between CPUs. It is possible to get scheduled at this point.
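 *
 * As an illustrative sketch of the decision above (hypothetical helper
 * names; the real choice is made while setting up the allocation context):
 *
 *	size = max(size_after_allocation, current_file_size);	// in blocks
 *	if (size < sbi->s_mb_stream_request)	// default: 16 blocks
 *		use_locality_group_prealloc();	// hypothetical
 *	else
 *		use_inode_prealloc();		// hypothetical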
 *
 * The locality group prealloc space is used looking at whether we have
 * enough free space (pa_free) within the prealloc space.
 *
 * If we can't allocate blocks via inode prealloc or/and locality group
 * prealloc then we look at the buddy cache. The buddy cache is represented
 * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets
 * mapped to the buddy and bitmap information regarding different
 * groups. The buddy information is attached to the buddy cache inode so
 * that we can access them through the page cache. The information regarding
 * each group is loaded via ext4_mb_load_buddy. The information involves the
 * block bitmap and buddy information, and is stored in the inode as:
 *
 * {                        page                        }
 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information. So for each group we
 * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE /
 * blocksize) blocks. So it can have information regarding
 * groups_per_page, which is blocks_per_page/2.
 *
 * The buddy cache inode is not stored on disk. The inode is thrown
 * away when the filesystem is unmounted.
 *
 * We look for count number of blocks in the buddy cache. If we were able
 * to locate that many free blocks we return with additional information
 * regarding the rest of the contiguous physical blocks available.
 *
 * Before allocating blocks via the buddy cache we normalize the request
 * blocks. This ensures we ask for more blocks than we need. The extra
 * blocks that we get after allocation are added to the respective prealloc
 * list. In case of inode preallocation we follow a list of heuristics
 * based on file size. This can be found in ext4_mb_normalize_request. If
 * we are doing a group prealloc we try to normalize the request to
 * sbi->s_mb_group_prealloc. The default value of s_mb_group_prealloc is
 * dependent on the cluster size; for non-bigalloc file systems, it is
 * 512 blocks. This can be tuned via
 * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
 * terms of number of blocks. If we have mounted the file system with -O
 * stripe=<value> option the group prealloc request is normalized to the
 * smallest multiple of the stripe value (sbi->s_stripe) which is
 * greater than the default mb_group_prealloc.
 *
 * If the "mb_optimize_scan" mount option is set, we maintain in-memory
 * group info structures in two data structures:
 *
 * 1) Array of largest free order lists (sbi->s_mb_largest_free_orders)
 *
 *    Locking: sbi->s_mb_largest_free_orders_locks (array of rw locks)
 *
 *    This is an array of lists where the index in the array represents the
 *    largest free order in the buddy bitmap of the participating group infos
 *    of that list. So, there are exactly MB_NUM_ORDERS(sb) (which means the
 *    total number of buddy bitmap orders possible) number of lists.
 *    Group-infos are placed in appropriate lists.
 *
 * 2) Average fragment size lists (sbi->s_mb_avg_fragment_size)
 *
 *    Locking: sbi->s_mb_avg_fragment_size_locks (array of rw locks)
 *
 *    This is an array of lists where in the i-th list there are groups with
 *    average fragment size >= 2^i and < 2^(i+1). The average fragment size
 *    is computed as ext4_group_info->bb_free / ext4_group_info->bb_fragments.
 *    Note that we don't bother with a special list for completely empty
 *    groups so we only have MB_NUM_ORDERS(sb) lists.
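 *
 *    A worked example of the list selection (just arithmetic, not code):
 *    a group with bb_free = 300 clusters spread over bb_fragments = 10
 *    free extents has an average fragment size of 30 clusters, and
 *    2^4 <= 30 < 2^5, so it lives on list index 4.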
 *
 * When the "mb_optimize_scan" mount option is set, mballoc consults the above
 * data structures to decide the order in which groups are to be traversed for
 * fulfilling an allocation request.
 *
 * At CR_POWER2_ALIGNED, we look for groups which have a largest_free_order
 * >= the order of the request. We directly look at the largest free order
 * list in the data structure (1) above where largest_free_order = order of
 * the request. If that list is empty, we look at the remaining lists in the
 * increasing order of largest_free_order. This allows us to perform the
 * CR_POWER2_ALIGNED lookup in O(1) time.
 *
 * At CR_GOAL_LEN_FAST, we only consider groups where the
 * average fragment size > request size. So, we lookup a group which has an
 * average fragment size just above or equal to the request size using our
 * average fragment size group lists (data structure 2) in O(1) time.
 *
 * At CR_BEST_AVAIL_LEN, we aim to optimize allocations which can't be
 * satisfied in CR_GOAL_LEN_FAST. The fact that we couldn't find a group in
 * CR_GOAL_LEN_FAST suggests that there is no BG that has an average
 * fragment size > goal length. So before falling back to the slower
 * CR_GOAL_LEN_SLOW, in CR_BEST_AVAIL_LEN we proactively trim the goal length
 * and then use the same fragment lists as CR_GOAL_LEN_FAST to find a BG with
 * a big enough average fragment size. This increases the chances of finding
 * a suitable block group in O(1) time and results in faster allocation at
 * the cost of reduced size of allocation.
 *
 * If the "mb_optimize_scan" mount option is not set, mballoc traverses groups
 * in linear order, which requires O(N) search time for each CR_POWER2_ALIGNED
 * and CR_GOAL_LEN_FAST phase.
 *
 * The regular allocator (using the buddy cache) supports a few tunables.
 *
 * /sys/fs/ext4/<partition>/mb_min_to_scan
 * /sys/fs/ext4/<partition>/mb_max_to_scan
 * /sys/fs/ext4/<partition>/mb_order2_req
 * /sys/fs/ext4/<partition>/mb_linear_limit
 *
 * The regular allocator uses a buddy scan only if the request len is a power
 * of 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs. The
 * value of s_mb_order2_reqs can be tuned via
 * /sys/fs/ext4/<partition>/mb_order2_req. If the request len is equal to
 * the stripe size (sbi->s_stripe), we try to search for contiguous blocks in
 * stripe size. This should result in better allocation on RAID setups. If
 * not, we search in the specific group using the bitmap for best extents.
 * The tunables min_to_scan and max_to_scan control the behaviour here.
 * min_to_scan indicates how long mballoc __must__ look for a best
 * extent and max_to_scan indicates how long mballoc __can__ look for a
 * best extent in the found extents. Searching for the blocks starts with
 * the group specified as the goal value in the allocation context via
 * ac_g_ex. Each group is first checked based on the criteria whether it
 * can be used for allocation. ext4_mb_good_group explains how the groups are
 * checked.
 *
 * When "mb_optimize_scan" is turned on, as mentioned above, the groups may
 * not get traversed linearly. That may result in subsequent allocations not
 * being close to each other. And so, the underlying device may get filled up
 * in a non-linear fashion. While that may not matter on non-rotational
 * devices, for rotational devices that may result in higher seek times.
 * "mb_linear_limit" tells mballoc how many groups it should search linearly
 * before consulting the above data structures for more efficient lookups.
 * For non-rotational devices, this value defaults to 0 and for rotational
 * devices this is set to MB_DEFAULT_LINEAR_LIMIT.
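 *
 * A minimal sketch of the buddy-scan eligibility check described above
 * (using real helpers from <linux/log2.h>; "len" stands for the request
 * length in blocks and "use_buddy_scan" is illustrative):
 *
 *	use_buddy_scan = is_power_of_2(len) &&
 *			 ilog2(len) >= sbi->s_mb_order2_reqs;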
 *
 * Both of the prealloc spaces are populated as described above. So for the
 * first request we will hit the buddy cache, which will result in this
 * prealloc space getting filled. The prealloc space is then later used for
 * subsequent requests.
 */

/*
 * mballoc operates on the following data:
 *  - on-disk bitmap
 *  - in-core buddy (actually includes buddy and bitmap)
 *  - preallocation descriptors (PAs)
 *
 * there are two types of preallocations:
 *  - inode
 *    assigned to a specific inode and can be used for this inode only.
 *    it describes part of the inode's space preallocated to specific
 *    physical blocks. any block from that preallocation can be used
 *    independently. the descriptor just tracks the number of blocks left
 *    unused. so, before taking some block from the descriptor, one must
 *    make sure the corresponding logical block isn't allocated yet. this
 *    also means that freeing any block within the descriptor's range
 *    must discard all preallocated blocks.
 *  - locality group
 *    assigned to a specific locality group which does not translate to a
 *    permanent set of inodes: an inode can join and leave a group. space
 *    from this type of preallocation can be used for any inode. thus
 *    it's consumed from the beginning to the end.
 *
 * the relation between them can be expressed as:
 *    in-core buddy = on-disk bitmap + preallocation descriptors
 *
 * this means the blocks mballoc considers used are:
 *  - allocated blocks (persistent)
 *  - preallocated blocks (non-persistent)
 *
 * consistency in the mballoc world means that at any time a block is either
 * free or used in ALL structures. notice: "any time" should not be read
 * literally -- time is discrete and delimited by locks.
 *
 * to keep it simple, we don't use block numbers, instead we count number of
 * blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA.
 *
 * all operations can be expressed as:
 *  - init buddy:			buddy = on-disk + PAs
 *  - new PA:				buddy += N; PA = N
 *  - use inode PA:			on-disk += N; PA -= N
 *  - discard inode PA:			buddy -= on-disk - PA; PA = 0
 *  - use locality group PA:		on-disk += N; PA -= N
 *  - discard locality group PA:	buddy -= PA; PA = 0
 *  note: 'buddy -= on-disk - PA' is used to show that the on-disk bitmap
 *        is used in the real operation because we can't know the actual
 *        used bits from the PA, only from the on-disk bitmap
 *
 * if we follow this strict logic, then all operations above should be atomic.
 * given some of them can block, we'd have to use something like semaphores,
 * killing performance on high-end SMP hardware. let's try to relax it using
 * the following knowledge:
 *  1) if buddy is referenced, it's already initialized
 *  2) while a block is used in buddy and the buddy is referenced,
 *     nobody can re-allocate that block
 *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
 *     a bit set and a PA claims the same block, it's OK. IOW, one can set a
 *     bit in the on-disk bitmap if the buddy has the same bit set or/and a
 *     PA covers the corresponding block
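 *
 * a worked example of the accounting above (counts only, no block numbers):
 * init buddy from an on-disk bitmap with 100 used blocks plus one PA of 10
 * => buddy shows 110 used. "use inode PA" for N = 4 then moves 4 blocks
 * from the PA to the on-disk bitmap: on-disk = 104, PA = 6, buddy still
 * 110. discarding the PA drops the 6 never-used blocks from the buddy:
 * buddy = 104 = on-disk.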
 *
 * so, now we're building a concurrency table:
 *  - init buddy vs.
 *    - new PA
 *      blocks for PA are allocated in the buddy, buddy must be referenced
 *      until PA is linked to allocation group to avoid concurrent buddy init
 *    - use inode PA
 *      we need to make sure that either on-disk bitmap or PA has uptodate
 *      data. given (3) we care that the PA-=N operation doesn't interfere
 *      with init
 *    - discard inode PA
 *      the simplest way would be to have buddy initialized by the discard
 *    - use locality group PA
 *      again PA-=N must be serialized with init
 *    - discard locality group PA
 *      the simplest way would be to have buddy initialized by the discard
 *  - new PA vs.
 *    - use inode PA
 *      i_data_sem serializes them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      some mutex should serialize them
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *  - use inode PA vs.
 *    - use inode PA
 *      i_data_sem or another mutex should serialize them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      nothing wrong here -- they're different PAs covering different blocks
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *
 * now we're ready to draw a few conclusions:
 *  - while a PA is referenced, no discard of it is possible
 *  - a PA is referenced until the block is marked in the on-disk bitmap
 *  - a PA changes only after the on-disk bitmap does
 *  - discard must not compete with init. either init is done before
 *    any discard or they're serialized somehow
 *  - buddy init as sum of on-disk bitmap and PAs is done atomically
 *
 * a special case is when we've used a PA to emptiness. no need to modify
 * the buddy in this case, but we should care about concurrent init
 *
 */
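/*
 * A minimal sketch, under the rules above, of how a "use inode PA" step
 * serializes against discard via the per-PA lock. The names here --
 * pa_lock, pa_deleted, pa_free, ext4_mb_use_inode_pa() -- are the ones
 * this file uses later; treat this as illustration, not the real code:
 *
 *	spin_lock(&pa->pa_lock);
 *	if (pa->pa_deleted == 0 && pa->pa_free >= needed) {
 *		pa->pa_free -= needed;	// the "PA -= N" step
 *		got = true;		// discard must now wait for us
 *	}
 *	spin_unlock(&pa->pa_lock);
 *	// "on-disk += N" happens afterwards under the group's bitlock
 */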

/*
 * Logic in a few words:
 *
 *  - allocation:
 *    load group
 *    find blocks
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - use preallocation:
 *    find proper PA (per-inode or group)
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *    release PA
 *
 *  - free:
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - discard preallocations in group:
 *    mark PAs deleted
 *    move them onto local list
 *    load on-disk bitmap
 *    load group
 *    remove PA from object (inode or locality group)
 *    mark free blocks in-core
 *
 *  - discard inode's preallocations:
 */

/*
 * Locking rules
 *
 * Locks:
 *  - bitlock on a group			(group)
 *  - object (inode/locality)			(object)
 *  - per-pa lock				(pa)
 *  - cr_power2_aligned lists lock		(cr_power2_aligned)
 *  - cr_goal_len_fast lists lock		(cr_goal_len_fast)
 *
 * Paths:
 *  - new pa
 *    object
 *    group
 *
 *  - find and use pa:
 *    pa
 *
 *  - release consumed pa:
 *    pa
 *    group
 *    object
 *
 *  - generate in-core bitmap:
 *    group
 *    pa
 *
 *  - discard all for given object (inode, locality group):
 *    object
 *    pa
 *    group
 *
 *  - discard all for given group:
 *    group
 *    pa
 *    group
 *    object
 *
 *  - allocation path (ext4_mb_regular_allocator)
 *    group
 *    cr_power2_aligned/cr_goal_len_fast
 */
static struct kmem_cache *ext4_pspace_cachep;
static struct kmem_cache *ext4_ac_cachep;
static struct kmem_cache *ext4_free_data_cachep;

/* We create slab caches for groupinfo data structures based on the
 * superblock block size. There will be one per mounted filesystem for
 * each unique s_blocksize_bits */
#define NR_GRPINFO_CACHES 8
static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];

static const char * const ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
	"ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
	"ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
	"ext4_groupinfo_64k", "ext4_groupinfo_128k"
};

static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
					ext4_group_t group);
static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac);

static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
			       ext4_group_t group, enum criteria cr);

static int ext4_try_to_trim_range(struct super_block *sb,
		struct ext4_buddy *e4b, ext4_grpblk_t start,
		ext4_grpblk_t max, ext4_grpblk_t minblocks);

/*
 * The algorithm using this percpu seq counter goes as follows:
 * 1. We sample the percpu discard_pa_seq counter before trying for block
 *    allocation in ext4_mb_new_blocks().
 * 2. We increment this percpu discard_pa_seq counter when we either allocate
 *    or free these blocks i.e. while marking those blocks as used/free in
 *    mb_mark_used()/mb_free_blocks().
 * 3. We also increment this percpu seq counter when we successfully identify
 *    that the bb_prealloc_list is not empty and hence proceed for discarding
 *    of those PAs inside ext4_mb_discard_group_preallocations().
 *
 * Now to make sure that the regular fast path of block allocation is not
 * affected, as a small optimization we only sample the percpu seq counter
 * on that cpu. Only when the block allocation fails and when the number of
 * freed blocks found was 0 do we sample the percpu seq counter for all cpus
 * using the function ext4_get_discard_pa_seq_sum() below. This happens after
 * making sure that all the PAs on grp->bb_prealloc_list got freed or if it's
 * empty.
 */
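/*
 * A minimal sketch of the sample/recheck pattern described above. The real
 * retry loop lives in ext4_mb_new_blocks(); "failed", "freed" and the
 * repeat label are illustrative:
 *
 *	u64 seq = this_cpu_read(discard_pa_seq);	// step 1: cheap, this cpu only
 *	// ... allocation attempt ...
 *	if (failed && freed == 0 &&
 *	    seq != ext4_get_discard_pa_seq_sum())	// slow full sum, rare path
 *		goto repeat;	// a concurrent discard/alloc raced with us
 */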
static DEFINE_PER_CPU(u64, discard_pa_seq);
static inline u64 ext4_get_discard_pa_seq_sum(void)
{
	int __cpu;
	u64 __seq = 0;

	for_each_possible_cpu(__cpu)
		__seq += per_cpu(discard_pa_seq, __cpu);
	return __seq;
}

static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
{
#if BITS_PER_LONG == 64
	*bit += ((unsigned long) addr & 7UL) << 3;
	addr = (void *) ((unsigned long) addr & ~7UL);
#elif BITS_PER_LONG == 32
	*bit += ((unsigned long) addr & 3UL) << 3;
	addr = (void *) ((unsigned long) addr & ~3UL);
#else
#error "how many bits you are?!"
#endif
	return addr;
}

static inline int mb_test_bit(int bit, void *addr)
{
	/*
	 * ext4_test_bit on architecture like powerpc
	 * needs unsigned long aligned address
	 */
	addr = mb_correct_addr_and_bit(&bit, addr);
	return ext4_test_bit(bit, addr);
}

static inline void mb_set_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_set_bit(bit, addr);
}

static inline void mb_clear_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_clear_bit(bit, addr);
}

static inline int mb_test_and_clear_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	return ext4_test_and_clear_bit(bit, addr);
}

static inline int mb_find_next_zero_bit(void *addr, int max, int start)
{
	int fix = 0, ret, tmpmax;
	addr = mb_correct_addr_and_bit(&fix, addr);
	tmpmax = max + fix;
	start += fix;

	ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
	if (ret > max)
		return max;
	return ret;
}

static inline int mb_find_next_bit(void *addr, int max, int start)
{
	int fix = 0, ret, tmpmax;
	addr = mb_correct_addr_and_bit(&fix, addr);
	tmpmax = max + fix;
	start += fix;

	ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
	if (ret > max)
		return max;
	return ret;
}
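/*
 * A worked example of the alignment fixup above, assuming
 * BITS_PER_LONG == 64 and little-endian bit numbering: for
 * addr = base + 5 (base 8-byte aligned) and bit = 2, the low three
 * address bits (5) are folded into the bit index, giving
 * bit = 2 + 5 * 8 = 42 and addr = base. Testing bit 42 at the aligned
 * base touches exactly the same bit of memory as testing bit 2 at
 * base + 5, but now the address satisfies the alignment requirement
 * noted in mb_test_bit().
 */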

static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
{
	char *bb;

	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
	BUG_ON(max == NULL);

	if (order > e4b->bd_blkbits + 1) {
		*max = 0;
		return NULL;
	}

	/* at order 0 we see each particular block */
	if (order == 0) {
		*max = 1 << (e4b->bd_blkbits + 3);
		return e4b->bd_bitmap;
	}

	bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
	*max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];

	return bb;
}

#ifdef DOUBLE_CHECK
static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
			   int first, int count)
{
	int i;
	struct super_block *sb = e4b->bd_sb;

	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
	for (i = 0; i < count; i++) {
		if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
			ext4_fsblk_t blocknr;

			blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
			blocknr += EXT4_C2B(EXT4_SB(sb), first + i);
			ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
			ext4_grp_locked_error(sb, e4b->bd_group,
					      inode ? inode->i_ino : 0,
					      blocknr,
					      "freeing block already freed "
					      "(bit %u)",
					      first + i);
		}
		mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
	}
}

static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
{
	int i;

	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
	for (i = 0; i < count; i++) {
		BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
		mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
	}
}

static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
		unsigned char *b1, *b2;
		int i;
		b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
		b2 = (unsigned char *) bitmap;
		for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
			if (b1[i] != b2[i]) {
				ext4_msg(e4b->bd_sb, KERN_ERR,
					 "corruption in group %u "
					 "at byte %u(%u): %x in copy != %x "
					 "on disk/prealloc",
					 e4b->bd_group, i, i * 8, b1[i], b2[i]);
				BUG();
			}
		}
	}
}

static void mb_group_bb_bitmap_alloc(struct super_block *sb,
			struct ext4_group_info *grp, ext4_group_t group)
{
	struct buffer_head *bh;

	grp->bb_bitmap = kmalloc(sb->s_blocksize, GFP_NOFS);
	if (!grp->bb_bitmap)
		return;

	bh = ext4_read_block_bitmap(sb, group);
	if (IS_ERR_OR_NULL(bh)) {
		kfree(grp->bb_bitmap);
		grp->bb_bitmap = NULL;
		return;
	}

	memcpy(grp->bb_bitmap, bh->b_data, sb->s_blocksize);
	put_bh(bh);
}

static void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
{
	kfree(grp->bb_bitmap);
}

#else
static inline void mb_free_blocks_double(struct inode *inode,
				struct ext4_buddy *e4b, int first, int count)
{
	return;
}
static inline void mb_mark_used_double(struct ext4_buddy *e4b,
						int first, int count)
{
	return;
}
static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
	return;
}

static inline void mb_group_bb_bitmap_alloc(struct super_block *sb,
			struct ext4_group_info *grp, ext4_group_t group)
{
	return;
}

static inline void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
{
	return;
}
#endif

#ifdef AGGRESSIVE_CHECK

#define MB_CHECK_ASSERT(assert)						\
do {									\
	if (!(assert)) {						\
		printk(KERN_EMERG					\
			"Assertion failure in %s() at %s:%d: \"%s\"\n",	\
			function, file, line, # assert);		\
		BUG();							\
	}								\
} while (0)

static void __mb_check_buddy(struct ext4_buddy *e4b, char *file,
				const char *function, int line)
{
	struct super_block *sb = e4b->bd_sb;
	int order = e4b->bd_blkbits + 1;
	int max;
	int max2;
	int i;
	int j;
	int k;
	int count;
	struct ext4_group_info *grp;
	int fragments = 0;
	int fstart;
	struct list_head *cur;
	void *buddy;
	void *buddy2;

	if (e4b->bd_info->bb_check_counter++ % 10)
		return;

	while (order > 1) {
		buddy = mb_find_buddy(e4b, order, &max);
		MB_CHECK_ASSERT(buddy);
		buddy2 = mb_find_buddy(e4b, order - 1, &max2);
		MB_CHECK_ASSERT(buddy2);
		MB_CHECK_ASSERT(buddy != buddy2);
		MB_CHECK_ASSERT(max * 2 == max2);

		count = 0;
		for (i = 0; i < max; i++) {

			if (mb_test_bit(i, buddy)) {
				/* only single bit in buddy2 may be 0 */
				if (!mb_test_bit(i << 1, buddy2)) {
					MB_CHECK_ASSERT(
						mb_test_bit((i<<1)+1, buddy2));
				}
				continue;
			}

			/* both bits in buddy2 must be 1 */
			MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
			MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));

			for (j = 0; j < (1 << order); j++) {
				k = (i * (1 << order)) + j;
				MB_CHECK_ASSERT(
					!mb_test_bit(k, e4b->bd_bitmap));
			}
			count++;
		}
		MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
		order--;
	}

	fstart = -1;
	buddy = mb_find_buddy(e4b, 0, &max);
	for (i = 0; i < max; i++) {
		if (!mb_test_bit(i, buddy)) {
			MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
			if (fstart == -1) {
				fragments++;
				fstart = i;
			}
			continue;
		}
		fstart = -1;
		/* check used bits only */
		for (j = 0; j < e4b->bd_blkbits + 1; j++) {
			buddy2 = mb_find_buddy(e4b, j, &max2);
			k = i >> j;
			MB_CHECK_ASSERT(k < max2);
			MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
		}
	}
	MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
	MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);

	grp = ext4_get_group_info(sb, e4b->bd_group);
	if (!grp)
		return;
	list_for_each(cur, &grp->bb_prealloc_list) {
		ext4_group_t groupnr;
		struct ext4_prealloc_space *pa;
		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
		MB_CHECK_ASSERT(groupnr == e4b->bd_group);
		for (i = 0; i < pa->pa_len; i++)
			MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
	}
}
#undef MB_CHECK_ASSERT
#define mb_check_buddy(e4b) __mb_check_buddy(e4b,	\
					__FILE__, __func__, __LINE__)
#else
#define mb_check_buddy(e4b)
#endif

/*
 * Divide blocks starting from @first with length @len into
 * smaller chunks with power of 2 blocks.
 * Clear the bits in bitmap which the blocks of the chunk(s) covered,
 * then increase bb_counters[] for the corresponding chunk size.
 */
static void ext4_mb_mark_free_simple(struct super_block *sb,
				void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
					struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_grpblk_t min;
	ext4_grpblk_t max;
	ext4_grpblk_t chunk;
	unsigned int border;

	BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));

	border = 2 << sb->s_blocksize_bits;

	while (len > 0) {
		/* find how many blocks can be covered since this position */
		max = ffs(first | border) - 1;

		/* find how many blocks of power 2 we need to mark */
		min = fls(len) - 1;

		if (max < min)
			min = max;
		chunk = 1 << min;

		/* mark multiblock chunks only */
		grp->bb_counters[min]++;
		if (min > 0)
			mb_clear_bit(first >> min,
				     buddy + sbi->s_mb_offsets[min]);

		len -= chunk;
		first += chunk;
	}
}
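/*
 * A worked example of the splitting above: for first = 6 and len = 10,
 * the first iteration is alignment-limited (ffs(6) - 1 = 1), so it emits
 * a chunk of 2 at block 6; the second emits a chunk of 8 at block 8.
 * That is, bb_counters[1] and bb_counters[3] each gain one, and the
 * matching order-1 and order-3 buddy bits are cleared.
 */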

static int mb_avg_fragment_size_order(struct super_block *sb, ext4_grpblk_t len)
{
	int order;

	/*
	 * We don't bother with special lists for groups whose free extents
	 * are all a single block, nor for completely empty groups.
	 */
	order = fls(len) - 2;
	if (order < 0)
		return 0;
	if (order == MB_NUM_ORDERS(sb))
		order--;
	if (WARN_ON_ONCE(order > MB_NUM_ORDERS(sb)))
		order = MB_NUM_ORDERS(sb) - 1;
	return order;
}

/* Move group to appropriate avg_fragment_size list */
static void
mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int new_order;

	if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_fragments == 0)
		return;

	new_order = mb_avg_fragment_size_order(sb,
					grp->bb_free / grp->bb_fragments);
	if (new_order == grp->bb_avg_fragment_size_order)
		return;

	if (grp->bb_avg_fragment_size_order != -1) {
		write_lock(&sbi->s_mb_avg_fragment_size_locks[
					grp->bb_avg_fragment_size_order]);
		list_del(&grp->bb_avg_fragment_size_node);
		write_unlock(&sbi->s_mb_avg_fragment_size_locks[
					grp->bb_avg_fragment_size_order]);
	}
	grp->bb_avg_fragment_size_order = new_order;
	write_lock(&sbi->s_mb_avg_fragment_size_locks[
					grp->bb_avg_fragment_size_order]);
	list_add_tail(&grp->bb_avg_fragment_size_node,
		&sbi->s_mb_avg_fragment_size[grp->bb_avg_fragment_size_order]);
	write_unlock(&sbi->s_mb_avg_fragment_size_locks[
					grp->bb_avg_fragment_size_order]);
}

/*
 * Choose next group by traversing largest_free_order lists. Updates *new_cr
 * if cr level needs an update.
 */
static void ext4_mb_choose_next_group_p2_aligned(struct ext4_allocation_context *ac,
			enum criteria *new_cr, ext4_group_t *group)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_group_info *iter;
	int i;

	if (ac->ac_status == AC_STATUS_FOUND)
		return;

	if (unlikely(sbi->s_mb_stats && ac->ac_flags & EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED))
		atomic_inc(&sbi->s_bal_p2_aligned_bad_suggestions);

	for (i = ac->ac_2order; i < MB_NUM_ORDERS(ac->ac_sb); i++) {
		if (list_empty(&sbi->s_mb_largest_free_orders[i]))
			continue;
		read_lock(&sbi->s_mb_largest_free_orders_locks[i]);
		if (list_empty(&sbi->s_mb_largest_free_orders[i])) {
			read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
			continue;
		}
		list_for_each_entry(iter, &sbi->s_mb_largest_free_orders[i],
				    bb_largest_free_order_node) {
			if (sbi->s_mb_stats)
				atomic64_inc(&sbi->s_bal_cX_groups_considered[CR_POWER2_ALIGNED]);
			if (likely(ext4_mb_good_group(ac, iter->bb_group, CR_POWER2_ALIGNED))) {
				*group = iter->bb_group;
				ac->ac_flags |= EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED;
				read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
				return;
			}
		}
		read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
	}

	/* Increment cr and search again if no group is found */
	*new_cr = CR_GOAL_LEN_FAST;
}

/*
 * Find a suitable group of given order from the average fragments list.
 */
static struct ext4_group_info *
ext4_mb_find_good_group_avg_frag_lists(struct ext4_allocation_context *ac, int order)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct list_head *frag_list = &sbi->s_mb_avg_fragment_size[order];
	rwlock_t *frag_list_lock = &sbi->s_mb_avg_fragment_size_locks[order];
	struct ext4_group_info *grp = NULL, *iter;
	enum criteria cr = ac->ac_criteria;

	if (list_empty(frag_list))
		return NULL;
	read_lock(frag_list_lock);
	if (list_empty(frag_list)) {
		read_unlock(frag_list_lock);
		return NULL;
	}
	list_for_each_entry(iter, frag_list, bb_avg_fragment_size_node) {
		if (sbi->s_mb_stats)
			atomic64_inc(&sbi->s_bal_cX_groups_considered[cr]);
		if (likely(ext4_mb_good_group(ac, iter->bb_group, cr))) {
			grp = iter;
			break;
		}
	}
	read_unlock(frag_list_lock);
	return grp;
}

/*
 * Choose next group by traversing average fragment size list of suitable
 * order. Updates *new_cr if cr level needs an update.
 */
static void ext4_mb_choose_next_group_goal_fast(struct ext4_allocation_context *ac,
		enum criteria *new_cr, ext4_group_t *group)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_group_info *grp = NULL;
	int i;

	if (unlikely(ac->ac_flags & EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED)) {
		if (sbi->s_mb_stats)
			atomic_inc(&sbi->s_bal_goal_fast_bad_suggestions);
	}

	for (i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len);
	     i < MB_NUM_ORDERS(ac->ac_sb); i++) {
		grp = ext4_mb_find_good_group_avg_frag_lists(ac, i);
		if (grp) {
			*group = grp->bb_group;
			ac->ac_flags |= EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED;
			return;
		}
	}

	/*
	 * CR_BEST_AVAIL_LEN works based on the concept that we have
	 * a larger normalized goal len request which can be trimmed to
	 * a smaller goal len such that it can still satisfy the original
	 * request len. However, allocation requests for non-regular
	 * files never get normalized.
	 * See function ext4_mb_normalize_request() (EXT4_MB_HINT_DATA).
	 */
	if (ac->ac_flags & EXT4_MB_HINT_DATA)
		*new_cr = CR_BEST_AVAIL_LEN;
	else
		*new_cr = CR_GOAL_LEN_SLOW;
}
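/*
 * A worked example of the goal trimming performed below (numbers are
 * illustrative): with a normalized goal of 1000 clusters,
 * fls(1000) - 1 = 9, so the loop tries goal lengths 512, 256, ... down to
 * a floor bounded both by s_mb_best_avail_max_trim_order and by the
 * original (un-normalized) request length, stopping at the first
 * fragment-size list that yields a good group.
 */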

/*
 * We couldn't find a group in CR_GOAL_LEN_FAST so try to find the highest
 * free fragment order we have and proactively trim the goal request length
 * to that order to find a suitable group faster.
 *
 * This optimizes allocation speed at the cost of slightly reduced
 * preallocations. However, we make sure that we don't trim the request too
 * much and fall to CR_GOAL_LEN_SLOW in that case.
 */
static void ext4_mb_choose_next_group_best_avail(struct ext4_allocation_context *ac,
		enum criteria *new_cr, ext4_group_t *group)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_group_info *grp = NULL;
	int i, order, min_order;
	unsigned long num_stripe_clusters = 0;

	if (unlikely(ac->ac_flags & EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED)) {
		if (sbi->s_mb_stats)
			atomic_inc(&sbi->s_bal_best_avail_bad_suggestions);
	}

	/*
	 * mb_avg_fragment_size_order() returns order in a way that makes
	 * retrieving back the length using (1 << order) inaccurate. Hence, use
	 * fls() instead since we need to know the actual length while modifying
	 * goal length.
	 */
	order = fls(ac->ac_g_ex.fe_len) - 1;
	if (WARN_ON_ONCE(order - 1 > MB_NUM_ORDERS(ac->ac_sb)))
		order = MB_NUM_ORDERS(ac->ac_sb);
	min_order = order - sbi->s_mb_best_avail_max_trim_order;
	if (min_order < 0)
		min_order = 0;

	if (sbi->s_stripe > 0) {
		/*
		 * We are assuming that stripe size is always a multiple of
		 * cluster ratio, otherwise __ext4_fill_super exits early.
		 */
		num_stripe_clusters = EXT4_NUM_B2C(sbi, sbi->s_stripe);
		if (1 << min_order < num_stripe_clusters)
			/*
			 * We consider 1 order less because later we round
			 * up the goal len to num_stripe_clusters
			 */
			min_order = fls(num_stripe_clusters) - 1;
	}

	if (1 << min_order < ac->ac_o_ex.fe_len)
		min_order = fls(ac->ac_o_ex.fe_len);

	for (i = order; i >= min_order; i--) {
		int frag_order;
		/*
		 * Scale down goal len to make sure we find something
		 * in the free fragments list. Basically, reduce
		 * preallocations.
		 */
		ac->ac_g_ex.fe_len = 1 << i;

		if (num_stripe_clusters > 0) {
			/*
			 * Try to round up the adjusted goal length to
			 * stripe size (in cluster units) multiple for
			 * efficiency.
			 */
			ac->ac_g_ex.fe_len = roundup(ac->ac_g_ex.fe_len,
						     num_stripe_clusters);
		}

		frag_order = mb_avg_fragment_size_order(ac->ac_sb,
							ac->ac_g_ex.fe_len);

		grp = ext4_mb_find_good_group_avg_frag_lists(ac, frag_order);
		if (grp) {
			*group = grp->bb_group;
			ac->ac_flags |= EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED;
			return;
		}
	}

	/* Reset goal length to original goal length before falling into CR_GOAL_LEN_SLOW */
	ac->ac_g_ex.fe_len = ac->ac_orig_goal_len;
	*new_cr = CR_GOAL_LEN_SLOW;
}

static inline int should_optimize_scan(struct ext4_allocation_context *ac)
{
	if (unlikely(!test_opt2(ac->ac_sb, MB_OPTIMIZE_SCAN)))
		return 0;
	if (ac->ac_criteria >= CR_GOAL_LEN_SLOW)
		return 0;
	if (!ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS))
		return 0;
	return 1;
}

/*
 * Return next linear group for allocation.
 */
static ext4_group_t
next_linear_group(ext4_group_t group, ext4_group_t ngroups)
{
	/*
	 * Artificially restricted ngroups for non-extent
	 * files makes group > ngroups possible on first loop.
	 */
	return group + 1 >= ngroups ? 0 : group + 1;
}
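/*
 * Taken together, the helpers above drive the criteria ladder described in
 * the header comment; a minimal sketch of the transitions:
 *
 *	CR_POWER2_ALIGNED -> CR_GOAL_LEN_FAST -> CR_BEST_AVAIL_LEN (data)
 *	                                      -> CR_GOAL_LEN_SLOW (non-data)
 *
 * with CR_BEST_AVAIL_LEN itself falling back to CR_GOAL_LEN_SLOW once the
 * goal length cannot be trimmed any further.
 */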

/*
 * ext4_mb_choose_next_group: choose next group for allocation.
 *
 * @ac        Allocation Context
 * @new_cr    This is an output parameter. If there is no good group
 *            available at the current CR level, this field is updated to
 *            indicate the new cr level that should be used.
 * @group     This is an input / output parameter. As an input it indicates
 *            the next group that the allocator intends to use for
 *            allocation. As output, this field indicates the next group
 *            that should be used as determined by the optimization
 *            functions.
 * @ngroups   Total number of groups
 */
static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac,
		enum criteria *new_cr, ext4_group_t *group, ext4_group_t ngroups)
{
	*new_cr = ac->ac_criteria;

	if (!should_optimize_scan(ac)) {
		*group = next_linear_group(*group, ngroups);
		return;
	}

	/*
	 * Optimized scanning can return non-adjacent groups which can cause
	 * seek overhead for rotational disks. So try a few linear groups
	 * before trying optimized scan.
	 */
	if (ac->ac_groups_linear_remaining) {
		*group = next_linear_group(*group, ngroups);
		ac->ac_groups_linear_remaining--;
		return;
	}

	if (*new_cr == CR_POWER2_ALIGNED) {
		ext4_mb_choose_next_group_p2_aligned(ac, new_cr, group);
	} else if (*new_cr == CR_GOAL_LEN_FAST) {
		ext4_mb_choose_next_group_goal_fast(ac, new_cr, group);
	} else if (*new_cr == CR_BEST_AVAIL_LEN) {
		ext4_mb_choose_next_group_best_avail(ac, new_cr, group);
	} else {
		/*
		 * TODO: For CR_GOAL_LEN_SLOW, we can arrange groups in an
		 * rb tree sorted by bb_free. But until that happens, we should
		 * never come here.
		 */
		WARN_ON(1);
	}
}

/*
 * Cache the order of the largest free extent we have available in this block
 * group.
 */
static void
mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int i;

	for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--)
		if (grp->bb_counters[i] > 0)
			break;
	/* No need to move between order lists? */
	if (!test_opt2(sb, MB_OPTIMIZE_SCAN) ||
	    i == grp->bb_largest_free_order) {
		grp->bb_largest_free_order = i;
		return;
	}

	if (grp->bb_largest_free_order >= 0) {
		write_lock(&sbi->s_mb_largest_free_orders_locks[
					      grp->bb_largest_free_order]);
		list_del_init(&grp->bb_largest_free_order_node);
		write_unlock(&sbi->s_mb_largest_free_orders_locks[
					      grp->bb_largest_free_order]);
	}
	grp->bb_largest_free_order = i;
	if (grp->bb_largest_free_order >= 0 && grp->bb_free) {
		write_lock(&sbi->s_mb_largest_free_orders_locks[
					      grp->bb_largest_free_order]);
		list_add_tail(&grp->bb_largest_free_order_node,
		      &sbi->s_mb_largest_free_orders[grp->bb_largest_free_order]);
		write_unlock(&sbi->s_mb_largest_free_orders_locks[
					      grp->bb_largest_free_order]);
	}
}

static noinline_for_stack
void ext4_mb_generate_buddy(struct super_block *sb,
			    void *buddy, void *bitmap, ext4_group_t group,
			    struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
	ext4_grpblk_t i = 0;
	ext4_grpblk_t first;
	ext4_grpblk_t len;
	unsigned free = 0;
	unsigned fragments = 0;
	unsigned long long period = get_cycles();

	/* initialize buddy from bitmap which is aggregation
	 * of on-disk bitmap and preallocations */
	i = mb_find_next_zero_bit(bitmap, max, 0);
	grp->bb_first_free = i;
	while (i < max) {
		fragments++;
		first = i;
		i = mb_find_next_bit(bitmap, max, i);
		len = i - first;
		free += len;
		if (len > 1)
			ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
		else
			grp->bb_counters[0]++;
		if (i < max)
			i = mb_find_next_zero_bit(bitmap, max, i);
	}
	grp->bb_fragments = fragments;

	if (free != grp->bb_free) {
		ext4_grp_locked_error(sb, group, 0, 0,
				      "block bitmap and bg descriptor "
				      "inconsistent: %u vs %u free clusters",
				      free, grp->bb_free);
		/*
		 * If we intend to continue, we consider group descriptor
		 * corrupt and update bb_free using bitmap value
		 */
		grp->bb_free = free;
		ext4_mark_group_bitmap_corrupted(sb, group,
					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
	}
	mb_set_largest_free_order(sb, grp);
	mb_update_avg_fragment_size(sb, grp);

	clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));

	period = get_cycles() - period;
	atomic_inc(&sbi->s_mb_buddies_generated);
	atomic64_add(period, &sbi->s_mb_generation_time);
}

static void mb_regenerate_buddy(struct ext4_buddy *e4b)
{
	int count;
	int order = 1;
	void *buddy;

	while ((buddy = mb_find_buddy(e4b, order++, &count)))
		mb_set_bits(buddy, 0, count);

	e4b->bd_info->bb_fragments = 0;
	memset(e4b->bd_info->bb_counters, 0,
	       sizeof(*e4b->bd_info->bb_counters) *
	       (e4b->bd_sb->s_blocksize_bits + 2));

	ext4_mb_generate_buddy(e4b->bd_sb, e4b->bd_buddy,
			       e4b->bd_bitmap, e4b->bd_group, e4b->bd_info);
}
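/*
 * A worked example of the buddy cache layout described in the comment
 * below: with 4k pages and a 1k block size, blocks_per_page = 4 and
 * groups_per_page = 2, so page 0 holds [bitmap 0][buddy 0][bitmap 1]
 * [buddy 1]. With a 4k block size, blocks_per_page = 1 and each group g
 * needs two consecutive pages: its bitmap on page 2g and its buddy on
 * page 2g + 1.
 */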

/* The buddy information is attached to the buddy cache inode
 * for convenience. The information regarding each group
 * is loaded via ext4_mb_load_buddy. The information involves the
 * block bitmap and buddy information. The information is
 * stored in the inode as
 *
 * {                        page                        }
 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information.
 * So for each group we take up 2 blocks. A page can
 * contain blocks_per_page (PAGE_SIZE / blocksize) blocks.
 * So it can have information regarding groups_per_page, which
 * is blocks_per_page/2
 *
 * Locking note: This routine takes the block group lock of all groups
 * for this page; do not hold this lock when calling this routine!
 */

static int ext4_mb_init_cache(struct folio *folio, char *incore, gfp_t gfp)
{
	ext4_group_t ngroups;
	unsigned int blocksize;
	int blocks_per_page;
	int groups_per_page;
	int err = 0;
	int i;
	ext4_group_t first_group, group;
	int first_block;
	struct super_block *sb;
	struct buffer_head *bhs;
	struct buffer_head **bh = NULL;
	struct inode *inode;
	char *data;
	char *bitmap;
	struct ext4_group_info *grinfo;

	inode = folio->mapping->host;
	sb = inode->i_sb;
	ngroups = ext4_get_groups_count(sb);
	blocksize = i_blocksize(inode);
	blocks_per_page = PAGE_SIZE / blocksize;

	mb_debug(sb, "init folio %lu\n", folio->index);

	groups_per_page = blocks_per_page >> 1;
	if (groups_per_page == 0)
		groups_per_page = 1;

	/* allocate buffer_heads to read bitmaps */
	if (groups_per_page > 1) {
		i = sizeof(struct buffer_head *) * groups_per_page;
		bh = kzalloc(i, gfp);
		if (bh == NULL)
			return -ENOMEM;
	} else
		bh = &bhs;

	first_group = folio->index * blocks_per_page / 2;

	/* read all groups the folio covers into the cache */
	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
		if (group >= ngroups)
			break;

		grinfo = ext4_get_group_info(sb, group);
		if (!grinfo)
			continue;
		/*
		 * If page is uptodate then we came here after online resize
		 * which added some new uninitialized group info structs, so
		 * we must skip all initialized uptodate buddies on the folio,
		 * which may be currently in use by an allocating task.
		 */
		if (folio_test_uptodate(folio) &&
		    !EXT4_MB_GRP_NEED_INIT(grinfo)) {
			bh[i] = NULL;
			continue;
		}
		bh[i] = ext4_read_block_bitmap_nowait(sb, group, false);
		if (IS_ERR(bh[i])) {
			err = PTR_ERR(bh[i]);
			bh[i] = NULL;
			goto out;
		}
		mb_debug(sb, "read bitmap for group %u\n", group);
	}

	/* wait for I/O completion */
	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
		int err2;

		if (!bh[i])
			continue;
		err2 = ext4_wait_block_bitmap(sb, group, bh[i]);
		if (!err)
			err = err2;
	}

	first_block = folio->index * blocks_per_page;
	for (i = 0; i < blocks_per_page; i++) {
		group = (first_block + i) >> 1;
		if (group >= ngroups)
			break;

		if (!bh[group - first_group])
			/* skip initialized uptodate buddy */
			continue;

		if (!buffer_verified(bh[group - first_group]))
			/* Skip faulty bitmaps */
			continue;
		err = 0;

		/*
		 * data carries information regarding this
		 * particular group in the format specified
		 * above
		 *
		 */
		data = folio_address(folio) + (i * blocksize);
		bitmap = bh[group - first_group]->b_data;

		/*
		 * We place the buddy block and bitmap block
		 * close together
		 */
		grinfo = ext4_get_group_info(sb, group);
		if (!grinfo) {
			err = -EFSCORRUPTED;
			goto out;
		}
		if ((first_block + i) & 1) {
			/* this is block of buddy */
			BUG_ON(incore == NULL);
			mb_debug(sb, "put buddy for group %u in folio %lu/%x\n",
				 group, folio->index, i * blocksize);
			trace_ext4_mb_buddy_bitmap_load(sb, group);
			grinfo->bb_fragments = 0;
			memset(grinfo->bb_counters, 0,
			       sizeof(*grinfo->bb_counters) *
			       (MB_NUM_ORDERS(sb)));
			/*
			 * incore got set to the group block bitmap below
			 */
			ext4_lock_group(sb, group);
			/* init the buddy */
			memset(data, 0xff, blocksize);
			ext4_mb_generate_buddy(sb, data, incore, group, grinfo);
			ext4_unlock_group(sb, group);
			incore = NULL;
		} else {
			/* this is block of bitmap */
			BUG_ON(incore != NULL);
			mb_debug(sb, "put bitmap for group %u in folio %lu/%x\n",
				 group, folio->index, i * blocksize);
			trace_ext4_mb_bitmap_load(sb, group);

			/* see comments in ext4_mb_put_pa() */
			ext4_lock_group(sb, group);
			memcpy(data, bitmap, blocksize);

			/* mark all preallocated blks used in in-core bitmap */
			ext4_mb_generate_from_pa(sb, data, group);
			WARN_ON_ONCE(!RB_EMPTY_ROOT(&grinfo->bb_free_root));
			ext4_unlock_group(sb, group);

			/* set incore so that the buddy information can be
			 * generated using this
			 */
			incore = data;
		}
	}
	folio_mark_uptodate(folio);

out:
	if (bh) {
		for (i = 0; i < groups_per_page; i++)
			brelse(bh[i]);
		if (bh != &bhs)
			kfree(bh);
	}
	return err;
}

/*
 * Lock the buddy and bitmap pages. This makes sure that a parallel
 * init_group on the same buddy page doesn't happen while we hold the buddy
 * page lock. Return the locked buddy and bitmap pages on the e4b struct. If
 * buddy and bitmap are on the same page, e4b->bd_buddy_folio is NULL and the
 * return value is 0.
 */
static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
		ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp)
{
	struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
	int block, pnum, poff;
	int blocks_per_page;
	struct folio *folio;

	e4b->bd_buddy_folio = NULL;
	e4b->bd_bitmap_folio = NULL;

	blocks_per_page = PAGE_SIZE / sb->s_blocksize;
	/*
	 * the buddy cache inode stores the block bitmap
	 * and buddy information in consecutive blocks.
	 * So for each group we need two blocks.
	 */
	block = group * 2;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;
	folio = __filemap_get_folio(inode->i_mapping, pnum,
			FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
	if (IS_ERR(folio))
		return PTR_ERR(folio);
	BUG_ON(folio->mapping != inode->i_mapping);
	e4b->bd_bitmap_folio = folio;
	e4b->bd_bitmap = folio_address(folio) + (poff * sb->s_blocksize);

	if (blocks_per_page >= 2) {
		/* buddy and bitmap are on the same page */
		return 0;
	}

	/* blocks_per_page == 1, hence we need another page for the buddy */
	folio = __filemap_get_folio(inode->i_mapping, block + 1,
			FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
	if (IS_ERR(folio))
		return PTR_ERR(folio);
	BUG_ON(folio->mapping != inode->i_mapping);
	e4b->bd_buddy_folio = folio;
	return 0;
}

static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
{
	if (e4b->bd_bitmap_folio) {
		folio_unlock(e4b->bd_bitmap_folio);
		folio_put(e4b->bd_bitmap_folio);
	}
	if (e4b->bd_buddy_folio) {
		folio_unlock(e4b->bd_buddy_folio);
		folio_put(e4b->bd_buddy_folio);
	}
}

/*
 * Locking note: This routine calls ext4_mb_init_cache(), which takes the
 * block group lock of all groups for this page; do not hold the BG lock when
 * calling this routine!
 */
static noinline_for_stack
int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
{

	struct ext4_group_info *this_grp;
	struct ext4_buddy e4b;
	struct folio *folio;
	int ret = 0;

	might_sleep();
	mb_debug(sb, "init group %u\n", group);
	this_grp = ext4_get_group_info(sb, group);
	if (!this_grp)
		return -EFSCORRUPTED;

	/*
	 * This ensures that we don't reinit the buddy cache
	 * page which maps to the group from which we are already
	 * allocating. If we are looking at the buddy cache we would
	 * have taken a reference using ext4_mb_load_buddy and that
	 * would have pinned the buddy page to the page cache.
	 * The call to ext4_mb_get_buddy_page_lock will mark the
	 * page accessed.
	 */
	ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b, gfp);
	if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
		/*
		 * somebody initialized the group
		 * return without doing anything
		 */
		goto err;
	}

	folio = e4b.bd_bitmap_folio;
	ret = ext4_mb_init_cache(folio, NULL, gfp);
	if (ret)
		goto err;
	if (!folio_test_uptodate(folio)) {
		ret = -EIO;
		goto err;
	}

	if (e4b.bd_buddy_folio == NULL) {
		/*
		 * If both the bitmap and buddy are in
		 * the same page we don't need to force
		 * init the buddy
		 */
		ret = 0;
		goto err;
	}
	/* init buddy cache */
	folio = e4b.bd_buddy_folio;
	ret = ext4_mb_init_cache(folio, e4b.bd_bitmap, gfp);
	if (ret)
		goto err;
	if (!folio_test_uptodate(folio)) {
		ret = -EIO;
		goto err;
	}
err:
	ext4_mb_put_buddy_page_lock(&e4b);
	return ret;
}
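/*
 * A minimal usage sketch of the buddy loading API defined below
 * (illustrative only, error handling trimmed): callers pair every
 * successful load with an unload, and take the group's bitlock only
 * around the actual bit manipulation:
 *
 *	struct ext4_buddy e4b;
 *
 *	if (ext4_mb_load_buddy(sb, group, &e4b) == 0) {
 *		ext4_lock_group(sb, group);
 *		// ... mb_mark_used()/mb_free_blocks() on e4b ...
 *		ext4_unlock_group(sb, group);
 *		ext4_mb_unload_buddy(&e4b);
 *	}
 */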

/*
 * Locking note: This routine calls ext4_mb_init_cache(), which takes the
 * block group lock of all groups for this page; do not hold the BG lock when
 * calling this routine!
 */
static noinline_for_stack int
ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
		       struct ext4_buddy *e4b, gfp_t gfp)
{
	int blocks_per_page;
	int block;
	int pnum;
	int poff;
	struct folio *folio;
	int ret;
	struct ext4_group_info *grp;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct inode *inode = sbi->s_buddy_cache;

	might_sleep();
	mb_debug(sb, "load group %u\n", group);

	blocks_per_page = PAGE_SIZE / sb->s_blocksize;
	grp = ext4_get_group_info(sb, group);
	if (!grp)
		return -EFSCORRUPTED;

	e4b->bd_blkbits = sb->s_blocksize_bits;
	e4b->bd_info = grp;
	e4b->bd_sb = sb;
	e4b->bd_group = group;
	e4b->bd_buddy_folio = NULL;
	e4b->bd_bitmap_folio = NULL;

	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
		/*
		 * we need full data about the group
		 * to make a good selection
		 */
		ret = ext4_mb_init_group(sb, group, gfp);
		if (ret)
			return ret;
	}

	/*
	 * the buddy cache inode stores the block bitmap
	 * and buddy information in consecutive blocks.
	 * So for each group we need two blocks.
	 */
	block = group * 2;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;

	/* Avoid locking the folio in the fast path ... */
	folio = __filemap_get_folio(inode->i_mapping, pnum, FGP_ACCESSED, 0);
	if (IS_ERR(folio) || !folio_test_uptodate(folio)) {
		if (!IS_ERR(folio))
			/*
			 * drop the folio reference and try
			 * to get the folio with lock. If we
			 * are not uptodate that implies
			 * somebody just created the folio but
			 * is yet to initialize it. So
			 * wait for it to initialize.
			 */
			folio_put(folio);
		folio = __filemap_get_folio(inode->i_mapping, pnum,
				FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
		if (!IS_ERR(folio)) {
			if (WARN_RATELIMIT(folio->mapping != inode->i_mapping,
	"ext4: bitmap's mapping != inode->i_mapping\n")) {
				/* should never happen */
				folio_unlock(folio);
				ret = -EINVAL;
				goto err;
			}
			if (!folio_test_uptodate(folio)) {
				ret = ext4_mb_init_cache(folio, NULL, gfp);
				if (ret) {
					folio_unlock(folio);
					goto err;
				}
				mb_cmp_bitmaps(e4b, folio_address(folio) +
					       (poff * sb->s_blocksize));
			}
			folio_unlock(folio);
		}
	}
	if (IS_ERR(folio)) {
		ret = PTR_ERR(folio);
		goto err;
	}
	if (!folio_test_uptodate(folio)) {
		ret = -EIO;
		goto err;
	}

	/* Folios marked accessed already */
	e4b->bd_bitmap_folio = folio;
	e4b->bd_bitmap = folio_address(folio) + (poff * sb->s_blocksize);

	block++;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;

	folio = __filemap_get_folio(inode->i_mapping, pnum, FGP_ACCESSED, 0);
	if (IS_ERR(folio) || !folio_test_uptodate(folio)) {
		if (!IS_ERR(folio))
			folio_put(folio);
		folio = __filemap_get_folio(inode->i_mapping, pnum,
				FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
		if (!IS_ERR(folio)) {
			if (WARN_RATELIMIT(folio->mapping != inode->i_mapping,
	"ext4: buddy bitmap's mapping != inode->i_mapping\n")) {
				/* should never happen */
				folio_unlock(folio);
				ret = -EINVAL;
				goto err;
			}
			if (!folio_test_uptodate(folio)) {
				ret = ext4_mb_init_cache(folio, e4b->bd_bitmap,
							 gfp);
				if (ret) {
					folio_unlock(folio);
					goto err;
				}
			}
			folio_unlock(folio);
		}
	}
	if (IS_ERR(folio)) {
		ret = PTR_ERR(folio);
		goto err;
	}
	if (!folio_test_uptodate(folio)) {
		ret = -EIO;
		goto err;
	}

	/* Folios marked accessed already */
	e4b->bd_buddy_folio = folio;
	e4b->bd_buddy = folio_address(folio) + (poff * sb->s_blocksize);

	return 0;

err:
	if (!IS_ERR_OR_NULL(folio))
		folio_put(folio);
	if (e4b->bd_bitmap_folio)
		folio_put(e4b->bd_bitmap_folio);

	e4b->bd_buddy = NULL;
	e4b->bd_bitmap = NULL;
	return ret;
}

static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
			      struct ext4_buddy *e4b)
{
	return ext4_mb_load_buddy_gfp(sb, group, e4b, GFP_NOFS);
}

static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
{
	if (e4b->bd_bitmap_folio)
		folio_put(e4b->bd_bitmap_folio);
	if (e4b->bd_buddy_folio)
		folio_put(e4b->bd_buddy_folio);
}


static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
{
	int order = 1, max;
	void *bb;

	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
	BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));

	while (order <= e4b->bd_blkbits + 1) {
		bb = mb_find_buddy(e4b, order, &max);
		if (!mb_test_bit(block >> order, bb)) {
			/* this block is part of buddy of order 'order' */
			return order;
		}
		order++;
	}
	return 0;
}

static void mb_clear_bits(void *bm, int cur, int len)
{
	__u32 *addr;

	len = cur + len;
	while (cur < len) {
		if ((cur & 31) == 0 && (len - cur) >= 32) {
			/* fast path: clear whole word at once */
			addr = bm + (cur >> 3);
			*addr = 0;
			cur += 32;
			continue;
		}
		mb_clear_bit(cur, bm);
		cur++;
	}
}
+= 32; 1775 continue; 1776 } 1777 mb_clear_bit(cur, bm); 1778 cur++; 1779 } 1780 } 1781 1782 /* clear bits in given range 1783 * will return first found zero bit if any, -1 otherwise 1784 */ 1785 static int mb_test_and_clear_bits(void *bm, int cur, int len) 1786 { 1787 __u32 *addr; 1788 int zero_bit = -1; 1789 1790 len = cur + len; 1791 while (cur < len) { 1792 if ((cur & 31) == 0 && (len - cur) >= 32) { 1793 /* fast path: clear whole word at once */ 1794 addr = bm + (cur >> 3); 1795 if (*addr != (__u32)(-1) && zero_bit == -1) 1796 zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0); 1797 *addr = 0; 1798 cur += 32; 1799 continue; 1800 } 1801 if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1) 1802 zero_bit = cur; 1803 cur++; 1804 } 1805 1806 return zero_bit; 1807 } 1808 1809 void mb_set_bits(void *bm, int cur, int len) 1810 { 1811 __u32 *addr; 1812 1813 len = cur + len; 1814 while (cur < len) { 1815 if ((cur & 31) == 0 && (len - cur) >= 32) { 1816 /* fast path: set whole word at once */ 1817 addr = bm + (cur >> 3); 1818 *addr = 0xffffffff; 1819 cur += 32; 1820 continue; 1821 } 1822 mb_set_bit(cur, bm); 1823 cur++; 1824 } 1825 } 1826 1827 static inline int mb_buddy_adjust_border(int* bit, void* bitmap, int side) 1828 { 1829 if (mb_test_bit(*bit + side, bitmap)) { 1830 mb_clear_bit(*bit, bitmap); 1831 (*bit) -= side; 1832 return 1; 1833 } 1834 else { 1835 (*bit) += side; 1836 mb_set_bit(*bit, bitmap); 1837 return -1; 1838 } 1839 } 1840 1841 static void mb_buddy_mark_free(struct ext4_buddy *e4b, int first, int last) 1842 { 1843 int max; 1844 int order = 1; 1845 void *buddy = mb_find_buddy(e4b, order, &max); 1846 1847 while (buddy) { 1848 void *buddy2; 1849 1850 /* Bits in range [first; last] are known to be set since 1851 * corresponding blocks were allocated. Bits in range 1852 * (first; last) will stay set because they form buddies on 1853 * upper layer. We just deal with borders if they don't 1854 * align with upper layer and then go up. 1855 * Releasing entire group is all about clearing 1856 * single bit of highest order buddy. 1857 */ 1858 1859 /* Example: 1860 * --------------------------------- 1861 * | 1 | 1 | 1 | 1 | 1862 * --------------------------------- 1863 * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1864 * --------------------------------- 1865 * 0 1 2 3 4 5 6 7 1866 * \_____________________/ 1867 * 1868 * Neither [1] nor [6] is aligned to above layer. 1869 * Left neighbour [0] is free, so mark it busy, 1870 * decrease bb_counters and extend range to 1871 * [0; 6] 1872 * Right neighbour [7] is busy. It can't be coaleasced with [6], so 1873 * mark [6] free, increase bb_counters and shrink range to 1874 * [0; 5]. 1875 * Then shift range to [0; 2], go up and do the same. 
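	 *
	 * (Editor's note, continuing the example for concreteness: after
	 * the shift the walk repeats one level up, where the range [0; 2]
	 * now covers order-1 buddies; each misaligned border either
	 * swallows a free neighbour, extending the range and decrementing
	 * this order's counter, or returns one buddy to this order,
	 * shrinking the range and incrementing the counter, until
	 * first > last or the top order is reached.)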
1876 */ 1877 1878 1879 if (first & 1) 1880 e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1); 1881 if (!(last & 1)) 1882 e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1); 1883 if (first > last) 1884 break; 1885 order++; 1886 1887 buddy2 = mb_find_buddy(e4b, order, &max); 1888 if (!buddy2) { 1889 mb_clear_bits(buddy, first, last - first + 1); 1890 e4b->bd_info->bb_counters[order - 1] += last - first + 1; 1891 break; 1892 } 1893 first >>= 1; 1894 last >>= 1; 1895 buddy = buddy2; 1896 } 1897 } 1898 1899 static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b, 1900 int first, int count) 1901 { 1902 int left_is_free = 0; 1903 int right_is_free = 0; 1904 int block; 1905 int last = first + count - 1; 1906 struct super_block *sb = e4b->bd_sb; 1907 1908 if (WARN_ON(count == 0)) 1909 return; 1910 BUG_ON(last >= (sb->s_blocksize << 3)); 1911 assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group)); 1912 /* Don't bother if the block group is corrupt. */ 1913 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) 1914 return; 1915 1916 mb_check_buddy(e4b); 1917 mb_free_blocks_double(inode, e4b, first, count); 1918 1919 /* access memory sequentially: check left neighbour, 1920 * clear range and then check right neighbour 1921 */ 1922 if (first != 0) 1923 left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap); 1924 block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count); 1925 if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0]) 1926 right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap); 1927 1928 if (unlikely(block != -1)) { 1929 struct ext4_sb_info *sbi = EXT4_SB(sb); 1930 ext4_fsblk_t blocknr; 1931 1932 /* 1933 * Fastcommit replay can free already freed blocks which 1934 * corrupts allocation info. Regenerate it. 1935 */ 1936 if (sbi->s_mount_state & EXT4_FC_REPLAY) { 1937 mb_regenerate_buddy(e4b); 1938 goto check; 1939 } 1940 1941 blocknr = ext4_group_first_block_no(sb, e4b->bd_group); 1942 blocknr += EXT4_C2B(sbi, block); 1943 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group, 1944 EXT4_GROUP_INFO_BBITMAP_CORRUPT); 1945 ext4_grp_locked_error(sb, e4b->bd_group, 1946 inode ? inode->i_ino : 0, blocknr, 1947 "freeing already freed block (bit %u); block bitmap corrupt.", 1948 block); 1949 return; 1950 } 1951 1952 this_cpu_inc(discard_pa_seq); 1953 e4b->bd_info->bb_free += count; 1954 if (first < e4b->bd_info->bb_first_free) 1955 e4b->bd_info->bb_first_free = first; 1956 1957 /* let's maintain fragments counter */ 1958 if (left_is_free && right_is_free) 1959 e4b->bd_info->bb_fragments--; 1960 else if (!left_is_free && !right_is_free) 1961 e4b->bd_info->bb_fragments++; 1962 1963 /* buddy[0] == bd_bitmap is a special case, so handle 1964 * it right away and let mb_buddy_mark_free stay free of 1965 * zero order checks. 1966 * Check if neighbours are to be coaleasced, 1967 * adjust bitmap bb_counters and borders appropriately. 1968 */ 1969 if (first & 1) { 1970 first += !left_is_free; 1971 e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1; 1972 } 1973 if (!(last & 1)) { 1974 last -= !right_is_free; 1975 e4b->bd_info->bb_counters[0] += right_is_free ? 
-1 : 1; 1976 } 1977 1978 if (first <= last) 1979 mb_buddy_mark_free(e4b, first >> 1, last >> 1); 1980 1981 mb_set_largest_free_order(sb, e4b->bd_info); 1982 mb_update_avg_fragment_size(sb, e4b->bd_info); 1983 check: 1984 mb_check_buddy(e4b); 1985 } 1986 1987 static int mb_find_extent(struct ext4_buddy *e4b, int block, 1988 int needed, struct ext4_free_extent *ex) 1989 { 1990 int max, order, next; 1991 void *buddy; 1992 1993 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); 1994 BUG_ON(ex == NULL); 1995 1996 buddy = mb_find_buddy(e4b, 0, &max); 1997 BUG_ON(buddy == NULL); 1998 BUG_ON(block >= max); 1999 if (mb_test_bit(block, buddy)) { 2000 ex->fe_len = 0; 2001 ex->fe_start = 0; 2002 ex->fe_group = 0; 2003 return 0; 2004 } 2005 2006 /* find actual order */ 2007 order = mb_find_order_for_block(e4b, block); 2008 2009 ex->fe_len = (1 << order) - (block & ((1 << order) - 1)); 2010 ex->fe_start = block; 2011 ex->fe_group = e4b->bd_group; 2012 2013 block = block >> order; 2014 2015 while (needed > ex->fe_len && 2016 mb_find_buddy(e4b, order, &max)) { 2017 2018 if (block + 1 >= max) 2019 break; 2020 2021 next = (block + 1) * (1 << order); 2022 if (mb_test_bit(next, e4b->bd_bitmap)) 2023 break; 2024 2025 order = mb_find_order_for_block(e4b, next); 2026 2027 block = next >> order; 2028 ex->fe_len += 1 << order; 2029 } 2030 2031 if (ex->fe_start + ex->fe_len > EXT4_CLUSTERS_PER_GROUP(e4b->bd_sb)) { 2032 /* Should never happen! (but apparently sometimes does?!?) */ 2033 WARN_ON(1); 2034 ext4_grp_locked_error(e4b->bd_sb, e4b->bd_group, 0, 0, 2035 "corruption or bug in mb_find_extent " 2036 "block=%d, order=%d needed=%d ex=%u/%d/%d@%u", 2037 block, order, needed, ex->fe_group, ex->fe_start, 2038 ex->fe_len, ex->fe_logical); 2039 ex->fe_len = 0; 2040 ex->fe_start = 0; 2041 ex->fe_group = 0; 2042 } 2043 return ex->fe_len; 2044 } 2045 2046 static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex) 2047 { 2048 int ord; 2049 int mlen = 0; 2050 int max = 0; 2051 int start = ex->fe_start; 2052 int len = ex->fe_len; 2053 unsigned ret = 0; 2054 int len0 = len; 2055 void *buddy; 2056 int ord_start, ord_end; 2057 2058 BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3)); 2059 BUG_ON(e4b->bd_group != ex->fe_group); 2060 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); 2061 mb_check_buddy(e4b); 2062 mb_mark_used_double(e4b, start, len); 2063 2064 this_cpu_inc(discard_pa_seq); 2065 e4b->bd_info->bb_free -= len; 2066 if (e4b->bd_info->bb_first_free == start) 2067 e4b->bd_info->bb_first_free += len; 2068 2069 /* let's maintain fragments counter */ 2070 if (start != 0) 2071 mlen = !mb_test_bit(start - 1, e4b->bd_bitmap); 2072 if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0]) 2073 max = !mb_test_bit(start + len, e4b->bd_bitmap); 2074 if (mlen && max) 2075 e4b->bd_info->bb_fragments++; 2076 else if (!mlen && !max) 2077 e4b->bd_info->bb_fragments--; 2078 2079 /* let's maintain buddy itself */ 2080 while (len) { 2081 ord = mb_find_order_for_block(e4b, start); 2082 2083 if (((start >> ord) << ord) == start && len >= (1 << ord)) { 2084 /* the whole chunk may be allocated at once! 
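			 *
			 * (Editor's walk-through with hypothetical state: if
			 * blocks 0-7 form one free order-3 buddy, allocating
			 * start=0 len=8 is a single mb_set_bit() on the
			 * order-3 buddy plus one counter decrement. For
			 * start=2 len=3 the range is not aligned, so the
			 * split path below marks the whole order-3 buddy
			 * used and then re-frees the head [0,2) and the
			 * tail [5,8) via ext4_mb_mark_free_simple().)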
*/ 2085 mlen = 1 << ord; 2086 buddy = mb_find_buddy(e4b, ord, &max); 2087 BUG_ON((start >> ord) >= max); 2088 mb_set_bit(start >> ord, buddy); 2089 e4b->bd_info->bb_counters[ord]--; 2090 start += mlen; 2091 len -= mlen; 2092 BUG_ON(len < 0); 2093 continue; 2094 } 2095 2096 /* store for history */ 2097 if (ret == 0) 2098 ret = len | (ord << 16); 2099 2100 BUG_ON(ord <= 0); 2101 buddy = mb_find_buddy(e4b, ord, &max); 2102 mb_set_bit(start >> ord, buddy); 2103 e4b->bd_info->bb_counters[ord]--; 2104 2105 ord_start = (start >> ord) << ord; 2106 ord_end = ord_start + (1 << ord); 2107 /* first chunk */ 2108 if (start > ord_start) 2109 ext4_mb_mark_free_simple(e4b->bd_sb, e4b->bd_buddy, 2110 ord_start, start - ord_start, 2111 e4b->bd_info); 2112 2113 /* last chunk */ 2114 if (start + len < ord_end) { 2115 ext4_mb_mark_free_simple(e4b->bd_sb, e4b->bd_buddy, 2116 start + len, 2117 ord_end - (start + len), 2118 e4b->bd_info); 2119 break; 2120 } 2121 len = start + len - ord_end; 2122 start = ord_end; 2123 } 2124 mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info); 2125 2126 mb_update_avg_fragment_size(e4b->bd_sb, e4b->bd_info); 2127 mb_set_bits(e4b->bd_bitmap, ex->fe_start, len0); 2128 mb_check_buddy(e4b); 2129 2130 return ret; 2131 } 2132 2133 /* 2134 * Must be called under group lock! 2135 */ 2136 static void ext4_mb_use_best_found(struct ext4_allocation_context *ac, 2137 struct ext4_buddy *e4b) 2138 { 2139 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 2140 int ret; 2141 2142 BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group); 2143 BUG_ON(ac->ac_status == AC_STATUS_FOUND); 2144 2145 ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len); 2146 ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical; 2147 ret = mb_mark_used(e4b, &ac->ac_b_ex); 2148 2149 /* preallocation can change ac_b_ex, thus we store actually 2150 * allocated blocks for history */ 2151 ac->ac_f_ex = ac->ac_b_ex; 2152 2153 ac->ac_status = AC_STATUS_FOUND; 2154 ac->ac_tail = ret & 0xffff; 2155 ac->ac_buddy = ret >> 16; 2156 2157 /* 2158 * take the page reference. We want the page to be pinned 2159 * so that we don't get a ext4_mb_init_cache_call for this 2160 * group until we update the bitmap. That would mean we 2161 * double allocate blocks. The reference is dropped 2162 * in ext4_mb_release_context 2163 */ 2164 ac->ac_bitmap_folio = e4b->bd_bitmap_folio; 2165 folio_get(ac->ac_bitmap_folio); 2166 ac->ac_buddy_folio = e4b->bd_buddy_folio; 2167 folio_get(ac->ac_buddy_folio); 2168 /* store last allocated for subsequent stream allocation */ 2169 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) { 2170 spin_lock(&sbi->s_md_lock); 2171 sbi->s_mb_last_group = ac->ac_f_ex.fe_group; 2172 sbi->s_mb_last_start = ac->ac_f_ex.fe_start; 2173 spin_unlock(&sbi->s_md_lock); 2174 } 2175 /* 2176 * As we've just preallocated more space than 2177 * user requested originally, we store allocated 2178 * space in a special descriptor. 
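	 *
	 * (Hypothetical example: an 8-block request that was normalized
	 * to a 16-block allocation leaves 8 unused blocks, which the
	 * ext4_mb_new_preallocation() call below parks in a new PA for
	 * later allocations.)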
 */
	if (ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
		ext4_mb_new_preallocation(ac);
}

static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b,
					int finish_group)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_free_extent *bex = &ac->ac_b_ex;
	struct ext4_free_extent *gex = &ac->ac_g_ex;

	if (ac->ac_status == AC_STATUS_FOUND)
		return;
	/*
	 * We don't want to scan for a whole year
	 */
	if (ac->ac_found > sbi->s_mb_max_to_scan &&
	    !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
		ac->ac_status = AC_STATUS_BREAK;
		return;
	}

	/*
	 * Haven't found a good chunk so far, let's continue
	 */
	if (bex->fe_len < gex->fe_len)
		return;

	if (finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
		ext4_mb_use_best_found(ac, e4b);
}

/*
 * The routine checks whether the found extent is good enough. If it is,
 * the extent gets marked used and a flag is set in the context to stop
 * scanning. Otherwise, the extent is compared with the previously found
 * extent and, if the new one is better, it is stored in the context.
 * The best found extent will be used later, if mballoc can't find an
 * extent that is good enough.
 *
 * The algorithm used is roughly as follows:
 *
 * * If the free extent found is exactly as big as the goal, then
 *   stop the scan and use it immediately
 *
 * * If the free extent found is smaller than the goal, then keep
 *   retrying up to a max of sbi->s_mb_max_to_scan times (default 200).
 *   After that stop scanning and use whatever we have.
 *
 * * If the free extent found is bigger than the goal, then keep
 *   retrying up to a max of sbi->s_mb_min_to_scan times (default 10)
 *   before stopping the scan and using the extent.
 *
 * FIXME: real allocation policy is to be designed yet!
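 *
 * (Editor's worked example of the policy above, with made-up numbers:
 * say the goal is 8 clusters.  An extent of exactly 8 is taken on the
 * spot.  If only 5- and 7-cluster extents turn up, the 7-cluster one
 * is remembered as the best and gets used once the scan gives up.  If
 * a 12- and then a 9-cluster extent turn up, the 9-cluster one wins:
 * both satisfy the goal, and the smaller wastes less.)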
 */
static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
					struct ext4_free_extent *ex,
					struct ext4_buddy *e4b)
{
	struct ext4_free_extent *bex = &ac->ac_b_ex;
	struct ext4_free_extent *gex = &ac->ac_g_ex;

	BUG_ON(ex->fe_len <= 0);
	BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
	BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
	BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);

	ac->ac_found++;
	ac->ac_cX_found[ac->ac_criteria]++;

	/*
	 * The special case - take what you catch first
	 */
	if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
		*bex = *ex;
		ext4_mb_use_best_found(ac, e4b);
		return;
	}

	/*
	 * Let's check whether the chunk is good enough
	 */
	if (ex->fe_len == gex->fe_len) {
		*bex = *ex;
		ext4_mb_use_best_found(ac, e4b);
		return;
	}

	/*
	 * If this is the first found extent, just store it in the context
	 */
	if (bex->fe_len == 0) {
		*bex = *ex;
		return;
	}

	/*
	 * If the newly found extent is better, store it in the context
	 */
	if (bex->fe_len < gex->fe_len) {
		/* if the request isn't satisfied, any found extent
		 * larger than the previous best one is better */
		if (ex->fe_len > bex->fe_len)
			*bex = *ex;
	} else if (ex->fe_len > gex->fe_len) {
		/* if the request is satisfied, then we try to find
		 * an extent that still satisfies the request, but is
		 * smaller than the previous one */
		if (ex->fe_len < bex->fe_len)
			*bex = *ex;
	}

	ext4_mb_check_limits(ac, e4b, 0);
}

static noinline_for_stack
void ext4_mb_try_best_found(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct ext4_free_extent ex = ac->ac_b_ex;
	ext4_group_t group = ex.fe_group;
	int max;
	int err;

	BUG_ON(ex.fe_len <= 0);
	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
	if (err)
		return;

	ext4_lock_group(ac->ac_sb, group);
	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
		goto out;

	max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex);

	if (max > 0) {
		ac->ac_b_ex = ex;
		ext4_mb_use_best_found(ac, e4b);
	}

out:
	ext4_unlock_group(ac->ac_sb, group);
	ext4_mb_unload_buddy(e4b);
}

static noinline_for_stack
int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
				struct ext4_buddy *e4b)
{
	ext4_group_t group = ac->ac_g_ex.fe_group;
	int max;
	int err;
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
	struct ext4_free_extent ex;

	if (!grp)
		return -EFSCORRUPTED;
	if (!(ac->ac_flags & (EXT4_MB_HINT_TRY_GOAL | EXT4_MB_HINT_GOAL_ONLY)))
		return 0;
	if (grp->bb_free == 0)
		return 0;

	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
	if (err)
		return err;

	ext4_lock_group(ac->ac_sb, group);
	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
		goto out;

	max = mb_find_extent(e4b, ac->ac_g_ex.fe_start,
			     ac->ac_g_ex.fe_len, &ex);
	ex.fe_logical = 0xDEADFA11; /* debug value */

	if (max >= ac->ac_g_ex.fe_len &&
	    ac->ac_g_ex.fe_len == EXT4_B2C(sbi, sbi->s_stripe)) {
		ext4_fsblk_t start;

		start = ext4_grp_offs_to_block(ac->ac_sb, &ex);
		/* use do_div to get remainder (would be 64-bit
modulo) */ 2364 if (do_div(start, sbi->s_stripe) == 0) { 2365 ac->ac_found++; 2366 ac->ac_b_ex = ex; 2367 ext4_mb_use_best_found(ac, e4b); 2368 } 2369 } else if (max >= ac->ac_g_ex.fe_len) { 2370 BUG_ON(ex.fe_len <= 0); 2371 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); 2372 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); 2373 ac->ac_found++; 2374 ac->ac_b_ex = ex; 2375 ext4_mb_use_best_found(ac, e4b); 2376 } else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) { 2377 /* Sometimes, caller may want to merge even small 2378 * number of blocks to an existing extent */ 2379 BUG_ON(ex.fe_len <= 0); 2380 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); 2381 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); 2382 ac->ac_found++; 2383 ac->ac_b_ex = ex; 2384 ext4_mb_use_best_found(ac, e4b); 2385 } 2386 out: 2387 ext4_unlock_group(ac->ac_sb, group); 2388 ext4_mb_unload_buddy(e4b); 2389 2390 return 0; 2391 } 2392 2393 /* 2394 * The routine scans buddy structures (not bitmap!) from given order 2395 * to max order and tries to find big enough chunk to satisfy the req 2396 */ 2397 static noinline_for_stack 2398 void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac, 2399 struct ext4_buddy *e4b) 2400 { 2401 struct super_block *sb = ac->ac_sb; 2402 struct ext4_group_info *grp = e4b->bd_info; 2403 void *buddy; 2404 int i; 2405 int k; 2406 int max; 2407 2408 BUG_ON(ac->ac_2order <= 0); 2409 for (i = ac->ac_2order; i < MB_NUM_ORDERS(sb); i++) { 2410 if (grp->bb_counters[i] == 0) 2411 continue; 2412 2413 buddy = mb_find_buddy(e4b, i, &max); 2414 if (WARN_RATELIMIT(buddy == NULL, 2415 "ext4: mb_simple_scan_group: mb_find_buddy failed, (%d)\n", i)) 2416 continue; 2417 2418 k = mb_find_next_zero_bit(buddy, max, 0); 2419 if (k >= max) { 2420 ext4_mark_group_bitmap_corrupted(ac->ac_sb, 2421 e4b->bd_group, 2422 EXT4_GROUP_INFO_BBITMAP_CORRUPT); 2423 ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0, 2424 "%d free clusters of order %d. But found 0", 2425 grp->bb_counters[i], i); 2426 break; 2427 } 2428 ac->ac_found++; 2429 ac->ac_cX_found[ac->ac_criteria]++; 2430 2431 ac->ac_b_ex.fe_len = 1 << i; 2432 ac->ac_b_ex.fe_start = k << i; 2433 ac->ac_b_ex.fe_group = e4b->bd_group; 2434 2435 ext4_mb_use_best_found(ac, e4b); 2436 2437 BUG_ON(ac->ac_f_ex.fe_len != ac->ac_g_ex.fe_len); 2438 2439 if (EXT4_SB(sb)->s_mb_stats) 2440 atomic_inc(&EXT4_SB(sb)->s_bal_2orders); 2441 2442 break; 2443 } 2444 } 2445 2446 /* 2447 * The routine scans the group and measures all found extents. 2448 * In order to optimize scanning, caller must pass number of 2449 * free blocks in the group, so the routine can know upper limit. 
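 *
 * (Editor's note: "free" is seeded from bb_free and every extent found
 * is subtracted from it, so the loop below can stop as soon as it hits
 * zero instead of scanning the rest of the bitmap; it also serves as a
 * cross-check against bitmap corruption, as the error paths below
 * show.)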
 */
static noinline_for_stack
void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct super_block *sb = ac->ac_sb;
	void *bitmap = e4b->bd_bitmap;
	struct ext4_free_extent ex;
	int i, j, freelen;
	int free;

	free = e4b->bd_info->bb_free;
	if (WARN_ON(free <= 0))
		return;

	i = e4b->bd_info->bb_first_free;

	while (free && ac->ac_status == AC_STATUS_CONTINUE) {
		i = mb_find_next_zero_bit(bitmap,
						EXT4_CLUSTERS_PER_GROUP(sb), i);
		if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) {
			/*
			 * If we have a corrupt bitmap, we won't find any
			 * free blocks even though the group info says we
			 * have free blocks
			 */
			ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
			ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
					"%d free clusters as per "
					"group info. But bitmap says 0",
					free);
			break;
		}

		if (!ext4_mb_cr_expensive(ac->ac_criteria)) {
			/*
			 * In CR_GOAL_LEN_FAST and CR_BEST_AVAIL_LEN, we are
			 * sure that this group will have a large enough
			 * continuous free extent, so skip over the smaller
			 * free extents
			 */
			j = mb_find_next_bit(bitmap,
					EXT4_CLUSTERS_PER_GROUP(sb), i);
			freelen = j - i;

			if (freelen < ac->ac_g_ex.fe_len) {
				i = j;
				free -= freelen;
				continue;
			}
		}

		mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex);
		if (WARN_ON(ex.fe_len <= 0))
			break;
		if (free < ex.fe_len) {
			ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
			ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
					"%d free clusters as per "
					"group info. But got %d blocks",
					free, ex.fe_len);
			/*
			 * The number of free blocks differs. This mostly
			 * indicates that the bitmap is corrupt. So exit
			 * without claiming the space.
2517 */ 2518 break; 2519 } 2520 ex.fe_logical = 0xDEADC0DE; /* debug value */ 2521 ext4_mb_measure_extent(ac, &ex, e4b); 2522 2523 i += ex.fe_len; 2524 free -= ex.fe_len; 2525 } 2526 2527 ext4_mb_check_limits(ac, e4b, 1); 2528 } 2529 2530 /* 2531 * This is a special case for storages like raid5 2532 * we try to find stripe-aligned chunks for stripe-size-multiple requests 2533 */ 2534 static noinline_for_stack 2535 void ext4_mb_scan_aligned(struct ext4_allocation_context *ac, 2536 struct ext4_buddy *e4b) 2537 { 2538 struct super_block *sb = ac->ac_sb; 2539 struct ext4_sb_info *sbi = EXT4_SB(sb); 2540 void *bitmap = e4b->bd_bitmap; 2541 struct ext4_free_extent ex; 2542 ext4_fsblk_t first_group_block; 2543 ext4_fsblk_t a; 2544 ext4_grpblk_t i, stripe; 2545 int max; 2546 2547 BUG_ON(sbi->s_stripe == 0); 2548 2549 /* find first stripe-aligned block in group */ 2550 first_group_block = ext4_group_first_block_no(sb, e4b->bd_group); 2551 2552 a = first_group_block + sbi->s_stripe - 1; 2553 do_div(a, sbi->s_stripe); 2554 i = (a * sbi->s_stripe) - first_group_block; 2555 2556 stripe = EXT4_B2C(sbi, sbi->s_stripe); 2557 i = EXT4_B2C(sbi, i); 2558 while (i < EXT4_CLUSTERS_PER_GROUP(sb)) { 2559 if (!mb_test_bit(i, bitmap)) { 2560 max = mb_find_extent(e4b, i, stripe, &ex); 2561 if (max >= stripe) { 2562 ac->ac_found++; 2563 ac->ac_cX_found[ac->ac_criteria]++; 2564 ex.fe_logical = 0xDEADF00D; /* debug value */ 2565 ac->ac_b_ex = ex; 2566 ext4_mb_use_best_found(ac, e4b); 2567 break; 2568 } 2569 } 2570 i += stripe; 2571 } 2572 } 2573 2574 /* 2575 * This is also called BEFORE we load the buddy bitmap. 2576 * Returns either 1 or 0 indicating that the group is either suitable 2577 * for the allocation or not. 2578 */ 2579 static bool ext4_mb_good_group(struct ext4_allocation_context *ac, 2580 ext4_group_t group, enum criteria cr) 2581 { 2582 ext4_grpblk_t free, fragments; 2583 int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb)); 2584 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); 2585 2586 BUG_ON(cr < CR_POWER2_ALIGNED || cr >= EXT4_MB_NUM_CRS); 2587 2588 if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) 2589 return false; 2590 2591 free = grp->bb_free; 2592 if (free == 0) 2593 return false; 2594 2595 fragments = grp->bb_fragments; 2596 if (fragments == 0) 2597 return false; 2598 2599 switch (cr) { 2600 case CR_POWER2_ALIGNED: 2601 BUG_ON(ac->ac_2order == 0); 2602 2603 /* Avoid using the first bg of a flexgroup for data files */ 2604 if ((ac->ac_flags & EXT4_MB_HINT_DATA) && 2605 (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) && 2606 ((group % flex_size) == 0)) 2607 return false; 2608 2609 if (free < ac->ac_g_ex.fe_len) 2610 return false; 2611 2612 if (ac->ac_2order >= MB_NUM_ORDERS(ac->ac_sb)) 2613 return true; 2614 2615 if (grp->bb_largest_free_order < ac->ac_2order) 2616 return false; 2617 2618 return true; 2619 case CR_GOAL_LEN_FAST: 2620 case CR_BEST_AVAIL_LEN: 2621 if ((free / fragments) >= ac->ac_g_ex.fe_len) 2622 return true; 2623 break; 2624 case CR_GOAL_LEN_SLOW: 2625 if (free >= ac->ac_g_ex.fe_len) 2626 return true; 2627 break; 2628 case CR_ANY_FREE: 2629 return true; 2630 default: 2631 BUG(); 2632 } 2633 2634 return false; 2635 } 2636 2637 /* 2638 * This could return negative error code if something goes wrong 2639 * during ext4_mb_init_group(). This should not be called with 2640 * ext4_lock_group() held. 
2641 * 2642 * Note: because we are conditionally operating with the group lock in 2643 * the EXT4_MB_STRICT_CHECK case, we need to fake out sparse in this 2644 * function using __acquire and __release. This means we need to be 2645 * super careful before messing with the error path handling via "goto 2646 * out"! 2647 */ 2648 static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac, 2649 ext4_group_t group, enum criteria cr) 2650 { 2651 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); 2652 struct super_block *sb = ac->ac_sb; 2653 struct ext4_sb_info *sbi = EXT4_SB(sb); 2654 bool should_lock = ac->ac_flags & EXT4_MB_STRICT_CHECK; 2655 ext4_grpblk_t free; 2656 int ret = 0; 2657 2658 if (!grp) 2659 return -EFSCORRUPTED; 2660 if (sbi->s_mb_stats) 2661 atomic64_inc(&sbi->s_bal_cX_groups_considered[ac->ac_criteria]); 2662 if (should_lock) { 2663 ext4_lock_group(sb, group); 2664 __release(ext4_group_lock_ptr(sb, group)); 2665 } 2666 free = grp->bb_free; 2667 if (free == 0) 2668 goto out; 2669 /* 2670 * In all criterias except CR_ANY_FREE we try to avoid groups that 2671 * can't possibly satisfy the full goal request due to insufficient 2672 * free blocks. 2673 */ 2674 if (cr < CR_ANY_FREE && free < ac->ac_g_ex.fe_len) 2675 goto out; 2676 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) 2677 goto out; 2678 if (should_lock) { 2679 __acquire(ext4_group_lock_ptr(sb, group)); 2680 ext4_unlock_group(sb, group); 2681 } 2682 2683 /* We only do this if the grp has never been initialized */ 2684 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { 2685 struct ext4_group_desc *gdp = 2686 ext4_get_group_desc(sb, group, NULL); 2687 int ret; 2688 2689 /* 2690 * CR_POWER2_ALIGNED/CR_GOAL_LEN_FAST is a very optimistic 2691 * search to find large good chunks almost for free. If buddy 2692 * data is not ready, then this optimization makes no sense. But 2693 * we never skip the first block group in a flex_bg, since this 2694 * gets used for metadata block allocation, and we want to make 2695 * sure we locate metadata blocks in the first block group in 2696 * the flex_bg if possible. 2697 */ 2698 if (!ext4_mb_cr_expensive(cr) && 2699 (!sbi->s_log_groups_per_flex || 2700 ((group & ((1 << sbi->s_log_groups_per_flex) - 1)) != 0)) && 2701 !(ext4_has_group_desc_csum(sb) && 2702 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) 2703 return 0; 2704 ret = ext4_mb_init_group(sb, group, GFP_NOFS); 2705 if (ret) 2706 return ret; 2707 } 2708 2709 if (should_lock) { 2710 ext4_lock_group(sb, group); 2711 __release(ext4_group_lock_ptr(sb, group)); 2712 } 2713 ret = ext4_mb_good_group(ac, group, cr); 2714 out: 2715 if (should_lock) { 2716 __acquire(ext4_group_lock_ptr(sb, group)); 2717 ext4_unlock_group(sb, group); 2718 } 2719 return ret; 2720 } 2721 2722 /* 2723 * Start prefetching @nr block bitmaps starting at @group. 2724 * Return the next group which needs to be prefetched. 2725 */ 2726 ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group, 2727 unsigned int nr, int *cnt) 2728 { 2729 ext4_group_t ngroups = ext4_get_groups_count(sb); 2730 struct buffer_head *bh; 2731 struct blk_plug plug; 2732 2733 blk_start_plug(&plug); 2734 while (nr-- > 0) { 2735 struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, 2736 NULL); 2737 struct ext4_group_info *grp = ext4_get_group_info(sb, group); 2738 2739 /* 2740 * Prefetch block groups with free blocks; but don't 2741 * bother if it is marked uninitialized on disk, since 2742 * it won't require I/O to read. 
Also only try to
		 * prefetch once, so we avoid the getblk() call, which can
		 * be expensive.
		 */
		if (gdp && grp && !EXT4_MB_GRP_TEST_AND_SET_READ(grp) &&
		    EXT4_MB_GRP_NEED_INIT(grp) &&
		    ext4_free_group_clusters(sb, gdp) > 0) {
			bh = ext4_read_block_bitmap_nowait(sb, group, true);
			if (bh && !IS_ERR(bh)) {
				if (!buffer_uptodate(bh) && cnt)
					(*cnt)++;
				brelse(bh);
			}
		}
		if (++group >= ngroups)
			group = 0;
	}
	blk_finish_plug(&plug);
	return group;
}

/*
 * Prefetching reads the block bitmap into the buffer cache; but we
 * need to make sure that the buddy bitmap in the page cache has been
 * initialized.  Note that ext4_mb_init_group() will block if the I/O
 * is not yet completed, and will also submit the read itself if
 * ext4_mb_prefetch did not start the I/O.
 *
 * TODO: We should actually kick off the buddy bitmap setup in a work
 * queue when the buffer I/O is completed, so that we don't block
 * waiting for the block allocation bitmap read to finish when
 * ext4_mb_prefetch_fini is called from ext4_mb_regular_allocator().
 */
void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group,
			   unsigned int nr)
{
	struct ext4_group_desc *gdp;
	struct ext4_group_info *grp;

	while (nr-- > 0) {
		if (!group)
			group = ext4_get_groups_count(sb);
		group--;
		gdp = ext4_get_group_desc(sb, group, NULL);
		grp = ext4_get_group_info(sb, group);

		if (grp && gdp && EXT4_MB_GRP_NEED_INIT(grp) &&
		    ext4_free_group_clusters(sb, gdp) > 0) {
			if (ext4_mb_init_group(sb, group, GFP_NOFS))
				break;
		}
	}
}

static noinline_for_stack int
ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
{
	ext4_group_t prefetch_grp = 0, ngroups, group, i;
	enum criteria new_cr, cr = CR_GOAL_LEN_FAST;
	int err = 0, first_err = 0;
	unsigned int nr = 0, prefetch_ios = 0;
	struct ext4_sb_info *sbi;
	struct super_block *sb;
	struct ext4_buddy e4b;
	int lost;

	sb = ac->ac_sb;
	sbi = EXT4_SB(sb);
	ngroups = ext4_get_groups_count(sb);
	/* non-extent files are limited to low blocks/groups */
	if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
		ngroups = sbi->s_blockfile_groups;

	BUG_ON(ac->ac_status == AC_STATUS_FOUND);

	/* first, try the goal */
	err = ext4_mb_find_by_goal(ac, &e4b);
	if (err || ac->ac_status == AC_STATUS_FOUND)
		goto out;

	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
		goto out;

	/*
	 * ac->ac_2order is set only if the fe_len is a power of 2;
	 * if ac->ac_2order is set we also set the criteria to
	 * CR_POWER2_ALIGNED so that we try exact allocation using
	 * the buddy.
	 */
	i = fls(ac->ac_g_ex.fe_len);
	ac->ac_2order = 0;
	/*
	 * We search using buddy data only if the order of the request
	 * is greater than or equal to sbi->s_mb_order2_reqs.
	 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req.
	 * We also support searching for power-of-two requests only for
	 * requests up to the maximum buddy size we have constructed.
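	 *
	 * (Worked example, assuming the default mb_order2_req: a goal of
	 * fe_len = 16 gives fls(16) = 5; 16 is a power of two, so
	 * ac_2order becomes 4 and CR_POWER2_ALIGNED can serve the request
	 * straight from an order-4 buddy.  fe_len = 24 also gives
	 * fls(24) = 5 but fails is_power_of_2(), so ac_2order stays 0.)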
2838 */ 2839 if (i >= sbi->s_mb_order2_reqs && i <= MB_NUM_ORDERS(sb)) { 2840 if (is_power_of_2(ac->ac_g_ex.fe_len)) 2841 ac->ac_2order = array_index_nospec(i - 1, 2842 MB_NUM_ORDERS(sb)); 2843 } 2844 2845 /* if stream allocation is enabled, use global goal */ 2846 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) { 2847 /* TBD: may be hot point */ 2848 spin_lock(&sbi->s_md_lock); 2849 ac->ac_g_ex.fe_group = sbi->s_mb_last_group; 2850 ac->ac_g_ex.fe_start = sbi->s_mb_last_start; 2851 spin_unlock(&sbi->s_md_lock); 2852 } 2853 2854 /* 2855 * Let's just scan groups to find more-less suitable blocks We 2856 * start with CR_GOAL_LEN_FAST, unless it is power of 2 2857 * aligned, in which case let's do that faster approach first. 2858 */ 2859 if (ac->ac_2order) 2860 cr = CR_POWER2_ALIGNED; 2861 repeat: 2862 for (; cr < EXT4_MB_NUM_CRS && ac->ac_status == AC_STATUS_CONTINUE; cr++) { 2863 ac->ac_criteria = cr; 2864 /* 2865 * searching for the right group start 2866 * from the goal value specified 2867 */ 2868 group = ac->ac_g_ex.fe_group; 2869 ac->ac_groups_linear_remaining = sbi->s_mb_max_linear_groups; 2870 prefetch_grp = group; 2871 nr = 0; 2872 2873 for (i = 0, new_cr = cr; i < ngroups; i++, 2874 ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups)) { 2875 int ret = 0; 2876 2877 cond_resched(); 2878 if (new_cr != cr) { 2879 cr = new_cr; 2880 goto repeat; 2881 } 2882 2883 /* 2884 * Batch reads of the block allocation bitmaps 2885 * to get multiple READs in flight; limit 2886 * prefetching at inexpensive CR, otherwise mballoc 2887 * can spend a lot of time loading imperfect groups 2888 */ 2889 if ((prefetch_grp == group) && 2890 (ext4_mb_cr_expensive(cr) || 2891 prefetch_ios < sbi->s_mb_prefetch_limit)) { 2892 nr = sbi->s_mb_prefetch; 2893 if (ext4_has_feature_flex_bg(sb)) { 2894 nr = 1 << sbi->s_log_groups_per_flex; 2895 nr -= group & (nr - 1); 2896 nr = min(nr, sbi->s_mb_prefetch); 2897 } 2898 prefetch_grp = ext4_mb_prefetch(sb, group, 2899 nr, &prefetch_ios); 2900 } 2901 2902 /* This now checks without needing the buddy page */ 2903 ret = ext4_mb_good_group_nolock(ac, group, cr); 2904 if (ret <= 0) { 2905 if (!first_err) 2906 first_err = ret; 2907 continue; 2908 } 2909 2910 err = ext4_mb_load_buddy(sb, group, &e4b); 2911 if (err) 2912 goto out; 2913 2914 ext4_lock_group(sb, group); 2915 2916 /* 2917 * We need to check again after locking the 2918 * block group 2919 */ 2920 ret = ext4_mb_good_group(ac, group, cr); 2921 if (ret == 0) { 2922 ext4_unlock_group(sb, group); 2923 ext4_mb_unload_buddy(&e4b); 2924 continue; 2925 } 2926 2927 ac->ac_groups_scanned++; 2928 if (cr == CR_POWER2_ALIGNED) 2929 ext4_mb_simple_scan_group(ac, &e4b); 2930 else { 2931 bool is_stripe_aligned = sbi->s_stripe && 2932 !(ac->ac_g_ex.fe_len % 2933 EXT4_B2C(sbi, sbi->s_stripe)); 2934 2935 if ((cr == CR_GOAL_LEN_FAST || 2936 cr == CR_BEST_AVAIL_LEN) && 2937 is_stripe_aligned) 2938 ext4_mb_scan_aligned(ac, &e4b); 2939 2940 if (ac->ac_status == AC_STATUS_CONTINUE) 2941 ext4_mb_complex_scan_group(ac, &e4b); 2942 } 2943 2944 ext4_unlock_group(sb, group); 2945 ext4_mb_unload_buddy(&e4b); 2946 2947 if (ac->ac_status != AC_STATUS_CONTINUE) 2948 break; 2949 } 2950 /* Processed all groups and haven't found blocks */ 2951 if (sbi->s_mb_stats && i == ngroups) 2952 atomic64_inc(&sbi->s_bal_cX_failed[cr]); 2953 2954 if (i == ngroups && ac->ac_criteria == CR_BEST_AVAIL_LEN) 2955 /* Reset goal length to original goal length before 2956 * falling into CR_GOAL_LEN_SLOW */ 2957 ac->ac_g_ex.fe_len = ac->ac_orig_goal_len; 2958 } 2959 2960 if 
(ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND && 2961 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) { 2962 /* 2963 * We've been searching too long. Let's try to allocate 2964 * the best chunk we've found so far 2965 */ 2966 ext4_mb_try_best_found(ac, &e4b); 2967 if (ac->ac_status != AC_STATUS_FOUND) { 2968 /* 2969 * Someone more lucky has already allocated it. 2970 * The only thing we can do is just take first 2971 * found block(s) 2972 */ 2973 lost = atomic_inc_return(&sbi->s_mb_lost_chunks); 2974 mb_debug(sb, "lost chunk, group: %u, start: %d, len: %d, lost: %d\n", 2975 ac->ac_b_ex.fe_group, ac->ac_b_ex.fe_start, 2976 ac->ac_b_ex.fe_len, lost); 2977 2978 ac->ac_b_ex.fe_group = 0; 2979 ac->ac_b_ex.fe_start = 0; 2980 ac->ac_b_ex.fe_len = 0; 2981 ac->ac_status = AC_STATUS_CONTINUE; 2982 ac->ac_flags |= EXT4_MB_HINT_FIRST; 2983 cr = CR_ANY_FREE; 2984 goto repeat; 2985 } 2986 } 2987 2988 if (sbi->s_mb_stats && ac->ac_status == AC_STATUS_FOUND) 2989 atomic64_inc(&sbi->s_bal_cX_hits[ac->ac_criteria]); 2990 out: 2991 if (!err && ac->ac_status != AC_STATUS_FOUND && first_err) 2992 err = first_err; 2993 2994 mb_debug(sb, "Best len %d, origin len %d, ac_status %u, ac_flags 0x%x, cr %d ret %d\n", 2995 ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status, 2996 ac->ac_flags, cr, err); 2997 2998 if (nr) 2999 ext4_mb_prefetch_fini(sb, prefetch_grp, nr); 3000 3001 return err; 3002 } 3003 3004 static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos) 3005 { 3006 struct super_block *sb = pde_data(file_inode(seq->file)); 3007 ext4_group_t group; 3008 3009 if (*pos < 0 || *pos >= ext4_get_groups_count(sb)) 3010 return NULL; 3011 group = *pos + 1; 3012 return (void *) ((unsigned long) group); 3013 } 3014 3015 static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos) 3016 { 3017 struct super_block *sb = pde_data(file_inode(seq->file)); 3018 ext4_group_t group; 3019 3020 ++*pos; 3021 if (*pos < 0 || *pos >= ext4_get_groups_count(sb)) 3022 return NULL; 3023 group = *pos + 1; 3024 return (void *) ((unsigned long) group); 3025 } 3026 3027 static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v) 3028 { 3029 struct super_block *sb = pde_data(file_inode(seq->file)); 3030 ext4_group_t group = (ext4_group_t) ((unsigned long) v); 3031 int i, err; 3032 char nbuf[16]; 3033 struct ext4_buddy e4b; 3034 struct ext4_group_info *grinfo; 3035 unsigned char blocksize_bits = min_t(unsigned char, 3036 sb->s_blocksize_bits, 3037 EXT4_MAX_BLOCK_LOG_SIZE); 3038 struct sg { 3039 struct ext4_group_info info; 3040 ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2]; 3041 } sg; 3042 3043 group--; 3044 if (group == 0) 3045 seq_puts(seq, "#group: free frags first [" 3046 " 2^0 2^1 2^2 2^3 2^4 2^5 2^6 " 3047 " 2^7 2^8 2^9 2^10 2^11 2^12 2^13 ]\n"); 3048 3049 i = (blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) + 3050 sizeof(struct ext4_group_info); 3051 3052 grinfo = ext4_get_group_info(sb, group); 3053 if (!grinfo) 3054 return 0; 3055 /* Load the group info in memory only if not already loaded. 
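	 * (Editor's note: the NEED_INIT flag is cleared once the buddy
	 * has been generated, so this load is expected to happen at most
	 * once per group.)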
*/ 3056 if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) { 3057 err = ext4_mb_load_buddy(sb, group, &e4b); 3058 if (err) { 3059 seq_printf(seq, "#%-5u: %s\n", group, ext4_decode_error(NULL, err, nbuf)); 3060 return 0; 3061 } 3062 ext4_mb_unload_buddy(&e4b); 3063 } 3064 3065 /* 3066 * We care only about free space counters in the group info and 3067 * these are safe to access even after the buddy has been unloaded 3068 */ 3069 memcpy(&sg, grinfo, i); 3070 seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free, 3071 sg.info.bb_fragments, sg.info.bb_first_free); 3072 for (i = 0; i <= 13; i++) 3073 seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ? 3074 sg.info.bb_counters[i] : 0); 3075 seq_puts(seq, " ]"); 3076 if (EXT4_MB_GRP_BBITMAP_CORRUPT(&sg.info)) 3077 seq_puts(seq, " Block bitmap corrupted!"); 3078 seq_puts(seq, "\n"); 3079 3080 return 0; 3081 } 3082 3083 static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v) 3084 { 3085 } 3086 3087 const struct seq_operations ext4_mb_seq_groups_ops = { 3088 .start = ext4_mb_seq_groups_start, 3089 .next = ext4_mb_seq_groups_next, 3090 .stop = ext4_mb_seq_groups_stop, 3091 .show = ext4_mb_seq_groups_show, 3092 }; 3093 3094 int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset) 3095 { 3096 struct super_block *sb = seq->private; 3097 struct ext4_sb_info *sbi = EXT4_SB(sb); 3098 3099 seq_puts(seq, "mballoc:\n"); 3100 if (!sbi->s_mb_stats) { 3101 seq_puts(seq, "\tmb stats collection turned off.\n"); 3102 seq_puts( 3103 seq, 3104 "\tTo enable, please write \"1\" to sysfs file mb_stats.\n"); 3105 return 0; 3106 } 3107 seq_printf(seq, "\treqs: %u\n", atomic_read(&sbi->s_bal_reqs)); 3108 seq_printf(seq, "\tsuccess: %u\n", atomic_read(&sbi->s_bal_success)); 3109 3110 seq_printf(seq, "\tgroups_scanned: %u\n", 3111 atomic_read(&sbi->s_bal_groups_scanned)); 3112 3113 /* CR_POWER2_ALIGNED stats */ 3114 seq_puts(seq, "\tcr_p2_aligned_stats:\n"); 3115 seq_printf(seq, "\t\thits: %llu\n", 3116 atomic64_read(&sbi->s_bal_cX_hits[CR_POWER2_ALIGNED])); 3117 seq_printf( 3118 seq, "\t\tgroups_considered: %llu\n", 3119 atomic64_read( 3120 &sbi->s_bal_cX_groups_considered[CR_POWER2_ALIGNED])); 3121 seq_printf(seq, "\t\textents_scanned: %u\n", 3122 atomic_read(&sbi->s_bal_cX_ex_scanned[CR_POWER2_ALIGNED])); 3123 seq_printf(seq, "\t\tuseless_loops: %llu\n", 3124 atomic64_read(&sbi->s_bal_cX_failed[CR_POWER2_ALIGNED])); 3125 seq_printf(seq, "\t\tbad_suggestions: %u\n", 3126 atomic_read(&sbi->s_bal_p2_aligned_bad_suggestions)); 3127 3128 /* CR_GOAL_LEN_FAST stats */ 3129 seq_puts(seq, "\tcr_goal_fast_stats:\n"); 3130 seq_printf(seq, "\t\thits: %llu\n", 3131 atomic64_read(&sbi->s_bal_cX_hits[CR_GOAL_LEN_FAST])); 3132 seq_printf(seq, "\t\tgroups_considered: %llu\n", 3133 atomic64_read( 3134 &sbi->s_bal_cX_groups_considered[CR_GOAL_LEN_FAST])); 3135 seq_printf(seq, "\t\textents_scanned: %u\n", 3136 atomic_read(&sbi->s_bal_cX_ex_scanned[CR_GOAL_LEN_FAST])); 3137 seq_printf(seq, "\t\tuseless_loops: %llu\n", 3138 atomic64_read(&sbi->s_bal_cX_failed[CR_GOAL_LEN_FAST])); 3139 seq_printf(seq, "\t\tbad_suggestions: %u\n", 3140 atomic_read(&sbi->s_bal_goal_fast_bad_suggestions)); 3141 3142 /* CR_BEST_AVAIL_LEN stats */ 3143 seq_puts(seq, "\tcr_best_avail_stats:\n"); 3144 seq_printf(seq, "\t\thits: %llu\n", 3145 atomic64_read(&sbi->s_bal_cX_hits[CR_BEST_AVAIL_LEN])); 3146 seq_printf( 3147 seq, "\t\tgroups_considered: %llu\n", 3148 atomic64_read( 3149 &sbi->s_bal_cX_groups_considered[CR_BEST_AVAIL_LEN])); 3150 seq_printf(seq, "\t\textents_scanned: %u\n", 3151 
atomic_read(&sbi->s_bal_cX_ex_scanned[CR_BEST_AVAIL_LEN])); 3152 seq_printf(seq, "\t\tuseless_loops: %llu\n", 3153 atomic64_read(&sbi->s_bal_cX_failed[CR_BEST_AVAIL_LEN])); 3154 seq_printf(seq, "\t\tbad_suggestions: %u\n", 3155 atomic_read(&sbi->s_bal_best_avail_bad_suggestions)); 3156 3157 /* CR_GOAL_LEN_SLOW stats */ 3158 seq_puts(seq, "\tcr_goal_slow_stats:\n"); 3159 seq_printf(seq, "\t\thits: %llu\n", 3160 atomic64_read(&sbi->s_bal_cX_hits[CR_GOAL_LEN_SLOW])); 3161 seq_printf(seq, "\t\tgroups_considered: %llu\n", 3162 atomic64_read( 3163 &sbi->s_bal_cX_groups_considered[CR_GOAL_LEN_SLOW])); 3164 seq_printf(seq, "\t\textents_scanned: %u\n", 3165 atomic_read(&sbi->s_bal_cX_ex_scanned[CR_GOAL_LEN_SLOW])); 3166 seq_printf(seq, "\t\tuseless_loops: %llu\n", 3167 atomic64_read(&sbi->s_bal_cX_failed[CR_GOAL_LEN_SLOW])); 3168 3169 /* CR_ANY_FREE stats */ 3170 seq_puts(seq, "\tcr_any_free_stats:\n"); 3171 seq_printf(seq, "\t\thits: %llu\n", 3172 atomic64_read(&sbi->s_bal_cX_hits[CR_ANY_FREE])); 3173 seq_printf( 3174 seq, "\t\tgroups_considered: %llu\n", 3175 atomic64_read(&sbi->s_bal_cX_groups_considered[CR_ANY_FREE])); 3176 seq_printf(seq, "\t\textents_scanned: %u\n", 3177 atomic_read(&sbi->s_bal_cX_ex_scanned[CR_ANY_FREE])); 3178 seq_printf(seq, "\t\tuseless_loops: %llu\n", 3179 atomic64_read(&sbi->s_bal_cX_failed[CR_ANY_FREE])); 3180 3181 /* Aggregates */ 3182 seq_printf(seq, "\textents_scanned: %u\n", 3183 atomic_read(&sbi->s_bal_ex_scanned)); 3184 seq_printf(seq, "\t\tgoal_hits: %u\n", atomic_read(&sbi->s_bal_goals)); 3185 seq_printf(seq, "\t\tlen_goal_hits: %u\n", 3186 atomic_read(&sbi->s_bal_len_goals)); 3187 seq_printf(seq, "\t\t2^n_hits: %u\n", atomic_read(&sbi->s_bal_2orders)); 3188 seq_printf(seq, "\t\tbreaks: %u\n", atomic_read(&sbi->s_bal_breaks)); 3189 seq_printf(seq, "\t\tlost: %u\n", atomic_read(&sbi->s_mb_lost_chunks)); 3190 seq_printf(seq, "\tbuddies_generated: %u/%u\n", 3191 atomic_read(&sbi->s_mb_buddies_generated), 3192 ext4_get_groups_count(sb)); 3193 seq_printf(seq, "\tbuddies_time_used: %llu\n", 3194 atomic64_read(&sbi->s_mb_generation_time)); 3195 seq_printf(seq, "\tpreallocated: %u\n", 3196 atomic_read(&sbi->s_mb_preallocated)); 3197 seq_printf(seq, "\tdiscarded: %u\n", atomic_read(&sbi->s_mb_discarded)); 3198 return 0; 3199 } 3200 3201 static void *ext4_mb_seq_structs_summary_start(struct seq_file *seq, loff_t *pos) 3202 { 3203 struct super_block *sb = pde_data(file_inode(seq->file)); 3204 unsigned long position; 3205 3206 if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb)) 3207 return NULL; 3208 position = *pos + 1; 3209 return (void *) ((unsigned long) position); 3210 } 3211 3212 static void *ext4_mb_seq_structs_summary_next(struct seq_file *seq, void *v, loff_t *pos) 3213 { 3214 struct super_block *sb = pde_data(file_inode(seq->file)); 3215 unsigned long position; 3216 3217 ++*pos; 3218 if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb)) 3219 return NULL; 3220 position = *pos + 1; 3221 return (void *) ((unsigned long) position); 3222 } 3223 3224 static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v) 3225 { 3226 struct super_block *sb = pde_data(file_inode(seq->file)); 3227 struct ext4_sb_info *sbi = EXT4_SB(sb); 3228 unsigned long position = ((unsigned long) v); 3229 struct ext4_group_info *grp; 3230 unsigned int count; 3231 3232 position--; 3233 if (position >= MB_NUM_ORDERS(sb)) { 3234 position -= MB_NUM_ORDERS(sb); 3235 if (position == 0) 3236 seq_puts(seq, "avg_fragment_size_lists:\n"); 3237 3238 count = 0; 3239 
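	/*
	 * (Editor's note: the list is only traversed here, so the
	 * per-order rwlock is taken for read; "count" becomes the number
	 * of groups currently filed under this average-fragment-size
	 * order, which is what list_order_<n>_groups reports below.)
	 */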
read_lock(&sbi->s_mb_avg_fragment_size_locks[position]); 3240 list_for_each_entry(grp, &sbi->s_mb_avg_fragment_size[position], 3241 bb_avg_fragment_size_node) 3242 count++; 3243 read_unlock(&sbi->s_mb_avg_fragment_size_locks[position]); 3244 seq_printf(seq, "\tlist_order_%u_groups: %u\n", 3245 (unsigned int)position, count); 3246 return 0; 3247 } 3248 3249 if (position == 0) { 3250 seq_printf(seq, "optimize_scan: %d\n", 3251 test_opt2(sb, MB_OPTIMIZE_SCAN) ? 1 : 0); 3252 seq_puts(seq, "max_free_order_lists:\n"); 3253 } 3254 count = 0; 3255 read_lock(&sbi->s_mb_largest_free_orders_locks[position]); 3256 list_for_each_entry(grp, &sbi->s_mb_largest_free_orders[position], 3257 bb_largest_free_order_node) 3258 count++; 3259 read_unlock(&sbi->s_mb_largest_free_orders_locks[position]); 3260 seq_printf(seq, "\tlist_order_%u_groups: %u\n", 3261 (unsigned int)position, count); 3262 3263 return 0; 3264 } 3265 3266 static void ext4_mb_seq_structs_summary_stop(struct seq_file *seq, void *v) 3267 { 3268 } 3269 3270 const struct seq_operations ext4_mb_seq_structs_summary_ops = { 3271 .start = ext4_mb_seq_structs_summary_start, 3272 .next = ext4_mb_seq_structs_summary_next, 3273 .stop = ext4_mb_seq_structs_summary_stop, 3274 .show = ext4_mb_seq_structs_summary_show, 3275 }; 3276 3277 static struct kmem_cache *get_groupinfo_cache(int blocksize_bits) 3278 { 3279 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; 3280 struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index]; 3281 3282 BUG_ON(!cachep); 3283 return cachep; 3284 } 3285 3286 /* 3287 * Allocate the top-level s_group_info array for the specified number 3288 * of groups 3289 */ 3290 int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups) 3291 { 3292 struct ext4_sb_info *sbi = EXT4_SB(sb); 3293 unsigned size; 3294 struct ext4_group_info ***old_groupinfo, ***new_groupinfo; 3295 3296 size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >> 3297 EXT4_DESC_PER_BLOCK_BITS(sb); 3298 if (size <= sbi->s_group_info_size) 3299 return 0; 3300 3301 size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size); 3302 new_groupinfo = kvzalloc(size, GFP_KERNEL); 3303 if (!new_groupinfo) { 3304 ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group"); 3305 return -ENOMEM; 3306 } 3307 rcu_read_lock(); 3308 old_groupinfo = rcu_dereference(sbi->s_group_info); 3309 if (old_groupinfo) 3310 memcpy(new_groupinfo, old_groupinfo, 3311 sbi->s_group_info_size * sizeof(*sbi->s_group_info)); 3312 rcu_read_unlock(); 3313 rcu_assign_pointer(sbi->s_group_info, new_groupinfo); 3314 sbi->s_group_info_size = size / sizeof(*sbi->s_group_info); 3315 if (old_groupinfo) 3316 ext4_kvfree_array_rcu(old_groupinfo); 3317 ext4_debug("allocated s_groupinfo array for %d meta_bg's\n", 3318 sbi->s_group_info_size); 3319 return 0; 3320 } 3321 3322 /* Create and initialize ext4_group_info data for the given group. */ 3323 int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group, 3324 struct ext4_group_desc *desc) 3325 { 3326 int i; 3327 int metalen = 0; 3328 int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb); 3329 struct ext4_sb_info *sbi = EXT4_SB(sb); 3330 struct ext4_group_info **meta_group_info; 3331 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits); 3332 3333 /* 3334 * First check if this group is the first of a reserved block. 
3335 * If it's true, we have to allocate a new table of pointers 3336 * to ext4_group_info structures 3337 */ 3338 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) { 3339 metalen = sizeof(*meta_group_info) << 3340 EXT4_DESC_PER_BLOCK_BITS(sb); 3341 meta_group_info = kmalloc(metalen, GFP_NOFS); 3342 if (meta_group_info == NULL) { 3343 ext4_msg(sb, KERN_ERR, "can't allocate mem " 3344 "for a buddy group"); 3345 return -ENOMEM; 3346 } 3347 rcu_read_lock(); 3348 rcu_dereference(sbi->s_group_info)[idx] = meta_group_info; 3349 rcu_read_unlock(); 3350 } 3351 3352 meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx); 3353 i = group & (EXT4_DESC_PER_BLOCK(sb) - 1); 3354 3355 meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS); 3356 if (meta_group_info[i] == NULL) { 3357 ext4_msg(sb, KERN_ERR, "can't allocate buddy mem"); 3358 goto exit_group_info; 3359 } 3360 set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, 3361 &(meta_group_info[i]->bb_state)); 3362 3363 /* 3364 * initialize bb_free to be able to skip 3365 * empty groups without initialization 3366 */ 3367 if (ext4_has_group_desc_csum(sb) && 3368 (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { 3369 meta_group_info[i]->bb_free = 3370 ext4_free_clusters_after_init(sb, group, desc); 3371 } else { 3372 meta_group_info[i]->bb_free = 3373 ext4_free_group_clusters(sb, desc); 3374 } 3375 3376 INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list); 3377 init_rwsem(&meta_group_info[i]->alloc_sem); 3378 meta_group_info[i]->bb_free_root = RB_ROOT; 3379 INIT_LIST_HEAD(&meta_group_info[i]->bb_largest_free_order_node); 3380 INIT_LIST_HEAD(&meta_group_info[i]->bb_avg_fragment_size_node); 3381 meta_group_info[i]->bb_largest_free_order = -1; /* uninit */ 3382 meta_group_info[i]->bb_avg_fragment_size_order = -1; /* uninit */ 3383 meta_group_info[i]->bb_group = group; 3384 3385 mb_group_bb_bitmap_alloc(sb, meta_group_info[i], group); 3386 return 0; 3387 3388 exit_group_info: 3389 /* If a meta_group_info table has been allocated, release it now */ 3390 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) { 3391 struct ext4_group_info ***group_info; 3392 3393 rcu_read_lock(); 3394 group_info = rcu_dereference(sbi->s_group_info); 3395 kfree(group_info[idx]); 3396 group_info[idx] = NULL; 3397 rcu_read_unlock(); 3398 } 3399 return -ENOMEM; 3400 } /* ext4_mb_add_groupinfo */ 3401 3402 static int ext4_mb_init_backend(struct super_block *sb) 3403 { 3404 ext4_group_t ngroups = ext4_get_groups_count(sb); 3405 ext4_group_t i; 3406 struct ext4_sb_info *sbi = EXT4_SB(sb); 3407 int err; 3408 struct ext4_group_desc *desc; 3409 struct ext4_group_info ***group_info; 3410 struct kmem_cache *cachep; 3411 3412 err = ext4_mb_alloc_groupinfo(sb, ngroups); 3413 if (err) 3414 return err; 3415 3416 sbi->s_buddy_cache = new_inode(sb); 3417 if (sbi->s_buddy_cache == NULL) { 3418 ext4_msg(sb, KERN_ERR, "can't get new inode"); 3419 goto err_freesgi; 3420 } 3421 /* To avoid potentially colliding with an valid on-disk inode number, 3422 * use EXT4_BAD_INO for the buddy cache inode number. This inode is 3423 * not in the inode hash, so it should never be found by iget(), but 3424 * this will avoid confusion if it ever shows up during debugging. 
 */
	sbi->s_buddy_cache->i_ino = EXT4_BAD_INO;
	EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
	for (i = 0; i < ngroups; i++) {
		cond_resched();
		desc = ext4_get_group_desc(sb, i, NULL);
		if (desc == NULL) {
			ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i);
			goto err_freebuddy;
		}
		if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
			goto err_freebuddy;
	}

	if (ext4_has_feature_flex_bg(sb)) {
		/* a single flex group is supposed to be read by a single IO.
		 * 2 ^ s_log_groups_per_flex != UINT_MAX as s_mb_prefetch is
		 * an unsigned integer, so the maximum shift is 32.
		 */
		if (sbi->s_es->s_log_groups_per_flex >= 32) {
			ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group");
			goto err_freebuddy;
		}
		sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex,
			BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9));
		sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */
	} else {
		sbi->s_mb_prefetch = 32;
	}
	if (sbi->s_mb_prefetch > ext4_get_groups_count(sb))
		sbi->s_mb_prefetch = ext4_get_groups_count(sb);
	/*
	 * How many real IOs to prefetch within a single allocation at
	 * CR_POWER2_ALIGNED. Given that CR_POWER2_ALIGNED is a CPU-related
	 * optimization we shouldn't try to load too many groups; at some
	 * point we should start to use what we've got in memory.
	 * With an average random access time of 5ms it'd take a second
	 * to read 200 groups (* N with flex_bg), so cap the limit at
	 * 4 * s_mb_prefetch.
	 */
	sbi->s_mb_prefetch_limit = sbi->s_mb_prefetch * 4;
	if (sbi->s_mb_prefetch_limit > ext4_get_groups_count(sb))
		sbi->s_mb_prefetch_limit = ext4_get_groups_count(sb);

	return 0;

err_freebuddy:
	cachep = get_groupinfo_cache(sb->s_blocksize_bits);
	while (i-- > 0) {
		struct ext4_group_info *grp = ext4_get_group_info(sb, i);

		if (grp)
			kmem_cache_free(cachep, grp);
	}
	i = sbi->s_group_info_size;
	rcu_read_lock();
	group_info = rcu_dereference(sbi->s_group_info);
	while (i-- > 0)
		kfree(group_info[i]);
	rcu_read_unlock();
	iput(sbi->s_buddy_cache);
err_freesgi:
	rcu_read_lock();
	kvfree(rcu_dereference(sbi->s_group_info));
	rcu_read_unlock();
	return -ENOMEM;
}

static void ext4_groupinfo_destroy_slabs(void)
{
	int i;

	for (i = 0; i < NR_GRPINFO_CACHES; i++) {
		kmem_cache_destroy(ext4_groupinfo_caches[i]);
		ext4_groupinfo_caches[i] = NULL;
	}
}

static int ext4_groupinfo_create_slab(size_t size)
{
	static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex);
	int slab_size;
	int blocksize_bits = order_base_2(size);
	int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
	struct kmem_cache *cachep;

	if (cache_index >= NR_GRPINFO_CACHES)
		return -EINVAL;

	if (unlikely(cache_index < 0))
		cache_index = 0;

	mutex_lock(&ext4_grpinfo_slab_create_mutex);
	if (ext4_groupinfo_caches[cache_index]) {
		mutex_unlock(&ext4_grpinfo_slab_create_mutex);
		return 0;	/* Already created */
	}

	slab_size = offsetof(struct ext4_group_info,
				bb_counters[blocksize_bits + 2]);

	cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index],
					slab_size, 0, SLAB_RECLAIM_ACCOUNT,
					NULL);

	ext4_groupinfo_caches[cache_index] = cachep;

	mutex_unlock(&ext4_grpinfo_slab_create_mutex);
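
	/*
	 * (Editor's example of the sizing above: for 4k blocks,
	 * order_base_2(4096) = 12, so slab_size covers
	 * bb_counters[0..13] - one counter per buddy order - which is
	 * exactly the 2^0..2^13 range printed by
	 * ext4_mb_seq_groups_show().)
	 */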
3531 if (!cachep) { 3532 printk(KERN_EMERG 3533 "EXT4-fs: no memory for groupinfo slab cache\n"); 3534 return -ENOMEM; 3535 } 3536 3537 return 0; 3538 } 3539 3540 static void ext4_discard_work(struct work_struct *work) 3541 { 3542 struct ext4_sb_info *sbi = container_of(work, 3543 struct ext4_sb_info, s_discard_work); 3544 struct super_block *sb = sbi->s_sb; 3545 struct ext4_free_data *fd, *nfd; 3546 struct ext4_buddy e4b; 3547 LIST_HEAD(discard_list); 3548 ext4_group_t grp, load_grp; 3549 int err = 0; 3550 3551 spin_lock(&sbi->s_md_lock); 3552 list_splice_init(&sbi->s_discard_list, &discard_list); 3553 spin_unlock(&sbi->s_md_lock); 3554 3555 load_grp = UINT_MAX; 3556 list_for_each_entry_safe(fd, nfd, &discard_list, efd_list) { 3557 /* 3558 * If filesystem is umounting or no memory or suffering 3559 * from no space, give up the discard 3560 */ 3561 if ((sb->s_flags & SB_ACTIVE) && !err && 3562 !atomic_read(&sbi->s_retry_alloc_pending)) { 3563 grp = fd->efd_group; 3564 if (grp != load_grp) { 3565 if (load_grp != UINT_MAX) 3566 ext4_mb_unload_buddy(&e4b); 3567 3568 err = ext4_mb_load_buddy(sb, grp, &e4b); 3569 if (err) { 3570 kmem_cache_free(ext4_free_data_cachep, fd); 3571 load_grp = UINT_MAX; 3572 continue; 3573 } else { 3574 load_grp = grp; 3575 } 3576 } 3577 3578 ext4_lock_group(sb, grp); 3579 ext4_try_to_trim_range(sb, &e4b, fd->efd_start_cluster, 3580 fd->efd_start_cluster + fd->efd_count - 1, 1); 3581 ext4_unlock_group(sb, grp); 3582 } 3583 kmem_cache_free(ext4_free_data_cachep, fd); 3584 } 3585 3586 if (load_grp != UINT_MAX) 3587 ext4_mb_unload_buddy(&e4b); 3588 } 3589 3590 int ext4_mb_init(struct super_block *sb) 3591 { 3592 struct ext4_sb_info *sbi = EXT4_SB(sb); 3593 unsigned i, j; 3594 unsigned offset, offset_incr; 3595 unsigned max; 3596 int ret; 3597 3598 i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_offsets); 3599 3600 sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL); 3601 if (sbi->s_mb_offsets == NULL) { 3602 ret = -ENOMEM; 3603 goto out; 3604 } 3605 3606 i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_maxs); 3607 sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL); 3608 if (sbi->s_mb_maxs == NULL) { 3609 ret = -ENOMEM; 3610 goto out; 3611 } 3612 3613 ret = ext4_groupinfo_create_slab(sb->s_blocksize); 3614 if (ret < 0) 3615 goto out; 3616 3617 /* order 0 is regular bitmap */ 3618 sbi->s_mb_maxs[0] = sb->s_blocksize << 3; 3619 sbi->s_mb_offsets[0] = 0; 3620 3621 i = 1; 3622 offset = 0; 3623 offset_incr = 1 << (sb->s_blocksize_bits - 1); 3624 max = sb->s_blocksize << 2; 3625 do { 3626 sbi->s_mb_offsets[i] = offset; 3627 sbi->s_mb_maxs[i] = max; 3628 offset += offset_incr; 3629 offset_incr = offset_incr >> 1; 3630 max = max >> 1; 3631 i++; 3632 } while (i < MB_NUM_ORDERS(sb)); 3633 3634 sbi->s_mb_avg_fragment_size = 3635 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head), 3636 GFP_KERNEL); 3637 if (!sbi->s_mb_avg_fragment_size) { 3638 ret = -ENOMEM; 3639 goto out; 3640 } 3641 sbi->s_mb_avg_fragment_size_locks = 3642 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t), 3643 GFP_KERNEL); 3644 if (!sbi->s_mb_avg_fragment_size_locks) { 3645 ret = -ENOMEM; 3646 goto out; 3647 } 3648 for (i = 0; i < MB_NUM_ORDERS(sb); i++) { 3649 INIT_LIST_HEAD(&sbi->s_mb_avg_fragment_size[i]); 3650 rwlock_init(&sbi->s_mb_avg_fragment_size_locks[i]); 3651 } 3652 sbi->s_mb_largest_free_orders = 3653 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head), 3654 GFP_KERNEL); 3655 if (!sbi->s_mb_largest_free_orders) { 3656 ret = -ENOMEM; 3657 goto out; 3658 } 3659 sbi->s_mb_largest_free_orders_locks = 3660 
kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t),
3661 			GFP_KERNEL);
3662 	if (!sbi->s_mb_largest_free_orders_locks) {
3663 		ret = -ENOMEM;
3664 		goto out;
3665 	}
3666 	for (i = 0; i < MB_NUM_ORDERS(sb); i++) {
3667 		INIT_LIST_HEAD(&sbi->s_mb_largest_free_orders[i]);
3668 		rwlock_init(&sbi->s_mb_largest_free_orders_locks[i]);
3669 	}
3670 
3671 	spin_lock_init(&sbi->s_md_lock);
3672 	sbi->s_mb_free_pending = 0;
3673 	INIT_LIST_HEAD(&sbi->s_freed_data_list[0]);
3674 	INIT_LIST_HEAD(&sbi->s_freed_data_list[1]);
3675 	INIT_LIST_HEAD(&sbi->s_discard_list);
3676 	INIT_WORK(&sbi->s_discard_work, ext4_discard_work);
3677 	atomic_set(&sbi->s_retry_alloc_pending, 0);
3678 
3679 	sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
3680 	sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
3681 	sbi->s_mb_stats = MB_DEFAULT_STATS;
3682 	sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
3683 	sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
3684 	sbi->s_mb_best_avail_max_trim_order = MB_DEFAULT_BEST_AVAIL_TRIM_ORDER;
3685 
3686 	/*
3687 	 * The default group preallocation is 512, which for 4k block
3688 	 * sizes translates to 2 megabytes. However, for bigalloc file
3689 	 * systems, this is probably too big (i.e., if the cluster size
3690 	 * is 1 megabyte, then group preallocation size becomes half a
3691 	 * gigabyte!). As a default, we will keep a two megabyte
3692 	 * group prealloc size for cluster sizes up to 64k, and after
3693 	 * that, we will force a minimum group preallocation size of
3694 	 * 32 clusters. This translates to 8 megs when the cluster
3695 	 * size is 256k, and 32 megs when the cluster size is 1 meg,
3696 	 * which seems reasonable as a default.
3697 	 */
3698 	sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >>
3699 				       sbi->s_cluster_bits, 32);
3700 	/*
3701 	 * If there is a s_stripe > 1, then we set the s_mb_group_prealloc
3702 	 * to the lowest multiple of s_stripe which is bigger than
3703 	 * the s_mb_group_prealloc as determined above. We want
3704 	 * the preallocation size to be an exact multiple of the
3705 	 * RAID stripe size so that preallocations don't fragment
3706 	 * the stripes.
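 * For instance (illustrative numbers, assuming a non-bigalloc filesystem
 * where one cluster is one block): with the default group prealloc of
 * 512 blocks and s_stripe = 384 blocks, roundup(512, 384) = 768 blocks,
 * i.e. exactly two stripes.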
3707 */ 3708 if (sbi->s_stripe > 1) { 3709 sbi->s_mb_group_prealloc = roundup( 3710 sbi->s_mb_group_prealloc, EXT4_B2C(sbi, sbi->s_stripe)); 3711 } 3712 3713 sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group); 3714 if (sbi->s_locality_groups == NULL) { 3715 ret = -ENOMEM; 3716 goto out; 3717 } 3718 for_each_possible_cpu(i) { 3719 struct ext4_locality_group *lg; 3720 lg = per_cpu_ptr(sbi->s_locality_groups, i); 3721 mutex_init(&lg->lg_mutex); 3722 for (j = 0; j < PREALLOC_TB_SIZE; j++) 3723 INIT_LIST_HEAD(&lg->lg_prealloc_list[j]); 3724 spin_lock_init(&lg->lg_prealloc_lock); 3725 } 3726 3727 if (bdev_nonrot(sb->s_bdev)) 3728 sbi->s_mb_max_linear_groups = 0; 3729 else 3730 sbi->s_mb_max_linear_groups = MB_DEFAULT_LINEAR_LIMIT; 3731 /* init file for buddy data */ 3732 ret = ext4_mb_init_backend(sb); 3733 if (ret != 0) 3734 goto out_free_locality_groups; 3735 3736 return 0; 3737 3738 out_free_locality_groups: 3739 free_percpu(sbi->s_locality_groups); 3740 sbi->s_locality_groups = NULL; 3741 out: 3742 kfree(sbi->s_mb_avg_fragment_size); 3743 kfree(sbi->s_mb_avg_fragment_size_locks); 3744 kfree(sbi->s_mb_largest_free_orders); 3745 kfree(sbi->s_mb_largest_free_orders_locks); 3746 kfree(sbi->s_mb_offsets); 3747 sbi->s_mb_offsets = NULL; 3748 kfree(sbi->s_mb_maxs); 3749 sbi->s_mb_maxs = NULL; 3750 return ret; 3751 } 3752 3753 /* need to called with the ext4 group lock held */ 3754 static int ext4_mb_cleanup_pa(struct ext4_group_info *grp) 3755 { 3756 struct ext4_prealloc_space *pa; 3757 struct list_head *cur, *tmp; 3758 int count = 0; 3759 3760 list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) { 3761 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); 3762 list_del(&pa->pa_group_list); 3763 count++; 3764 kmem_cache_free(ext4_pspace_cachep, pa); 3765 } 3766 return count; 3767 } 3768 3769 void ext4_mb_release(struct super_block *sb) 3770 { 3771 ext4_group_t ngroups = ext4_get_groups_count(sb); 3772 ext4_group_t i; 3773 int num_meta_group_infos; 3774 struct ext4_group_info *grinfo, ***group_info; 3775 struct ext4_sb_info *sbi = EXT4_SB(sb); 3776 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits); 3777 int count; 3778 3779 if (test_opt(sb, DISCARD)) { 3780 /* 3781 * wait the discard work to drain all of ext4_free_data 3782 */ 3783 flush_work(&sbi->s_discard_work); 3784 WARN_ON_ONCE(!list_empty(&sbi->s_discard_list)); 3785 } 3786 3787 if (sbi->s_group_info) { 3788 for (i = 0; i < ngroups; i++) { 3789 cond_resched(); 3790 grinfo = ext4_get_group_info(sb, i); 3791 if (!grinfo) 3792 continue; 3793 mb_group_bb_bitmap_free(grinfo); 3794 ext4_lock_group(sb, i); 3795 count = ext4_mb_cleanup_pa(grinfo); 3796 if (count) 3797 mb_debug(sb, "mballoc: %d PAs left\n", 3798 count); 3799 ext4_unlock_group(sb, i); 3800 kmem_cache_free(cachep, grinfo); 3801 } 3802 num_meta_group_infos = (ngroups + 3803 EXT4_DESC_PER_BLOCK(sb) - 1) >> 3804 EXT4_DESC_PER_BLOCK_BITS(sb); 3805 rcu_read_lock(); 3806 group_info = rcu_dereference(sbi->s_group_info); 3807 for (i = 0; i < num_meta_group_infos; i++) 3808 kfree(group_info[i]); 3809 kvfree(group_info); 3810 rcu_read_unlock(); 3811 } 3812 kfree(sbi->s_mb_avg_fragment_size); 3813 kfree(sbi->s_mb_avg_fragment_size_locks); 3814 kfree(sbi->s_mb_largest_free_orders); 3815 kfree(sbi->s_mb_largest_free_orders_locks); 3816 kfree(sbi->s_mb_offsets); 3817 kfree(sbi->s_mb_maxs); 3818 iput(sbi->s_buddy_cache); 3819 if (sbi->s_mb_stats) { 3820 ext4_msg(sb, KERN_INFO, 3821 "mballoc: %u blocks %u reqs (%u success)", 3822 
atomic_read(&sbi->s_bal_allocated), 3823 atomic_read(&sbi->s_bal_reqs), 3824 atomic_read(&sbi->s_bal_success)); 3825 ext4_msg(sb, KERN_INFO, 3826 "mballoc: %u extents scanned, %u groups scanned, %u goal hits, " 3827 "%u 2^N hits, %u breaks, %u lost", 3828 atomic_read(&sbi->s_bal_ex_scanned), 3829 atomic_read(&sbi->s_bal_groups_scanned), 3830 atomic_read(&sbi->s_bal_goals), 3831 atomic_read(&sbi->s_bal_2orders), 3832 atomic_read(&sbi->s_bal_breaks), 3833 atomic_read(&sbi->s_mb_lost_chunks)); 3834 ext4_msg(sb, KERN_INFO, 3835 "mballoc: %u generated and it took %llu", 3836 atomic_read(&sbi->s_mb_buddies_generated), 3837 atomic64_read(&sbi->s_mb_generation_time)); 3838 ext4_msg(sb, KERN_INFO, 3839 "mballoc: %u preallocated, %u discarded", 3840 atomic_read(&sbi->s_mb_preallocated), 3841 atomic_read(&sbi->s_mb_discarded)); 3842 } 3843 3844 free_percpu(sbi->s_locality_groups); 3845 } 3846 3847 static inline int ext4_issue_discard(struct super_block *sb, 3848 ext4_group_t block_group, ext4_grpblk_t cluster, int count) 3849 { 3850 ext4_fsblk_t discard_block; 3851 3852 discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) + 3853 ext4_group_first_block_no(sb, block_group)); 3854 count = EXT4_C2B(EXT4_SB(sb), count); 3855 trace_ext4_discard_blocks(sb, 3856 (unsigned long long) discard_block, count); 3857 3858 return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0); 3859 } 3860 3861 static void ext4_free_data_in_buddy(struct super_block *sb, 3862 struct ext4_free_data *entry) 3863 { 3864 struct ext4_buddy e4b; 3865 struct ext4_group_info *db; 3866 int err, count = 0; 3867 3868 mb_debug(sb, "gonna free %u blocks in group %u (0x%p):", 3869 entry->efd_count, entry->efd_group, entry); 3870 3871 err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b); 3872 /* we expect to find existing buddy because it's pinned */ 3873 BUG_ON(err != 0); 3874 3875 spin_lock(&EXT4_SB(sb)->s_md_lock); 3876 EXT4_SB(sb)->s_mb_free_pending -= entry->efd_count; 3877 spin_unlock(&EXT4_SB(sb)->s_md_lock); 3878 3879 db = e4b.bd_info; 3880 /* there are blocks to put in buddy to make them really free */ 3881 count += entry->efd_count; 3882 ext4_lock_group(sb, entry->efd_group); 3883 /* Take it out of per group rb tree */ 3884 rb_erase(&entry->efd_node, &(db->bb_free_root)); 3885 mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count); 3886 3887 /* 3888 * Clear the trimmed flag for the group so that the next 3889 * ext4_trim_fs can trim it. 3890 */ 3891 EXT4_MB_GRP_CLEAR_TRIMMED(db); 3892 3893 if (!db->bb_free_root.rb_node) { 3894 /* No more items in the per group rb tree 3895 * balance refcounts from ext4_mb_free_metadata() 3896 */ 3897 folio_put(e4b.bd_buddy_folio); 3898 folio_put(e4b.bd_bitmap_folio); 3899 } 3900 ext4_unlock_group(sb, entry->efd_group); 3901 ext4_mb_unload_buddy(&e4b); 3902 3903 mb_debug(sb, "freed %d blocks in 1 structures\n", count); 3904 } 3905 3906 /* 3907 * This function is called by the jbd2 layer once the commit has finished, 3908 * so we know we can free the blocks that were released with that commit. 
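 * Freed extents are staged on one of two s_freed_data_list lists, chosen
 * by the low bit of the commit tid, so this function can drain the
 * committing transaction's entries without taking s_md_lock while new
 * frees keep landing on the other list.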
3909 */ 3910 void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid) 3911 { 3912 struct ext4_sb_info *sbi = EXT4_SB(sb); 3913 struct ext4_free_data *entry, *tmp; 3914 LIST_HEAD(freed_data_list); 3915 struct list_head *s_freed_head = &sbi->s_freed_data_list[commit_tid & 1]; 3916 bool wake; 3917 3918 list_replace_init(s_freed_head, &freed_data_list); 3919 3920 list_for_each_entry(entry, &freed_data_list, efd_list) 3921 ext4_free_data_in_buddy(sb, entry); 3922 3923 if (test_opt(sb, DISCARD)) { 3924 spin_lock(&sbi->s_md_lock); 3925 wake = list_empty(&sbi->s_discard_list); 3926 list_splice_tail(&freed_data_list, &sbi->s_discard_list); 3927 spin_unlock(&sbi->s_md_lock); 3928 if (wake) 3929 queue_work(system_unbound_wq, &sbi->s_discard_work); 3930 } else { 3931 list_for_each_entry_safe(entry, tmp, &freed_data_list, efd_list) 3932 kmem_cache_free(ext4_free_data_cachep, entry); 3933 } 3934 } 3935 3936 int __init ext4_init_mballoc(void) 3937 { 3938 ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space, 3939 SLAB_RECLAIM_ACCOUNT); 3940 if (ext4_pspace_cachep == NULL) 3941 goto out; 3942 3943 ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context, 3944 SLAB_RECLAIM_ACCOUNT); 3945 if (ext4_ac_cachep == NULL) 3946 goto out_pa_free; 3947 3948 ext4_free_data_cachep = KMEM_CACHE(ext4_free_data, 3949 SLAB_RECLAIM_ACCOUNT); 3950 if (ext4_free_data_cachep == NULL) 3951 goto out_ac_free; 3952 3953 return 0; 3954 3955 out_ac_free: 3956 kmem_cache_destroy(ext4_ac_cachep); 3957 out_pa_free: 3958 kmem_cache_destroy(ext4_pspace_cachep); 3959 out: 3960 return -ENOMEM; 3961 } 3962 3963 void ext4_exit_mballoc(void) 3964 { 3965 /* 3966 * Wait for completion of call_rcu()'s on ext4_pspace_cachep 3967 * before destroying the slab cache. 3968 */ 3969 rcu_barrier(); 3970 kmem_cache_destroy(ext4_pspace_cachep); 3971 kmem_cache_destroy(ext4_ac_cachep); 3972 kmem_cache_destroy(ext4_free_data_cachep); 3973 ext4_groupinfo_destroy_slabs(); 3974 } 3975 3976 #define EXT4_MB_BITMAP_MARKED_CHECK 0x0001 3977 #define EXT4_MB_SYNC_UPDATE 0x0002 3978 static int 3979 ext4_mb_mark_context(handle_t *handle, struct super_block *sb, bool state, 3980 ext4_group_t group, ext4_grpblk_t blkoff, 3981 ext4_grpblk_t len, int flags, ext4_grpblk_t *ret_changed) 3982 { 3983 struct ext4_sb_info *sbi = EXT4_SB(sb); 3984 struct buffer_head *bitmap_bh = NULL; 3985 struct ext4_group_desc *gdp; 3986 struct buffer_head *gdp_bh; 3987 int err; 3988 unsigned int i, already, changed = len; 3989 3990 KUNIT_STATIC_STUB_REDIRECT(ext4_mb_mark_context, 3991 handle, sb, state, group, blkoff, len, 3992 flags, ret_changed); 3993 3994 if (ret_changed) 3995 *ret_changed = 0; 3996 bitmap_bh = ext4_read_block_bitmap(sb, group); 3997 if (IS_ERR(bitmap_bh)) 3998 return PTR_ERR(bitmap_bh); 3999 4000 if (handle) { 4001 BUFFER_TRACE(bitmap_bh, "getting write access"); 4002 err = ext4_journal_get_write_access(handle, sb, bitmap_bh, 4003 EXT4_JTR_NONE); 4004 if (err) 4005 goto out_err; 4006 } 4007 4008 err = -EIO; 4009 gdp = ext4_get_group_desc(sb, group, &gdp_bh); 4010 if (!gdp) 4011 goto out_err; 4012 4013 if (handle) { 4014 BUFFER_TRACE(gdp_bh, "get_write_access"); 4015 err = ext4_journal_get_write_access(handle, sb, gdp_bh, 4016 EXT4_JTR_NONE); 4017 if (err) 4018 goto out_err; 4019 } 4020 4021 ext4_lock_group(sb, group); 4022 if (ext4_has_group_desc_csum(sb) && 4023 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { 4024 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); 4025 ext4_free_group_clusters_set(sb, gdp, 4026 ext4_free_clusters_after_init(sb, group, 
gdp)); 4027 } 4028 4029 if (flags & EXT4_MB_BITMAP_MARKED_CHECK) { 4030 already = 0; 4031 for (i = 0; i < len; i++) 4032 if (mb_test_bit(blkoff + i, bitmap_bh->b_data) == 4033 state) 4034 already++; 4035 changed = len - already; 4036 } 4037 4038 if (state) { 4039 mb_set_bits(bitmap_bh->b_data, blkoff, len); 4040 ext4_free_group_clusters_set(sb, gdp, 4041 ext4_free_group_clusters(sb, gdp) - changed); 4042 } else { 4043 mb_clear_bits(bitmap_bh->b_data, blkoff, len); 4044 ext4_free_group_clusters_set(sb, gdp, 4045 ext4_free_group_clusters(sb, gdp) + changed); 4046 } 4047 4048 ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh); 4049 ext4_group_desc_csum_set(sb, group, gdp); 4050 ext4_unlock_group(sb, group); 4051 if (ret_changed) 4052 *ret_changed = changed; 4053 4054 if (sbi->s_log_groups_per_flex) { 4055 ext4_group_t flex_group = ext4_flex_group(sbi, group); 4056 struct flex_groups *fg = sbi_array_rcu_deref(sbi, 4057 s_flex_groups, flex_group); 4058 4059 if (state) 4060 atomic64_sub(changed, &fg->free_clusters); 4061 else 4062 atomic64_add(changed, &fg->free_clusters); 4063 } 4064 4065 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 4066 if (err) 4067 goto out_err; 4068 err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh); 4069 if (err) 4070 goto out_err; 4071 4072 if (flags & EXT4_MB_SYNC_UPDATE) { 4073 sync_dirty_buffer(bitmap_bh); 4074 sync_dirty_buffer(gdp_bh); 4075 } 4076 4077 out_err: 4078 brelse(bitmap_bh); 4079 return err; 4080 } 4081 4082 /* 4083 * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps 4084 * Returns 0 if success or error code 4085 */ 4086 static noinline_for_stack int 4087 ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, 4088 handle_t *handle, unsigned int reserv_clstrs) 4089 { 4090 struct ext4_group_desc *gdp; 4091 struct ext4_sb_info *sbi; 4092 struct super_block *sb; 4093 ext4_fsblk_t block; 4094 int err, len; 4095 int flags = 0; 4096 ext4_grpblk_t changed; 4097 4098 BUG_ON(ac->ac_status != AC_STATUS_FOUND); 4099 BUG_ON(ac->ac_b_ex.fe_len <= 0); 4100 4101 sb = ac->ac_sb; 4102 sbi = EXT4_SB(sb); 4103 4104 gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, NULL); 4105 if (!gdp) 4106 return -EIO; 4107 ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group, 4108 ext4_free_group_clusters(sb, gdp)); 4109 4110 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 4111 len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 4112 if (!ext4_inode_block_valid(ac->ac_inode, block, len)) { 4113 ext4_error(sb, "Allocating blocks %llu-%llu which overlap " 4114 "fs metadata", block, block+len); 4115 /* File system mounted not to panic on error 4116 * Fix the bitmap and return EFSCORRUPTED 4117 * We leak some of the blocks here. 4118 */ 4119 err = ext4_mb_mark_context(handle, sb, true, 4120 ac->ac_b_ex.fe_group, 4121 ac->ac_b_ex.fe_start, 4122 ac->ac_b_ex.fe_len, 4123 0, NULL); 4124 if (!err) 4125 err = -EFSCORRUPTED; 4126 return err; 4127 } 4128 4129 #ifdef AGGRESSIVE_CHECK 4130 flags |= EXT4_MB_BITMAP_MARKED_CHECK; 4131 #endif 4132 err = ext4_mb_mark_context(handle, sb, true, ac->ac_b_ex.fe_group, 4133 ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len, 4134 flags, &changed); 4135 4136 if (err && changed == 0) 4137 return err; 4138 4139 #ifdef AGGRESSIVE_CHECK 4140 BUG_ON(changed != ac->ac_b_ex.fe_len); 4141 #endif 4142 percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len); 4143 /* 4144 * Now reduce the dirty block count also. 
Should not go negative.
4145 	 */
4146 	if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
4147 		/* release all the reserved blocks if non delalloc */
4148 		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
4149 				   reserv_clstrs);
4150 
4151 	return err;
4152 }
4153 
4154 /*
4155  * Idempotent helper for Ext4 fast commit replay path to set the state of
4156  * blocks in bitmaps and update counters.
4157  */
4158 void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block,
4159 		     int len, bool state)
4160 {
4161 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4162 	ext4_group_t group;
4163 	ext4_grpblk_t blkoff;
4164 	int err = 0;
4165 	unsigned int clen, thisgrp_len;
4166 
4167 	while (len > 0) {
4168 		ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
4169 
4170 		/*
4171 		 * Check to see if we are freeing blocks across a group
4172 		 * boundary.
4173 		 * With flex_bg, (block, len) may span more than one group.
4174 		 * In that case we need to fetch the metadata of each group
4175 		 * we touch, so the marking is done one group per loop
4176 		 * iteration.
4177 		 */
4178 		thisgrp_len = min_t(unsigned int, (unsigned int)len,
4179 			EXT4_BLOCKS_PER_GROUP(sb) - EXT4_C2B(sbi, blkoff));
4180 		clen = EXT4_NUM_B2C(sbi, thisgrp_len);
4181 
4182 		if (!ext4_sb_block_valid(sb, NULL, block, thisgrp_len)) {
4183 			ext4_error(sb, "Marking blocks in system zone - "
4184 				   "Block = %llu, len = %u",
4185 				   block, thisgrp_len);
4186 			break;
4187 		}
4188 
4189 		err = ext4_mb_mark_context(NULL, sb, state,
4190 					   group, blkoff, clen,
4191 					   EXT4_MB_BITMAP_MARKED_CHECK |
4192 					   EXT4_MB_SYNC_UPDATE,
4193 					   NULL);
4194 		if (err)
4195 			break;
4196 
4197 		block += thisgrp_len;
4198 		len -= thisgrp_len;
4199 		BUG_ON(len < 0);
4200 	}
4201 }
4202 
4203 /*
4204  * Here we normalize the request for a locality group.
4205  * Group requests are normalized to s_mb_group_prealloc, which is rounded
4206  * up to a multiple of s_stripe when a stripe size was set at mount time.
4207  * s_mb_group_prealloc can be configured via
4208  * /sys/fs/ext4/<partition>/mb_group_prealloc.
4209  *
4210  * XXX: should we try to preallocate more than the group has now?
4211  */
4212 static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
4213 {
4214 	struct super_block *sb = ac->ac_sb;
4215 	struct ext4_locality_group *lg = ac->ac_lg;
4216 
4217 	BUG_ON(lg == NULL);
4218 	ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
4219 	mb_debug(sb, "goal %u blocks for locality group\n", ac->ac_g_ex.fe_len);
4220 }
4221 
4222 /*
4223  * This function returns the next element to look at during inode
4224  * PA rbtree walk.
We assume that the caller holds the inode PA rbtree lock
4225  * (ei->i_prealloc_lock).
4226  *
4227  * new_start	The start of the range we want to compare
4228  * cur_start	The existing start that we are comparing against
4229  * node		The node of the rb_tree
4230  */
4231 static inline struct rb_node*
4232 ext4_mb_pa_rb_next_iter(ext4_lblk_t new_start, ext4_lblk_t cur_start, struct rb_node *node)
4233 {
4234 	if (new_start < cur_start)
4235 		return node->rb_left;
4236 	else
4237 		return node->rb_right;
4238 }
4239 
4240 static inline void
4241 ext4_mb_pa_assert_overlap(struct ext4_allocation_context *ac,
4242 			  ext4_lblk_t start, loff_t end)
4243 {
4244 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4245 	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
4246 	struct ext4_prealloc_space *tmp_pa;
4247 	ext4_lblk_t tmp_pa_start;
4248 	loff_t tmp_pa_end;
4249 	struct rb_node *iter;
4250 
4251 	read_lock(&ei->i_prealloc_lock);
4252 	for (iter = ei->i_prealloc_node.rb_node; iter;
4253 	     iter = ext4_mb_pa_rb_next_iter(start, tmp_pa_start, iter)) {
4254 		tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4255 				  pa_node.inode_node);
4256 		tmp_pa_start = tmp_pa->pa_lstart;
4257 		tmp_pa_end = pa_logical_end(sbi, tmp_pa);
4258 
4259 		spin_lock(&tmp_pa->pa_lock);
4260 		if (tmp_pa->pa_deleted == 0)
4261 			BUG_ON(!(start >= tmp_pa_end || end <= tmp_pa_start));
4262 		spin_unlock(&tmp_pa->pa_lock);
4263 	}
4264 	read_unlock(&ei->i_prealloc_lock);
4265 }
4266 
4267 /*
4268  * Given an allocation context "ac" and a range "start", "end", check
4269  * and adjust boundaries if the range overlaps with any of the existing
4270  * preallocations stored in the corresponding inode of the allocation context.
4271  *
4272  * Parameters:
4273  * ac		allocation context
4274  * start	start of the new range
4275  * end		end of the new range
4276  */
4277 static inline void
4278 ext4_mb_pa_adjust_overlap(struct ext4_allocation_context *ac,
4279 			  ext4_lblk_t *start, loff_t *end)
4280 {
4281 	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
4282 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4283 	struct ext4_prealloc_space *tmp_pa = NULL, *left_pa = NULL, *right_pa = NULL;
4284 	struct rb_node *iter;
4285 	ext4_lblk_t new_start, tmp_pa_start, right_pa_start = -1;
4286 	loff_t new_end, tmp_pa_end, left_pa_end = -1;
4287 
4288 	new_start = *start;
4289 	new_end = *end;
4290 
4291 	/*
4292 	 * Adjust the normalized range so that it doesn't overlap with any
4293 	 * existing preallocated blocks (PAs). Make sure to hold the rbtree
4294 	 * lock so it doesn't change underneath us.
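	 * For example (illustrative numbers only): with neighbouring PAs
	 * covering logical blocks [0, 10) and [30, 40), a normalized range
	 * of [5, 35) gets trimmed to [10, 30).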
4295 */ 4296 read_lock(&ei->i_prealloc_lock); 4297 4298 /* Step 1: find any one immediate neighboring PA of the normalized range */ 4299 for (iter = ei->i_prealloc_node.rb_node; iter; 4300 iter = ext4_mb_pa_rb_next_iter(ac->ac_o_ex.fe_logical, 4301 tmp_pa_start, iter)) { 4302 tmp_pa = rb_entry(iter, struct ext4_prealloc_space, 4303 pa_node.inode_node); 4304 tmp_pa_start = tmp_pa->pa_lstart; 4305 tmp_pa_end = pa_logical_end(sbi, tmp_pa); 4306 4307 /* PA must not overlap original request */ 4308 spin_lock(&tmp_pa->pa_lock); 4309 if (tmp_pa->pa_deleted == 0) 4310 BUG_ON(!(ac->ac_o_ex.fe_logical >= tmp_pa_end || 4311 ac->ac_o_ex.fe_logical < tmp_pa_start)); 4312 spin_unlock(&tmp_pa->pa_lock); 4313 } 4314 4315 /* 4316 * Step 2: check if the found PA is left or right neighbor and 4317 * get the other neighbor 4318 */ 4319 if (tmp_pa) { 4320 if (tmp_pa->pa_lstart < ac->ac_o_ex.fe_logical) { 4321 struct rb_node *tmp; 4322 4323 left_pa = tmp_pa; 4324 tmp = rb_next(&left_pa->pa_node.inode_node); 4325 if (tmp) { 4326 right_pa = rb_entry(tmp, 4327 struct ext4_prealloc_space, 4328 pa_node.inode_node); 4329 } 4330 } else { 4331 struct rb_node *tmp; 4332 4333 right_pa = tmp_pa; 4334 tmp = rb_prev(&right_pa->pa_node.inode_node); 4335 if (tmp) { 4336 left_pa = rb_entry(tmp, 4337 struct ext4_prealloc_space, 4338 pa_node.inode_node); 4339 } 4340 } 4341 } 4342 4343 /* Step 3: get the non deleted neighbors */ 4344 if (left_pa) { 4345 for (iter = &left_pa->pa_node.inode_node;; 4346 iter = rb_prev(iter)) { 4347 if (!iter) { 4348 left_pa = NULL; 4349 break; 4350 } 4351 4352 tmp_pa = rb_entry(iter, struct ext4_prealloc_space, 4353 pa_node.inode_node); 4354 left_pa = tmp_pa; 4355 spin_lock(&tmp_pa->pa_lock); 4356 if (tmp_pa->pa_deleted == 0) { 4357 spin_unlock(&tmp_pa->pa_lock); 4358 break; 4359 } 4360 spin_unlock(&tmp_pa->pa_lock); 4361 } 4362 } 4363 4364 if (right_pa) { 4365 for (iter = &right_pa->pa_node.inode_node;; 4366 iter = rb_next(iter)) { 4367 if (!iter) { 4368 right_pa = NULL; 4369 break; 4370 } 4371 4372 tmp_pa = rb_entry(iter, struct ext4_prealloc_space, 4373 pa_node.inode_node); 4374 right_pa = tmp_pa; 4375 spin_lock(&tmp_pa->pa_lock); 4376 if (tmp_pa->pa_deleted == 0) { 4377 spin_unlock(&tmp_pa->pa_lock); 4378 break; 4379 } 4380 spin_unlock(&tmp_pa->pa_lock); 4381 } 4382 } 4383 4384 if (left_pa) { 4385 left_pa_end = pa_logical_end(sbi, left_pa); 4386 BUG_ON(left_pa_end > ac->ac_o_ex.fe_logical); 4387 } 4388 4389 if (right_pa) { 4390 right_pa_start = right_pa->pa_lstart; 4391 BUG_ON(right_pa_start <= ac->ac_o_ex.fe_logical); 4392 } 4393 4394 /* Step 4: trim our normalized range to not overlap with the neighbors */ 4395 if (left_pa) { 4396 if (left_pa_end > new_start) 4397 new_start = left_pa_end; 4398 } 4399 4400 if (right_pa) { 4401 if (right_pa_start < new_end) 4402 new_end = right_pa_start; 4403 } 4404 read_unlock(&ei->i_prealloc_lock); 4405 4406 /* XXX: extra loop to check we really don't overlap preallocations */ 4407 ext4_mb_pa_assert_overlap(ac, new_start, new_end); 4408 4409 *start = new_start; 4410 *end = new_end; 4411 } 4412 4413 /* 4414 * Normalization means making request better in terms of 4415 * size and alignment 4416 */ 4417 static noinline_for_stack void 4418 ext4_mb_normalize_request(struct ext4_allocation_context *ac, 4419 struct ext4_allocation_request *ar) 4420 { 4421 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4422 struct ext4_super_block *es = sbi->s_es; 4423 int bsbits, max; 4424 loff_t size, start_off, end; 4425 loff_t orig_size __maybe_unused; 4426 ext4_lblk_t start; 4427 
4428 /* do normalize only data requests, metadata requests 4429 do not need preallocation */ 4430 if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) 4431 return; 4432 4433 /* sometime caller may want exact blocks */ 4434 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) 4435 return; 4436 4437 /* caller may indicate that preallocation isn't 4438 * required (it's a tail, for example) */ 4439 if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC) 4440 return; 4441 4442 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) { 4443 ext4_mb_normalize_group_request(ac); 4444 return ; 4445 } 4446 4447 bsbits = ac->ac_sb->s_blocksize_bits; 4448 4449 /* first, let's learn actual file size 4450 * given current request is allocated */ 4451 size = extent_logical_end(sbi, &ac->ac_o_ex); 4452 size = size << bsbits; 4453 if (size < i_size_read(ac->ac_inode)) 4454 size = i_size_read(ac->ac_inode); 4455 orig_size = size; 4456 4457 /* max size of free chunks */ 4458 max = 2 << bsbits; 4459 4460 #define NRL_CHECK_SIZE(req, size, max, chunk_size) \ 4461 (req <= (size) || max <= (chunk_size)) 4462 4463 /* first, try to predict filesize */ 4464 /* XXX: should this table be tunable? */ 4465 start_off = 0; 4466 if (size <= 16 * 1024) { 4467 size = 16 * 1024; 4468 } else if (size <= 32 * 1024) { 4469 size = 32 * 1024; 4470 } else if (size <= 64 * 1024) { 4471 size = 64 * 1024; 4472 } else if (size <= 128 * 1024) { 4473 size = 128 * 1024; 4474 } else if (size <= 256 * 1024) { 4475 size = 256 * 1024; 4476 } else if (size <= 512 * 1024) { 4477 size = 512 * 1024; 4478 } else if (size <= 1024 * 1024) { 4479 size = 1024 * 1024; 4480 } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) { 4481 start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 4482 (21 - bsbits)) << 21; 4483 size = 2 * 1024 * 1024; 4484 } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) { 4485 start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 4486 (22 - bsbits)) << 22; 4487 size = 4 * 1024 * 1024; 4488 } else if (NRL_CHECK_SIZE(EXT4_C2B(sbi, ac->ac_o_ex.fe_len), 4489 (8<<20)>>bsbits, max, 8 * 1024)) { 4490 start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 4491 (23 - bsbits)) << 23; 4492 size = 8 * 1024 * 1024; 4493 } else { 4494 start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits; 4495 size = (loff_t) EXT4_C2B(sbi, 4496 ac->ac_o_ex.fe_len) << bsbits; 4497 } 4498 size = size >> bsbits; 4499 start = start_off >> bsbits; 4500 4501 /* 4502 * For tiny groups (smaller than 8MB) the chosen allocation 4503 * alignment may be larger than group size. Make sure the 4504 * alignment does not move allocation to a different group which 4505 * makes mballoc fail assertions later. 4506 */ 4507 start = max(start, rounddown(ac->ac_o_ex.fe_logical, 4508 (ext4_lblk_t)EXT4_BLOCKS_PER_GROUP(ac->ac_sb))); 4509 4510 /* avoid unnecessary preallocation that may trigger assertions */ 4511 if (start + size > EXT_MAX_BLOCKS) 4512 size = EXT_MAX_BLOCKS - start; 4513 4514 /* don't cover already allocated blocks in selected range */ 4515 if (ar->pleft && start <= ar->lleft) { 4516 size -= ar->lleft + 1 - start; 4517 start = ar->lleft + 1; 4518 } 4519 if (ar->pright && start + size - 1 >= ar->lright) 4520 size -= start + size - ar->lright; 4521 4522 /* 4523 * Trim allocation request for filesystems with artificially small 4524 * groups. 
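 * For example (illustrative sizes): a normalized 16MB request on a
 * filesystem whose block groups span only 8MB is cut down to 8MB here.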
4525 	 */
4526 	if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb))
4527 		size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb);
4528 
4529 	end = start + size;
4530 
4531 	ext4_mb_pa_adjust_overlap(ac, &start, &end);
4532 
4533 	size = end - start;
4534 
4535 	/*
4536 	 * In this function "start" and "size" are normalized for better
4537 	 * alignment and length, so that we can preallocate more blocks.
4538 	 * The normalization keeps the original request,
4539 	 * ac->ac_o_ex.fe_logical & fe_len, always within the "start" and
4540 	 * "size" boundaries.
4541 	 * (Note that fe_len is a soft requirement: the FS block allocation
4542 	 * API gives no guarantee on the number of contiguous blocks, since
4543 	 * that depends on the free space left, etc.)
4544 	 * In case of an inode pa, we later take the allocated blocks
4545 	 * [pa_pstart + fe_logical - pa_lstart, fe_len/size] out of the
4546 	 * preallocated range of goal/best blocks [start, size] and map them
4547 	 * at the ac_o_ex.fe_logical extent of this inode.
4548 	 * (See ext4_mb_use_inode_pa() for more details.)
4549 	 */
4550 	if (start + size <= ac->ac_o_ex.fe_logical ||
4551 	    start > ac->ac_o_ex.fe_logical) {
4552 		ext4_msg(ac->ac_sb, KERN_ERR,
4553 			 "start %lu, size %lu, fe_logical %lu",
4554 			 (unsigned long) start, (unsigned long) size,
4555 			 (unsigned long) ac->ac_o_ex.fe_logical);
4556 		BUG();
4557 	}
4558 	BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
4559 
4560 	/* now prepare goal request */
4561 
4562 	/* XXX: is it better to align blocks with respect to logical
4563 	 * placement, or to satisfy a big request as is? */
4564 	ac->ac_g_ex.fe_logical = start;
4565 	ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size);
4566 	ac->ac_orig_goal_len = ac->ac_g_ex.fe_len;
4567 
4568 	/* define goal start in order to merge */
4569 	if (ar->pright && (ar->lright == (start + size)) &&
4570 	    ar->pright >= size &&
4571 	    ar->pright - size >= le32_to_cpu(es->s_first_data_block)) {
4572 		/* merge to the right */
4573 		ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
4574 					     &ac->ac_g_ex.fe_group,
4575 					     &ac->ac_g_ex.fe_start);
4576 		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
4577 	}
4578 	if (ar->pleft && (ar->lleft + 1 == start) &&
4579 	    ar->pleft + 1 < ext4_blocks_count(es)) {
4580 		/* merge to the left */
4581 		ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
4582 					     &ac->ac_g_ex.fe_group,
4583 					     &ac->ac_g_ex.fe_start);
4584 		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
4585 	}
4586 
4587 	mb_debug(ac->ac_sb, "goal: %lld(was %lld) blocks at %u\n", size,
4588 		 orig_size, start);
4589 }
4590 
4591 static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
4592 {
4593 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4594 
4595 	if (sbi->s_mb_stats && ac->ac_g_ex.fe_len >= 1) {
4596 		atomic_inc(&sbi->s_bal_reqs);
4597 		atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
4598 		if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
4599 			atomic_inc(&sbi->s_bal_success);
4600 
4601 		atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
4602 		for (int i = 0; i < EXT4_MB_NUM_CRS; i++) {
4603 			atomic_add(ac->ac_cX_found[i], &sbi->s_bal_cX_ex_scanned[i]);
4604 		}
4605 
4606 		atomic_add(ac->ac_groups_scanned, &sbi->s_bal_groups_scanned);
4607 		if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
4608 		    ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
4609 			atomic_inc(&sbi->s_bal_goals);
4610 		/* did we allocate as much as normalizer originally wanted?
*/ 4611 if (ac->ac_f_ex.fe_len == ac->ac_orig_goal_len) 4612 atomic_inc(&sbi->s_bal_len_goals); 4613 4614 if (ac->ac_found > sbi->s_mb_max_to_scan) 4615 atomic_inc(&sbi->s_bal_breaks); 4616 } 4617 4618 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC) 4619 trace_ext4_mballoc_alloc(ac); 4620 else 4621 trace_ext4_mballoc_prealloc(ac); 4622 } 4623 4624 /* 4625 * Called on failure; free up any blocks from the inode PA for this 4626 * context. We don't need this for MB_GROUP_PA because we only change 4627 * pa_free in ext4_mb_release_context(), but on failure, we've already 4628 * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed. 4629 */ 4630 static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac) 4631 { 4632 struct ext4_prealloc_space *pa = ac->ac_pa; 4633 struct ext4_buddy e4b; 4634 int err; 4635 4636 if (pa == NULL) { 4637 if (ac->ac_f_ex.fe_len == 0) 4638 return; 4639 err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b); 4640 if (WARN_RATELIMIT(err, 4641 "ext4: mb_load_buddy failed (%d)", err)) 4642 /* 4643 * This should never happen since we pin the 4644 * pages in the ext4_allocation_context so 4645 * ext4_mb_load_buddy() should never fail. 4646 */ 4647 return; 4648 ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group); 4649 mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start, 4650 ac->ac_f_ex.fe_len); 4651 ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group); 4652 ext4_mb_unload_buddy(&e4b); 4653 return; 4654 } 4655 if (pa->pa_type == MB_INODE_PA) { 4656 spin_lock(&pa->pa_lock); 4657 pa->pa_free += ac->ac_b_ex.fe_len; 4658 spin_unlock(&pa->pa_lock); 4659 } 4660 } 4661 4662 /* 4663 * use blocks preallocated to inode 4664 */ 4665 static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac, 4666 struct ext4_prealloc_space *pa) 4667 { 4668 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4669 ext4_fsblk_t start; 4670 ext4_fsblk_t end; 4671 int len; 4672 4673 /* found preallocated blocks, use them */ 4674 start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart); 4675 end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len), 4676 start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len)); 4677 len = EXT4_NUM_B2C(sbi, end - start); 4678 ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group, 4679 &ac->ac_b_ex.fe_start); 4680 ac->ac_b_ex.fe_len = len; 4681 ac->ac_status = AC_STATUS_FOUND; 4682 ac->ac_pa = pa; 4683 4684 BUG_ON(start < pa->pa_pstart); 4685 BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len)); 4686 BUG_ON(pa->pa_free < len); 4687 BUG_ON(ac->ac_b_ex.fe_len <= 0); 4688 pa->pa_free -= len; 4689 4690 mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa); 4691 } 4692 4693 /* 4694 * use blocks preallocated to locality group 4695 */ 4696 static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac, 4697 struct ext4_prealloc_space *pa) 4698 { 4699 unsigned int len = ac->ac_o_ex.fe_len; 4700 4701 ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart, 4702 &ac->ac_b_ex.fe_group, 4703 &ac->ac_b_ex.fe_start); 4704 ac->ac_b_ex.fe_len = len; 4705 ac->ac_status = AC_STATUS_FOUND; 4706 ac->ac_pa = pa; 4707 4708 /* we don't correct pa_pstart or pa_len here to avoid 4709 * possible race when the group is being loaded concurrently 4710 * instead we correct pa later, after blocks are marked 4711 * in on-disk bitmap -- see ext4_mb_release_context() 4712 * Other CPUs are prevented from allocating from this pa by lg_mutex 4713 */ 4714 mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n", 4715 pa->pa_lstart, len, pa); 4716 } 4717 4718 /* 
4719  * Return the prealloc space that has the minimal distance
4720  * from the goal block. @cpa is the prealloc
4721  * space with the currently known minimal distance
4722  * from the goal block.
4723  */
4724 static struct ext4_prealloc_space *
4725 ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
4726 			struct ext4_prealloc_space *pa,
4727 			struct ext4_prealloc_space *cpa)
4728 {
4729 	ext4_fsblk_t cur_distance, new_distance;
4730 
4731 	if (cpa == NULL) {
4732 		atomic_inc(&pa->pa_count);
4733 		return pa;
4734 	}
4735 	cur_distance = abs(goal_block - cpa->pa_pstart);
4736 	new_distance = abs(goal_block - pa->pa_pstart);
4737 
4738 	if (cur_distance <= new_distance)
4739 		return cpa;
4740 
4741 	/* drop the previous reference */
4742 	atomic_dec(&cpa->pa_count);
4743 	atomic_inc(&pa->pa_count);
4744 	return pa;
4745 }
4746 
4747 /*
4748  * check if the found pa meets EXT4_MB_HINT_GOAL_ONLY
4749  */
4750 static bool
4751 ext4_mb_pa_goal_check(struct ext4_allocation_context *ac,
4752 		      struct ext4_prealloc_space *pa)
4753 {
4754 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4755 	ext4_fsblk_t start;
4756 
4757 	if (likely(!(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)))
4758 		return true;
4759 
4760 	/*
4761 	 * If EXT4_MB_HINT_GOAL_ONLY is set, ac_g_ex will not be adjusted
4762 	 * in ext4_mb_normalize_request and will stay the same as ac_o_ex
4763 	 * from ext4_mb_initialize_context. Choose ac_g_ex here to keep
4764 	 * consistent with ext4_mb_find_by_goal.
4765 	 */
4766 	start = pa->pa_pstart +
4767 		(ac->ac_g_ex.fe_logical - pa->pa_lstart);
4768 	if (ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex) != start)
4769 		return false;
4770 
4771 	if (ac->ac_g_ex.fe_len > pa->pa_len -
4772 	    EXT4_B2C(sbi, ac->ac_g_ex.fe_logical - pa->pa_lstart))
4773 		return false;
4774 
4775 	return true;
4776 }
4777 
4778 /*
4779  * search goal blocks in preallocated space
4780  */
4781 static noinline_for_stack bool
4782 ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
4783 {
4784 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4785 	int order, i;
4786 	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
4787 	struct ext4_locality_group *lg;
4788 	struct ext4_prealloc_space *tmp_pa = NULL, *cpa = NULL;
4789 	struct rb_node *iter;
4790 	ext4_fsblk_t goal_block;
4791 
4792 	/* only data can be preallocated */
4793 	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4794 		return false;
4795 
4796 	/*
4797 	 * first, try per-file preallocation by searching the inode pa rbtree.
4798 	 *
4799 	 * Here, we can't do a direct traversal of the tree because
4800 	 * ext4_mb_discard_group_preallocations() can concurrently mark the pa
4801 	 * deleted and that can cause direct traversal to skip some entries.
4802 	 */
4803 	read_lock(&ei->i_prealloc_lock);
4804 
4805 	if (RB_EMPTY_ROOT(&ei->i_prealloc_node)) {
4806 		goto try_group_pa;
4807 	}
4808 
4809 	/*
4810 	 * Step 1: Find a pa with logical start immediately adjacent to the
4811 	 * original logical start. This could be on the left or right.
4812 	 *
4813 	 * (tmp_pa->pa_lstart never changes so we can skip locking for it).
4814 	 */
4815 	for (iter = ei->i_prealloc_node.rb_node; iter;
4816 	     iter = ext4_mb_pa_rb_next_iter(ac->ac_o_ex.fe_logical,
4817 					    tmp_pa->pa_lstart, iter)) {
4818 		tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4819 				  pa_node.inode_node);
4820 	}
4821 
4822 	/*
4823 	 * Step 2: The adjacent pa might be to the right of logical start, find
4824 	 * the left adjacent pa.
After this step we'd have a valid tmp_pa whose 4825 * logical start is towards the left of original request's logical start 4826 */ 4827 if (tmp_pa->pa_lstart > ac->ac_o_ex.fe_logical) { 4828 struct rb_node *tmp; 4829 tmp = rb_prev(&tmp_pa->pa_node.inode_node); 4830 4831 if (tmp) { 4832 tmp_pa = rb_entry(tmp, struct ext4_prealloc_space, 4833 pa_node.inode_node); 4834 } else { 4835 /* 4836 * If there is no adjacent pa to the left then finding 4837 * an overlapping pa is not possible hence stop searching 4838 * inode pa tree 4839 */ 4840 goto try_group_pa; 4841 } 4842 } 4843 4844 BUG_ON(!(tmp_pa && tmp_pa->pa_lstart <= ac->ac_o_ex.fe_logical)); 4845 4846 /* 4847 * Step 3: If the left adjacent pa is deleted, keep moving left to find 4848 * the first non deleted adjacent pa. After this step we should have a 4849 * valid tmp_pa which is guaranteed to be non deleted. 4850 */ 4851 for (iter = &tmp_pa->pa_node.inode_node;; iter = rb_prev(iter)) { 4852 if (!iter) { 4853 /* 4854 * no non deleted left adjacent pa, so stop searching 4855 * inode pa tree 4856 */ 4857 goto try_group_pa; 4858 } 4859 tmp_pa = rb_entry(iter, struct ext4_prealloc_space, 4860 pa_node.inode_node); 4861 spin_lock(&tmp_pa->pa_lock); 4862 if (tmp_pa->pa_deleted == 0) { 4863 /* 4864 * We will keep holding the pa_lock from 4865 * this point on because we don't want group discard 4866 * to delete this pa underneath us. Since group 4867 * discard is anyways an ENOSPC operation it 4868 * should be okay for it to wait a few more cycles. 4869 */ 4870 break; 4871 } else { 4872 spin_unlock(&tmp_pa->pa_lock); 4873 } 4874 } 4875 4876 BUG_ON(!(tmp_pa && tmp_pa->pa_lstart <= ac->ac_o_ex.fe_logical)); 4877 BUG_ON(tmp_pa->pa_deleted == 1); 4878 4879 /* 4880 * Step 4: We now have the non deleted left adjacent pa. Only this 4881 * pa can possibly satisfy the request hence check if it overlaps 4882 * original logical start and stop searching if it doesn't. 4883 */ 4884 if (ac->ac_o_ex.fe_logical >= pa_logical_end(sbi, tmp_pa)) { 4885 spin_unlock(&tmp_pa->pa_lock); 4886 goto try_group_pa; 4887 } 4888 4889 /* non-extent files can't have physical blocks past 2^32 */ 4890 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) && 4891 (tmp_pa->pa_pstart + EXT4_C2B(sbi, tmp_pa->pa_len) > 4892 EXT4_MAX_BLOCK_FILE_PHYS)) { 4893 /* 4894 * Since PAs don't overlap, we won't find any other PA to 4895 * satisfy this. 4896 */ 4897 spin_unlock(&tmp_pa->pa_lock); 4898 goto try_group_pa; 4899 } 4900 4901 if (tmp_pa->pa_free && likely(ext4_mb_pa_goal_check(ac, tmp_pa))) { 4902 atomic_inc(&tmp_pa->pa_count); 4903 ext4_mb_use_inode_pa(ac, tmp_pa); 4904 spin_unlock(&tmp_pa->pa_lock); 4905 read_unlock(&ei->i_prealloc_lock); 4906 return true; 4907 } else { 4908 /* 4909 * We found a valid overlapping pa but couldn't use it because 4910 * it had no free blocks. This should ideally never happen 4911 * because: 4912 * 4913 * 1. When a new inode pa is added to rbtree it must have 4914 * pa_free > 0 since otherwise we won't actually need 4915 * preallocation. 4916 * 4917 * 2. An inode pa that is in the rbtree can only have it's 4918 * pa_free become zero when another thread calls: 4919 * ext4_mb_new_blocks 4920 * ext4_mb_use_preallocated 4921 * ext4_mb_use_inode_pa 4922 * 4923 * 3. Further, after the above calls make pa_free == 0, we will 4924 * immediately remove it from the rbtree in: 4925 * ext4_mb_new_blocks 4926 * ext4_mb_release_context 4927 * ext4_mb_put_pa 4928 * 4929 * 4. 
Since the pa_free becoming 0 and pa_free getting removed 4930 * from tree both happen in ext4_mb_new_blocks, which is always 4931 * called with i_data_sem held for data allocations, we can be 4932 * sure that another process will never see a pa in rbtree with 4933 * pa_free == 0. 4934 */ 4935 WARN_ON_ONCE(tmp_pa->pa_free == 0); 4936 } 4937 spin_unlock(&tmp_pa->pa_lock); 4938 try_group_pa: 4939 read_unlock(&ei->i_prealloc_lock); 4940 4941 /* can we use group allocation? */ 4942 if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)) 4943 return false; 4944 4945 /* inode may have no locality group for some reason */ 4946 lg = ac->ac_lg; 4947 if (lg == NULL) 4948 return false; 4949 order = fls(ac->ac_o_ex.fe_len) - 1; 4950 if (order > PREALLOC_TB_SIZE - 1) 4951 /* The max size of hash table is PREALLOC_TB_SIZE */ 4952 order = PREALLOC_TB_SIZE - 1; 4953 4954 goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex); 4955 /* 4956 * search for the prealloc space that is having 4957 * minimal distance from the goal block. 4958 */ 4959 for (i = order; i < PREALLOC_TB_SIZE; i++) { 4960 rcu_read_lock(); 4961 list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[i], 4962 pa_node.lg_list) { 4963 spin_lock(&tmp_pa->pa_lock); 4964 if (tmp_pa->pa_deleted == 0 && 4965 tmp_pa->pa_free >= ac->ac_o_ex.fe_len) { 4966 4967 cpa = ext4_mb_check_group_pa(goal_block, 4968 tmp_pa, cpa); 4969 } 4970 spin_unlock(&tmp_pa->pa_lock); 4971 } 4972 rcu_read_unlock(); 4973 } 4974 if (cpa) { 4975 ext4_mb_use_group_pa(ac, cpa); 4976 return true; 4977 } 4978 return false; 4979 } 4980 4981 /* 4982 * the function goes through all preallocation in this group and marks them 4983 * used in in-core bitmap. buddy must be generated from this bitmap 4984 * Need to be called with ext4 group lock held 4985 */ 4986 static noinline_for_stack 4987 void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, 4988 ext4_group_t group) 4989 { 4990 struct ext4_group_info *grp = ext4_get_group_info(sb, group); 4991 struct ext4_prealloc_space *pa; 4992 struct list_head *cur; 4993 ext4_group_t groupnr; 4994 ext4_grpblk_t start; 4995 int preallocated = 0; 4996 int len; 4997 4998 if (!grp) 4999 return; 5000 5001 /* all form of preallocation discards first load group, 5002 * so the only competing code is preallocation use. 
5003 * we don't need any locking here 5004 * notice we do NOT ignore preallocations with pa_deleted 5005 * otherwise we could leave used blocks available for 5006 * allocation in buddy when concurrent ext4_mb_put_pa() 5007 * is dropping preallocation 5008 */ 5009 list_for_each(cur, &grp->bb_prealloc_list) { 5010 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); 5011 spin_lock(&pa->pa_lock); 5012 ext4_get_group_no_and_offset(sb, pa->pa_pstart, 5013 &groupnr, &start); 5014 len = pa->pa_len; 5015 spin_unlock(&pa->pa_lock); 5016 if (unlikely(len == 0)) 5017 continue; 5018 BUG_ON(groupnr != group); 5019 mb_set_bits(bitmap, start, len); 5020 preallocated += len; 5021 } 5022 mb_debug(sb, "preallocated %d for group %u\n", preallocated, group); 5023 } 5024 5025 static void ext4_mb_mark_pa_deleted(struct super_block *sb, 5026 struct ext4_prealloc_space *pa) 5027 { 5028 struct ext4_inode_info *ei; 5029 5030 if (pa->pa_deleted) { 5031 ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n", 5032 pa->pa_type, pa->pa_pstart, pa->pa_lstart, 5033 pa->pa_len); 5034 return; 5035 } 5036 5037 pa->pa_deleted = 1; 5038 5039 if (pa->pa_type == MB_INODE_PA) { 5040 ei = EXT4_I(pa->pa_inode); 5041 atomic_dec(&ei->i_prealloc_active); 5042 } 5043 } 5044 5045 static inline void ext4_mb_pa_free(struct ext4_prealloc_space *pa) 5046 { 5047 BUG_ON(!pa); 5048 BUG_ON(atomic_read(&pa->pa_count)); 5049 BUG_ON(pa->pa_deleted == 0); 5050 kmem_cache_free(ext4_pspace_cachep, pa); 5051 } 5052 5053 static void ext4_mb_pa_callback(struct rcu_head *head) 5054 { 5055 struct ext4_prealloc_space *pa; 5056 5057 pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu); 5058 ext4_mb_pa_free(pa); 5059 } 5060 5061 /* 5062 * drops a reference to preallocated space descriptor 5063 * if this was the last reference and the space is consumed 5064 */ 5065 static void ext4_mb_put_pa(struct ext4_allocation_context *ac, 5066 struct super_block *sb, struct ext4_prealloc_space *pa) 5067 { 5068 ext4_group_t grp; 5069 ext4_fsblk_t grp_blk; 5070 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); 5071 5072 /* in this short window concurrent discard can set pa_deleted */ 5073 spin_lock(&pa->pa_lock); 5074 if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) { 5075 spin_unlock(&pa->pa_lock); 5076 return; 5077 } 5078 5079 if (pa->pa_deleted == 1) { 5080 spin_unlock(&pa->pa_lock); 5081 return; 5082 } 5083 5084 ext4_mb_mark_pa_deleted(sb, pa); 5085 spin_unlock(&pa->pa_lock); 5086 5087 grp_blk = pa->pa_pstart; 5088 /* 5089 * If doing group-based preallocation, pa_pstart may be in the 5090 * next group when pa is used up 5091 */ 5092 if (pa->pa_type == MB_GROUP_PA) 5093 grp_blk--; 5094 5095 grp = ext4_get_group_number(sb, grp_blk); 5096 5097 /* 5098 * possible race: 5099 * 5100 * P1 (buddy init) P2 (regular allocation) 5101 * find block B in PA 5102 * copy on-disk bitmap to buddy 5103 * mark B in on-disk bitmap 5104 * drop PA from group 5105 * mark all PAs in buddy 5106 * 5107 * thus, P1 initializes buddy with B available. 
to prevent this 5108 * we make "copy" and "mark all PAs" atomic and serialize "drop PA" 5109 * against that pair 5110 */ 5111 ext4_lock_group(sb, grp); 5112 list_del(&pa->pa_group_list); 5113 ext4_unlock_group(sb, grp); 5114 5115 if (pa->pa_type == MB_INODE_PA) { 5116 write_lock(pa->pa_node_lock.inode_lock); 5117 rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node); 5118 write_unlock(pa->pa_node_lock.inode_lock); 5119 ext4_mb_pa_free(pa); 5120 } else { 5121 spin_lock(pa->pa_node_lock.lg_lock); 5122 list_del_rcu(&pa->pa_node.lg_list); 5123 spin_unlock(pa->pa_node_lock.lg_lock); 5124 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 5125 } 5126 } 5127 5128 static void ext4_mb_pa_rb_insert(struct rb_root *root, struct rb_node *new) 5129 { 5130 struct rb_node **iter = &root->rb_node, *parent = NULL; 5131 struct ext4_prealloc_space *iter_pa, *new_pa; 5132 ext4_lblk_t iter_start, new_start; 5133 5134 while (*iter) { 5135 iter_pa = rb_entry(*iter, struct ext4_prealloc_space, 5136 pa_node.inode_node); 5137 new_pa = rb_entry(new, struct ext4_prealloc_space, 5138 pa_node.inode_node); 5139 iter_start = iter_pa->pa_lstart; 5140 new_start = new_pa->pa_lstart; 5141 5142 parent = *iter; 5143 if (new_start < iter_start) 5144 iter = &((*iter)->rb_left); 5145 else 5146 iter = &((*iter)->rb_right); 5147 } 5148 5149 rb_link_node(new, parent, iter); 5150 rb_insert_color(new, root); 5151 } 5152 5153 /* 5154 * creates new preallocated space for given inode 5155 */ 5156 static noinline_for_stack void 5157 ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) 5158 { 5159 struct super_block *sb = ac->ac_sb; 5160 struct ext4_sb_info *sbi = EXT4_SB(sb); 5161 struct ext4_prealloc_space *pa; 5162 struct ext4_group_info *grp; 5163 struct ext4_inode_info *ei; 5164 5165 /* preallocate only when found space is larger then requested */ 5166 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len); 5167 BUG_ON(ac->ac_status != AC_STATUS_FOUND); 5168 BUG_ON(!S_ISREG(ac->ac_inode->i_mode)); 5169 BUG_ON(ac->ac_pa == NULL); 5170 5171 pa = ac->ac_pa; 5172 5173 if (ac->ac_b_ex.fe_len < ac->ac_orig_goal_len) { 5174 struct ext4_free_extent ex = { 5175 .fe_logical = ac->ac_g_ex.fe_logical, 5176 .fe_len = ac->ac_orig_goal_len, 5177 }; 5178 loff_t orig_goal_end = extent_logical_end(sbi, &ex); 5179 loff_t o_ex_end = extent_logical_end(sbi, &ac->ac_o_ex); 5180 5181 /* 5182 * We can't allocate as much as normalizer wants, so we try 5183 * to get proper lstart to cover the original request, except 5184 * when the goal doesn't cover the original request as below: 5185 * 5186 * orig_ex:2045/2055(10), isize:8417280 -> normalized:0/2048 5187 * best_ex:0/200(200) -> adjusted: 1848/2048(200) 5188 */ 5189 BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical); 5190 BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len); 5191 5192 /* 5193 * Use the below logic for adjusting best extent as it keeps 5194 * fragmentation in check while ensuring logical range of best 5195 * extent doesn't overflow out of goal extent: 5196 * 5197 * 1. Check if best ex can be kept at end of goal (before 5198 * cr_best_avail trimmed it) and still cover original start 5199 * 2. Else, check if best ex can be kept at start of goal and 5200 * still cover original end 5201 * 3. Else, keep the best ex at start of original request. 
5202 */ 5203 ex.fe_len = ac->ac_b_ex.fe_len; 5204 5205 ex.fe_logical = orig_goal_end - EXT4_C2B(sbi, ex.fe_len); 5206 if (ac->ac_o_ex.fe_logical >= ex.fe_logical) 5207 goto adjust_bex; 5208 5209 ex.fe_logical = ac->ac_g_ex.fe_logical; 5210 if (o_ex_end <= extent_logical_end(sbi, &ex)) 5211 goto adjust_bex; 5212 5213 ex.fe_logical = ac->ac_o_ex.fe_logical; 5214 adjust_bex: 5215 ac->ac_b_ex.fe_logical = ex.fe_logical; 5216 5217 BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical); 5218 BUG_ON(extent_logical_end(sbi, &ex) > orig_goal_end); 5219 } 5220 5221 pa->pa_lstart = ac->ac_b_ex.fe_logical; 5222 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 5223 pa->pa_len = ac->ac_b_ex.fe_len; 5224 pa->pa_free = pa->pa_len; 5225 spin_lock_init(&pa->pa_lock); 5226 INIT_LIST_HEAD(&pa->pa_group_list); 5227 pa->pa_deleted = 0; 5228 pa->pa_type = MB_INODE_PA; 5229 5230 mb_debug(sb, "new inode pa %p: %llu/%d for %u\n", pa, pa->pa_pstart, 5231 pa->pa_len, pa->pa_lstart); 5232 trace_ext4_mb_new_inode_pa(ac, pa); 5233 5234 atomic_add(pa->pa_free, &sbi->s_mb_preallocated); 5235 ext4_mb_use_inode_pa(ac, pa); 5236 5237 ei = EXT4_I(ac->ac_inode); 5238 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); 5239 if (!grp) 5240 return; 5241 5242 pa->pa_node_lock.inode_lock = &ei->i_prealloc_lock; 5243 pa->pa_inode = ac->ac_inode; 5244 5245 list_add(&pa->pa_group_list, &grp->bb_prealloc_list); 5246 5247 write_lock(pa->pa_node_lock.inode_lock); 5248 ext4_mb_pa_rb_insert(&ei->i_prealloc_node, &pa->pa_node.inode_node); 5249 write_unlock(pa->pa_node_lock.inode_lock); 5250 atomic_inc(&ei->i_prealloc_active); 5251 } 5252 5253 /* 5254 * creates new preallocated space for locality group inodes belongs to 5255 */ 5256 static noinline_for_stack void 5257 ext4_mb_new_group_pa(struct ext4_allocation_context *ac) 5258 { 5259 struct super_block *sb = ac->ac_sb; 5260 struct ext4_locality_group *lg; 5261 struct ext4_prealloc_space *pa; 5262 struct ext4_group_info *grp; 5263 5264 /* preallocate only when found space is larger then requested */ 5265 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len); 5266 BUG_ON(ac->ac_status != AC_STATUS_FOUND); 5267 BUG_ON(!S_ISREG(ac->ac_inode->i_mode)); 5268 BUG_ON(ac->ac_pa == NULL); 5269 5270 pa = ac->ac_pa; 5271 5272 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 5273 pa->pa_lstart = pa->pa_pstart; 5274 pa->pa_len = ac->ac_b_ex.fe_len; 5275 pa->pa_free = pa->pa_len; 5276 spin_lock_init(&pa->pa_lock); 5277 INIT_LIST_HEAD(&pa->pa_node.lg_list); 5278 INIT_LIST_HEAD(&pa->pa_group_list); 5279 pa->pa_deleted = 0; 5280 pa->pa_type = MB_GROUP_PA; 5281 5282 mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart, 5283 pa->pa_len, pa->pa_lstart); 5284 trace_ext4_mb_new_group_pa(ac, pa); 5285 5286 ext4_mb_use_group_pa(ac, pa); 5287 atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated); 5288 5289 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); 5290 if (!grp) 5291 return; 5292 lg = ac->ac_lg; 5293 BUG_ON(lg == NULL); 5294 5295 pa->pa_node_lock.lg_lock = &lg->lg_prealloc_lock; 5296 pa->pa_inode = NULL; 5297 5298 list_add(&pa->pa_group_list, &grp->bb_prealloc_list); 5299 5300 /* 5301 * We will later add the new pa to the right bucket 5302 * after updating the pa_free in ext4_mb_release_context 5303 */ 5304 } 5305 5306 static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac) 5307 { 5308 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) 5309 ext4_mb_new_group_pa(ac); 5310 else 5311 ext4_mb_new_inode_pa(ac); 5312 } 5313 5314 /* 5315 * finds all unused blocks in 
on-disk bitmap, frees them in 5316 * in-core bitmap and buddy. 5317 * @pa must be unlinked from inode and group lists, so that 5318 * nobody else can find/use it. 5319 * the caller MUST hold group/inode locks. 5320 * TODO: optimize the case when there are no in-core structures yet 5321 */ 5322 static noinline_for_stack void 5323 ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh, 5324 struct ext4_prealloc_space *pa) 5325 { 5326 struct super_block *sb = e4b->bd_sb; 5327 struct ext4_sb_info *sbi = EXT4_SB(sb); 5328 unsigned int end; 5329 unsigned int next; 5330 ext4_group_t group; 5331 ext4_grpblk_t bit; 5332 unsigned long long grp_blk_start; 5333 int free = 0; 5334 5335 BUG_ON(pa->pa_deleted == 0); 5336 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); 5337 grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit); 5338 BUG_ON(group != e4b->bd_group && pa->pa_len != 0); 5339 end = bit + pa->pa_len; 5340 5341 while (bit < end) { 5342 bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit); 5343 if (bit >= end) 5344 break; 5345 next = mb_find_next_bit(bitmap_bh->b_data, end, bit); 5346 mb_debug(sb, "free preallocated %u/%u in group %u\n", 5347 (unsigned) ext4_group_first_block_no(sb, group) + bit, 5348 (unsigned) next - bit, (unsigned) group); 5349 free += next - bit; 5350 5351 trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit); 5352 trace_ext4_mb_release_inode_pa(pa, (grp_blk_start + 5353 EXT4_C2B(sbi, bit)), 5354 next - bit); 5355 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit); 5356 bit = next + 1; 5357 } 5358 if (free != pa->pa_free) { 5359 ext4_msg(e4b->bd_sb, KERN_CRIT, 5360 "pa %p: logic %lu, phys. %lu, len %d", 5361 pa, (unsigned long) pa->pa_lstart, 5362 (unsigned long) pa->pa_pstart, 5363 pa->pa_len); 5364 ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u", 5365 free, pa->pa_free); 5366 /* 5367 * pa is already deleted so we use the value obtained 5368 * from the bitmap and continue. 
5369 */ 5370 } 5371 atomic_add(free, &sbi->s_mb_discarded); 5372 } 5373 5374 static noinline_for_stack void 5375 ext4_mb_release_group_pa(struct ext4_buddy *e4b, 5376 struct ext4_prealloc_space *pa) 5377 { 5378 struct super_block *sb = e4b->bd_sb; 5379 ext4_group_t group; 5380 ext4_grpblk_t bit; 5381 5382 trace_ext4_mb_release_group_pa(sb, pa); 5383 BUG_ON(pa->pa_deleted == 0); 5384 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); 5385 if (unlikely(group != e4b->bd_group && pa->pa_len != 0)) { 5386 ext4_warning(sb, "bad group: expected %u, group %u, pa_start %llu", 5387 e4b->bd_group, group, pa->pa_pstart); 5388 return; 5389 } 5390 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len); 5391 atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded); 5392 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len); 5393 } 5394 5395 /* 5396 * releases all preallocations in given group 5397 * 5398 * first, we need to decide discard policy: 5399 * - when do we discard 5400 * 1) ENOSPC 5401 * - how many do we discard 5402 * 1) how many requested 5403 */ 5404 static noinline_for_stack int 5405 ext4_mb_discard_group_preallocations(struct super_block *sb, 5406 ext4_group_t group, int *busy) 5407 { 5408 struct ext4_group_info *grp = ext4_get_group_info(sb, group); 5409 struct buffer_head *bitmap_bh = NULL; 5410 struct ext4_prealloc_space *pa, *tmp; 5411 LIST_HEAD(list); 5412 struct ext4_buddy e4b; 5413 struct ext4_inode_info *ei; 5414 int err; 5415 int free = 0; 5416 5417 if (!grp) 5418 return 0; 5419 mb_debug(sb, "discard preallocation for group %u\n", group); 5420 if (list_empty(&grp->bb_prealloc_list)) 5421 goto out_dbg; 5422 5423 bitmap_bh = ext4_read_block_bitmap(sb, group); 5424 if (IS_ERR(bitmap_bh)) { 5425 err = PTR_ERR(bitmap_bh); 5426 ext4_error_err(sb, -err, 5427 "Error %d reading block bitmap for %u", 5428 err, group); 5429 goto out_dbg; 5430 } 5431 5432 err = ext4_mb_load_buddy(sb, group, &e4b); 5433 if (err) { 5434 ext4_warning(sb, "Error %d loading buddy information for %u", 5435 err, group); 5436 put_bh(bitmap_bh); 5437 goto out_dbg; 5438 } 5439 5440 ext4_lock_group(sb, group); 5441 list_for_each_entry_safe(pa, tmp, 5442 &grp->bb_prealloc_list, pa_group_list) { 5443 spin_lock(&pa->pa_lock); 5444 if (atomic_read(&pa->pa_count)) { 5445 spin_unlock(&pa->pa_lock); 5446 *busy = 1; 5447 continue; 5448 } 5449 if (pa->pa_deleted) { 5450 spin_unlock(&pa->pa_lock); 5451 continue; 5452 } 5453 5454 /* seems this one can be freed ... */ 5455 ext4_mb_mark_pa_deleted(sb, pa); 5456 5457 if (!free) 5458 this_cpu_inc(discard_pa_seq); 5459 5460 /* we can trust pa_free ... 
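		 * (once pa_deleted has been set under pa_lock above, the
		 * allocation paths check pa_deleted before charging a pa,
		 * so pa_free should no longer change)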
*/
5461 free += pa->pa_free;
5462
5463 spin_unlock(&pa->pa_lock);
5464
5465 list_del(&pa->pa_group_list);
5466 list_add(&pa->u.pa_tmp_list, &list);
5467 }
5468
5469 /* now free all selected PAs */
5470 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
5471
5472 /* remove from object (inode or locality group) */
5473 if (pa->pa_type == MB_GROUP_PA) {
5474 spin_lock(pa->pa_node_lock.lg_lock);
5475 list_del_rcu(&pa->pa_node.lg_list);
5476 spin_unlock(pa->pa_node_lock.lg_lock);
5477 } else {
5478 write_lock(pa->pa_node_lock.inode_lock);
5479 ei = EXT4_I(pa->pa_inode);
5480 rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node);
5481 write_unlock(pa->pa_node_lock.inode_lock);
5482 }
5483
5484 list_del(&pa->u.pa_tmp_list);
5485
5486 if (pa->pa_type == MB_GROUP_PA) {
5487 ext4_mb_release_group_pa(&e4b, pa);
5488 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5489 } else {
5490 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
5491 ext4_mb_pa_free(pa);
5492 }
5493 }
5494
5495 ext4_unlock_group(sb, group);
5496 ext4_mb_unload_buddy(&e4b);
5497 put_bh(bitmap_bh);
5498 out_dbg:
5499 mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n",
5500 free, group, grp->bb_free);
5501 return free;
5502 }
5503
5504 /*
5505 * releases all unused preallocated blocks for a given inode
5506 *
5507 * It's important to discard preallocations under i_data_sem.
5508 * We don't want another block to be served from the prealloc
5509 * space when we are discarding the inode prealloc space.
5510 *
5511 * FIXME!! Make sure it is valid at all the call sites
5512 */
5513 void ext4_discard_preallocations(struct inode *inode)
5514 {
5515 struct ext4_inode_info *ei = EXT4_I(inode);
5516 struct super_block *sb = inode->i_sb;
5517 struct buffer_head *bitmap_bh = NULL;
5518 struct ext4_prealloc_space *pa, *tmp;
5519 ext4_group_t group = 0;
5520 LIST_HEAD(list);
5521 struct ext4_buddy e4b;
5522 struct rb_node *iter;
5523 int err;
5524
5525 if (!S_ISREG(inode->i_mode))
5526 return;
5527
5528 if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY)
5529 return;
5530
5531 mb_debug(sb, "discard preallocation for inode %lu\n",
5532 inode->i_ino);
5533 trace_ext4_discard_preallocations(inode,
5534 atomic_read(&ei->i_prealloc_active));
5535
5536 repeat:
5537 /* first, collect all pa's in the inode */
5538 write_lock(&ei->i_prealloc_lock);
5539 for (iter = rb_first(&ei->i_prealloc_node); iter;
5540 iter = rb_next(iter)) {
5541 pa = rb_entry(iter, struct ext4_prealloc_space,
5542 pa_node.inode_node);
5543 BUG_ON(pa->pa_node_lock.inode_lock != &ei->i_prealloc_lock);
5544
5545 spin_lock(&pa->pa_lock);
5546 if (atomic_read(&pa->pa_count)) {
5547 /* this shouldn't happen often - nobody should
5548 * use preallocation while we're discarding it */
5549 spin_unlock(&pa->pa_lock);
5550 write_unlock(&ei->i_prealloc_lock);
5551 ext4_msg(sb, KERN_ERR,
5552 "uh-oh! used pa while discarding");
5553 WARN_ON(1);
5554 schedule_timeout_uninterruptible(HZ);
5555 goto repeat;
5556
5557 }
5558 if (pa->pa_deleted == 0) {
5559 ext4_mb_mark_pa_deleted(sb, pa);
5560 spin_unlock(&pa->pa_lock);
5561 rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node);
5562 list_add(&pa->u.pa_tmp_list, &list);
5563 continue;
5564 }
5565
5566 /* someone is deleting pa right now */
5567 spin_unlock(&pa->pa_lock);
5568 write_unlock(&ei->i_prealloc_lock);
5569
5570 /* we have to wait here because pa_deleted
5571 * doesn't mean pa is already unlinked from
5572 * the list.
As we might be called from
5573 * ->clear_inode(), the inode will get freed,
5574 * and a concurrent thread which is unlinking
5575 * the pa from the inode's list may access
5576 * already freed memory - bad-bad-bad */
5577
5578 /* XXX: if this happens too often, we can
5579 * add a flag to force wait only in case
5580 * of ->clear_inode(), but not in case of
5581 * regular truncate */
5582 schedule_timeout_uninterruptible(HZ);
5583 goto repeat;
5584 }
5585 write_unlock(&ei->i_prealloc_lock);
5586
5587 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
5588 BUG_ON(pa->pa_type != MB_INODE_PA);
5589 group = ext4_get_group_number(sb, pa->pa_pstart);
5590
5591 err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
5592 GFP_NOFS|__GFP_NOFAIL);
5593 if (err) {
5594 ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
5595 err, group);
5596 continue;
5597 }
5598
5599 bitmap_bh = ext4_read_block_bitmap(sb, group);
5600 if (IS_ERR(bitmap_bh)) {
5601 err = PTR_ERR(bitmap_bh);
5602 ext4_error_err(sb, -err, "Error %d reading block bitmap for %u",
5603 err, group);
5604 ext4_mb_unload_buddy(&e4b);
5605 continue;
5606 }
5607
5608 ext4_lock_group(sb, group);
5609 list_del(&pa->pa_group_list);
5610 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
5611 ext4_unlock_group(sb, group);
5612
5613 ext4_mb_unload_buddy(&e4b);
5614 put_bh(bitmap_bh);
5615
5616 list_del(&pa->u.pa_tmp_list);
5617 ext4_mb_pa_free(pa);
5618 }
5619 }
5620
5621 static int ext4_mb_pa_alloc(struct ext4_allocation_context *ac)
5622 {
5623 struct ext4_prealloc_space *pa;
5624
5625 BUG_ON(ext4_pspace_cachep == NULL);
5626 pa = kmem_cache_zalloc(ext4_pspace_cachep, GFP_NOFS);
5627 if (!pa)
5628 return -ENOMEM;
5629 atomic_set(&pa->pa_count, 1);
5630 ac->ac_pa = pa;
5631 return 0;
5632 }
5633
5634 static void ext4_mb_pa_put_free(struct ext4_allocation_context *ac)
5635 {
5636 struct ext4_prealloc_space *pa = ac->ac_pa;
5637
5638 BUG_ON(!pa);
5639 ac->ac_pa = NULL;
5640 WARN_ON(!atomic_dec_and_test(&pa->pa_count));
5641 /*
5642 * current function is only called due to an error or due to
5643 * len of found blocks < len of requested blocks, hence the PA has not
5644 * been added to grp->bb_prealloc_list.
So we don't need to lock it.
5645 */
5646 pa->pa_deleted = 1;
5647 ext4_mb_pa_free(pa);
5648 }
5649
5650 #ifdef CONFIG_EXT4_DEBUG
5651 static inline void ext4_mb_show_pa(struct super_block *sb)
5652 {
5653 ext4_group_t i, ngroups;
5654
5655 if (ext4_forced_shutdown(sb))
5656 return;
5657
5658 ngroups = ext4_get_groups_count(sb);
5659 mb_debug(sb, "groups: ");
5660 for (i = 0; i < ngroups; i++) {
5661 struct ext4_group_info *grp = ext4_get_group_info(sb, i);
5662 struct ext4_prealloc_space *pa;
5663 ext4_grpblk_t start;
5664 struct list_head *cur;
5665
5666 if (!grp)
5667 continue;
5668 ext4_lock_group(sb, i);
5669 list_for_each(cur, &grp->bb_prealloc_list) {
5670 pa = list_entry(cur, struct ext4_prealloc_space,
5671 pa_group_list);
5672 spin_lock(&pa->pa_lock);
5673 ext4_get_group_no_and_offset(sb, pa->pa_pstart,
5674 NULL, &start);
5675 spin_unlock(&pa->pa_lock);
5676 mb_debug(sb, "PA:%u:%d:%d\n", i, start,
5677 pa->pa_len);
5678 }
5679 ext4_unlock_group(sb, i);
5680 mb_debug(sb, "%u: %d/%d\n", i, grp->bb_free,
5681 grp->bb_fragments);
5682 }
5683 }
5684
5685 static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
5686 {
5687 struct super_block *sb = ac->ac_sb;
5688
5689 if (ext4_forced_shutdown(sb))
5690 return;
5691
5692 mb_debug(sb, "Can't allocate:"
5693 " Allocation context details:");
5694 mb_debug(sb, "status %u flags 0x%x",
5695 ac->ac_status, ac->ac_flags);
5696 mb_debug(sb, "orig %lu/%lu/%lu@%lu, "
5697 "goal %lu/%lu/%lu@%lu, "
5698 "best %lu/%lu/%lu@%lu cr %d",
5699 (unsigned long)ac->ac_o_ex.fe_group,
5700 (unsigned long)ac->ac_o_ex.fe_start,
5701 (unsigned long)ac->ac_o_ex.fe_len,
5702 (unsigned long)ac->ac_o_ex.fe_logical,
5703 (unsigned long)ac->ac_g_ex.fe_group,
5704 (unsigned long)ac->ac_g_ex.fe_start,
5705 (unsigned long)ac->ac_g_ex.fe_len,
5706 (unsigned long)ac->ac_g_ex.fe_logical,
5707 (unsigned long)ac->ac_b_ex.fe_group,
5708 (unsigned long)ac->ac_b_ex.fe_start,
5709 (unsigned long)ac->ac_b_ex.fe_len,
5710 (unsigned long)ac->ac_b_ex.fe_logical,
5711 (int)ac->ac_criteria);
5712 mb_debug(sb, "%u found", ac->ac_found);
5713 mb_debug(sb, "used pa: %s, ", ac->ac_pa ? "yes" : "no");
5714 if (ac->ac_pa)
5715 mb_debug(sb, "pa_type %s\n", ac->ac_pa->pa_type == MB_GROUP_PA ?
5716 "group pa" : "inode pa");
5717 ext4_mb_show_pa(sb);
5718 }
5719 #else
5720 static inline void ext4_mb_show_pa(struct super_block *sb)
5721 {
5722 }
5723 static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
5724 {
5725 ext4_mb_show_pa(ac->ac_sb);
5726 }
5727 #endif
5728
5729 /*
5730 * We use locality group preallocation for small files.
The size of the
5731 * file is determined by the current size or the resulting size after
5732 * allocation, whichever is larger
5733 *
5734 * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req
5735 */
5736 static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
5737 {
5738 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
5739 int bsbits = ac->ac_sb->s_blocksize_bits;
5740 loff_t size, isize;
5741 bool inode_pa_eligible, group_pa_eligible;
5742
5743 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
5744 return;
5745
5746 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
5747 return;
5748
5749 group_pa_eligible = sbi->s_mb_group_prealloc > 0;
5750 inode_pa_eligible = true;
5751 size = extent_logical_end(sbi, &ac->ac_o_ex);
5752 isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
5753 >> bsbits;
5754
5755 /* No point in using inode preallocation for closed files */
5756 if ((size == isize) && !ext4_fs_is_busy(sbi) &&
5757 !inode_is_open_for_write(ac->ac_inode))
5758 inode_pa_eligible = false;
5759
5760 size = max(size, isize);
5761 /* Don't use group allocation for large files */
5762 if (size > sbi->s_mb_stream_request)
5763 group_pa_eligible = false;
5764
5765 if (!group_pa_eligible) {
5766 if (inode_pa_eligible)
5767 ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
5768 else
5769 ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
5770 return;
5771 }
5772
5773 BUG_ON(ac->ac_lg != NULL);
5774 /*
5775 * locality group prealloc spaces are per-CPU. The reason for having
5776 * a per-CPU locality group is to reduce contention between block
5777 * requests from multiple CPUs.
5778 */
5779 ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups);
5780
5781 /* we're going to use group allocation */
5782 ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
5783
5784 /* serialize all allocations in the group */
5785 mutex_lock(&ac->ac_lg->lg_mutex);
5786 }
5787
5788 static noinline_for_stack void
5789 ext4_mb_initialize_context(struct ext4_allocation_context *ac,
5790 struct ext4_allocation_request *ar)
5791 {
5792 struct super_block *sb = ar->inode->i_sb;
5793 struct ext4_sb_info *sbi = EXT4_SB(sb);
5794 struct ext4_super_block *es = sbi->s_es;
5795 ext4_group_t group;
5796 unsigned int len;
5797 ext4_fsblk_t goal;
5798 ext4_grpblk_t block;
5799
5800 /* we can't allocate > group size */
5801 len = ar->len;
5802
5803 /* just a dirty hack to filter too big requests */
5804 if (len >= EXT4_CLUSTERS_PER_GROUP(sb))
5805 len = EXT4_CLUSTERS_PER_GROUP(sb);
5806
5807 /* start searching from the goal */
5808 goal = ar->goal;
5809 if (goal < le32_to_cpu(es->s_first_data_block) ||
5810 goal >= ext4_blocks_count(es))
5811 goal = le32_to_cpu(es->s_first_data_block);
5812 ext4_get_group_no_and_offset(sb, goal, &group, &block);
5813
5814 /* set up allocation goals */
5815 ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
5816 ac->ac_status = AC_STATUS_CONTINUE;
5817 ac->ac_sb = sb;
5818 ac->ac_inode = ar->inode;
5819 ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical;
5820 ac->ac_o_ex.fe_group = group;
5821 ac->ac_o_ex.fe_start = block;
5822 ac->ac_o_ex.fe_len = len;
5823 ac->ac_g_ex = ac->ac_o_ex;
5824 ac->ac_orig_goal_len = ac->ac_g_ex.fe_len;
5825 ac->ac_flags = ar->flags;
5826
5827 /* we have to define context: we'll work with a file or
5828 * locality group.
This is a policy, actually. */
5829 ext4_mb_group_or_file(ac);
5830
5831 mb_debug(sb, "init ac: %u blocks @ %u, goal %u, flags 0x%x, 2^%d, "
5832 "left: %u/%u, right %u/%u to %swritable\n",
5833 (unsigned) ar->len, (unsigned) ar->logical,
5834 (unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
5835 (unsigned) ar->lleft, (unsigned) ar->pleft,
5836 (unsigned) ar->lright, (unsigned) ar->pright,
5837 inode_is_open_for_write(ar->inode) ? "" : "non-");
5838 }
5839
5840 static noinline_for_stack void
5841 ext4_mb_discard_lg_preallocations(struct super_block *sb,
5842 struct ext4_locality_group *lg,
5843 int order, int total_entries)
5844 {
5845 ext4_group_t group = 0;
5846 struct ext4_buddy e4b;
5847 LIST_HEAD(discard_list);
5848 struct ext4_prealloc_space *pa, *tmp;
5849
5850 mb_debug(sb, "discard locality group preallocation\n");
5851
5852 spin_lock(&lg->lg_prealloc_lock);
5853 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
5854 pa_node.lg_list,
5855 lockdep_is_held(&lg->lg_prealloc_lock)) {
5856 spin_lock(&pa->pa_lock);
5857 if (atomic_read(&pa->pa_count)) {
5858 /*
5859 * This is the pa that we just used
5860 * for block allocation. So don't
5861 * free it
5862 */
5863 spin_unlock(&pa->pa_lock);
5864 continue;
5865 }
5866 if (pa->pa_deleted) {
5867 spin_unlock(&pa->pa_lock);
5868 continue;
5869 }
5870 /* only lg prealloc space */
5871 BUG_ON(pa->pa_type != MB_GROUP_PA);
5872
5873 /* seems this one can be freed ... */
5874 ext4_mb_mark_pa_deleted(sb, pa);
5875 spin_unlock(&pa->pa_lock);
5876
5877 list_del_rcu(&pa->pa_node.lg_list);
5878 list_add(&pa->u.pa_tmp_list, &discard_list);
5879
5880 total_entries--;
5881 if (total_entries <= 5) {
5882 /*
5883 * we want to keep only 5 entries
5884 * allowing it to grow to 8. This
5885 * makes sure we don't call discard
5886 * soon for this list.
5887 */
5888 break;
5889 }
5890 }
5891 spin_unlock(&lg->lg_prealloc_lock);
5892
5893 list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
5894 int err;
5895
5896 group = ext4_get_group_number(sb, pa->pa_pstart);
5897 err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
5898 GFP_NOFS|__GFP_NOFAIL);
5899 if (err) {
5900 ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
5901 err, group);
5902 continue;
5903 }
5904 ext4_lock_group(sb, group);
5905 list_del(&pa->pa_group_list);
5906 ext4_mb_release_group_pa(&e4b, pa);
5907 ext4_unlock_group(sb, group);
5908
5909 ext4_mb_unload_buddy(&e4b);
5910 list_del(&pa->u.pa_tmp_list);
5911 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5912 }
5913 }
5914
5915 /*
5916 * We have incremented pa_count. So it cannot be freed at this
5917 * point. Also we hold lg_mutex. So no parallel allocation is
5918 * possible from this lg. That means pa_free cannot be updated.
5919 *
5920 * A parallel ext4_mb_discard_group_preallocations is possible,
5921 * which can cause the lg_prealloc_list to be updated.
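 *
 * Illustrative example of the bucketing done below: a group PA with
 * pa_free = 12 clusters has fls(12) = 4, so it is placed on
 * lg_prealloc_list[3], the list of PAs with 8..15 free clusters
 * (the index is capped at PREALLOC_TB_SIZE - 1).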
5922 */
5923
5924 static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
5925 {
5926 int order, added = 0, lg_prealloc_count = 1;
5927 struct super_block *sb = ac->ac_sb;
5928 struct ext4_locality_group *lg = ac->ac_lg;
5929 struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
5930
5931 order = fls(pa->pa_free) - 1;
5932 if (order > PREALLOC_TB_SIZE - 1)
5933 /* The max size of hash table is PREALLOC_TB_SIZE */
5934 order = PREALLOC_TB_SIZE - 1;
5935 /* Add the prealloc space to lg */
5936 spin_lock(&lg->lg_prealloc_lock);
5937 list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
5938 pa_node.lg_list,
5939 lockdep_is_held(&lg->lg_prealloc_lock)) {
5940 spin_lock(&tmp_pa->pa_lock);
5941 if (tmp_pa->pa_deleted) {
5942 spin_unlock(&tmp_pa->pa_lock);
5943 continue;
5944 }
5945 if (!added && pa->pa_free < tmp_pa->pa_free) {
5946 /* Add to the tail of the previous entry */
5947 list_add_tail_rcu(&pa->pa_node.lg_list,
5948 &tmp_pa->pa_node.lg_list);
5949 added = 1;
5950 /*
5951 * we want to count the total
5952 * number of entries in the list
5953 */
5954 }
5955 spin_unlock(&tmp_pa->pa_lock);
5956 lg_prealloc_count++;
5957 }
5958 if (!added)
5959 list_add_tail_rcu(&pa->pa_node.lg_list,
5960 &lg->lg_prealloc_list[order]);
5961 spin_unlock(&lg->lg_prealloc_lock);
5962
5963 /* Now trim the list so it has no more than 8 elements */
5964 if (lg_prealloc_count > 8)
5965 ext4_mb_discard_lg_preallocations(sb, lg,
5966 order, lg_prealloc_count);
5967 }
5968
5969 /*
5970 * release all resources we used in the allocation
5971 */
5972 static void ext4_mb_release_context(struct ext4_allocation_context *ac)
5973 {
5974 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
5975 struct ext4_prealloc_space *pa = ac->ac_pa;
5976 if (pa) {
5977 if (pa->pa_type == MB_GROUP_PA) {
5978 /* see comment in ext4_mb_use_group_pa() */
5979 spin_lock(&pa->pa_lock);
5980 pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
5981 pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
5982 pa->pa_free -= ac->ac_b_ex.fe_len;
5983 pa->pa_len -= ac->ac_b_ex.fe_len;
5984 spin_unlock(&pa->pa_lock);
5985
5986 /*
5987 * We want to add the pa to the right bucket.
5988 * Remove it from the list and while adding
5989 * make sure the list to which we are adding
5990 * doesn't grow big.
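 * A group PA that has been fully consumed (pa_free == 0) is
 * deliberately not re-bucketed here; its last reference is dropped
 * via ext4_mb_put_pa() below.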
5991 */ 5992 if (likely(pa->pa_free)) { 5993 spin_lock(pa->pa_node_lock.lg_lock); 5994 list_del_rcu(&pa->pa_node.lg_list); 5995 spin_unlock(pa->pa_node_lock.lg_lock); 5996 ext4_mb_add_n_trim(ac); 5997 } 5998 } 5999 6000 ext4_mb_put_pa(ac, ac->ac_sb, pa); 6001 } 6002 if (ac->ac_bitmap_folio) 6003 folio_put(ac->ac_bitmap_folio); 6004 if (ac->ac_buddy_folio) 6005 folio_put(ac->ac_buddy_folio); 6006 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) 6007 mutex_unlock(&ac->ac_lg->lg_mutex); 6008 ext4_mb_collect_stats(ac); 6009 } 6010 6011 static int ext4_mb_discard_preallocations(struct super_block *sb, int needed) 6012 { 6013 ext4_group_t i, ngroups = ext4_get_groups_count(sb); 6014 int ret; 6015 int freed = 0, busy = 0; 6016 int retry = 0; 6017 6018 trace_ext4_mb_discard_preallocations(sb, needed); 6019 6020 if (needed == 0) 6021 needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1; 6022 repeat: 6023 for (i = 0; i < ngroups && needed > 0; i++) { 6024 ret = ext4_mb_discard_group_preallocations(sb, i, &busy); 6025 freed += ret; 6026 needed -= ret; 6027 cond_resched(); 6028 } 6029 6030 if (needed > 0 && busy && ++retry < 3) { 6031 busy = 0; 6032 goto repeat; 6033 } 6034 6035 return freed; 6036 } 6037 6038 static bool ext4_mb_discard_preallocations_should_retry(struct super_block *sb, 6039 struct ext4_allocation_context *ac, u64 *seq) 6040 { 6041 int freed; 6042 u64 seq_retry = 0; 6043 bool ret = false; 6044 6045 freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len); 6046 if (freed) { 6047 ret = true; 6048 goto out_dbg; 6049 } 6050 seq_retry = ext4_get_discard_pa_seq_sum(); 6051 if (!(ac->ac_flags & EXT4_MB_STRICT_CHECK) || seq_retry != *seq) { 6052 ac->ac_flags |= EXT4_MB_STRICT_CHECK; 6053 *seq = seq_retry; 6054 ret = true; 6055 } 6056 6057 out_dbg: 6058 mb_debug(sb, "freed %d, retry ? %s\n", freed, ret ? "yes" : "no"); 6059 return ret; 6060 } 6061 6062 /* 6063 * Simple allocator for Ext4 fast commit replay path. It searches for blocks 6064 * linearly starting at the goal block and also excludes the blocks which 6065 * are going to be in use after fast commit replay. 
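 * The scan starts at the goal's group and wraps around all block
 * groups; on success a single cluster is allocated and ar->len is
 * set to 1.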
6066 */
6067 static ext4_fsblk_t
6068 ext4_mb_new_blocks_simple(struct ext4_allocation_request *ar, int *errp)
6069 {
6070 struct buffer_head *bitmap_bh;
6071 struct super_block *sb = ar->inode->i_sb;
6072 struct ext4_sb_info *sbi = EXT4_SB(sb);
6073 ext4_group_t group, nr;
6074 ext4_grpblk_t blkoff;
6075 ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
6076 ext4_grpblk_t i = 0;
6077 ext4_fsblk_t goal, block;
6078 struct ext4_super_block *es = sbi->s_es;
6079
6080 goal = ar->goal;
6081 if (goal < le32_to_cpu(es->s_first_data_block) ||
6082 goal >= ext4_blocks_count(es))
6083 goal = le32_to_cpu(es->s_first_data_block);
6084
6085 ar->len = 0;
6086 ext4_get_group_no_and_offset(sb, goal, &group, &blkoff);
6087 for (nr = ext4_get_groups_count(sb); nr > 0; nr--) {
6088 bitmap_bh = ext4_read_block_bitmap(sb, group);
6089 if (IS_ERR(bitmap_bh)) {
6090 *errp = PTR_ERR(bitmap_bh);
6091 pr_warn("Failed to read block bitmap\n");
6092 return 0;
6093 }
6094
6095 while (1) {
6096 i = mb_find_next_zero_bit(bitmap_bh->b_data, max,
6097 blkoff);
6098 if (i >= max)
6099 break;
6100 if (ext4_fc_replay_check_excluded(sb,
6101 ext4_group_first_block_no(sb, group) +
6102 EXT4_C2B(sbi, i))) {
6103 blkoff = i + 1;
6104 } else
6105 break;
6106 }
6107 brelse(bitmap_bh);
6108 if (i < max)
6109 break;
6110
6111 if (++group >= ext4_get_groups_count(sb))
6112 group = 0;
6113
6114 blkoff = 0;
6115 }
6116
6117 if (i >= max) {
6118 *errp = -ENOSPC;
6119 return 0;
6120 }
6121
6122 block = ext4_group_first_block_no(sb, group) + EXT4_C2B(sbi, i);
6123 ext4_mb_mark_bb(sb, block, 1, true);
6124 ar->len = 1;
6125
6126 *errp = 0;
6127 return block;
6128 }
6129
6130 /*
6131 * Main entry point into mballoc to allocate blocks;
6132 * it tries to use preallocation first, then falls back
6133 * to usual allocation
6134 */
6135 ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
6136 struct ext4_allocation_request *ar, int *errp)
6137 {
6138 struct ext4_allocation_context *ac = NULL;
6139 struct ext4_sb_info *sbi;
6140 struct super_block *sb;
6141 ext4_fsblk_t block = 0;
6142 unsigned int inquota = 0;
6143 unsigned int reserv_clstrs = 0;
6144 int retries = 0;
6145 u64 seq;
6146
6147 might_sleep();
6148 sb = ar->inode->i_sb;
6149 sbi = EXT4_SB(sb);
6150
6151 trace_ext4_request_blocks(ar);
6152 if (sbi->s_mount_state & EXT4_FC_REPLAY)
6153 return ext4_mb_new_blocks_simple(ar, errp);
6154
6155 /* Allow use of the superuser reservation for quota files */
6156 if (ext4_is_quota_file(ar->inode))
6157 ar->flags |= EXT4_MB_USE_ROOT_BLOCKS;
6158
6159 if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) {
6160 /* Without delayed allocation we need to verify
6161 * there are enough free blocks to do block allocation
6162 * and verify allocation doesn't exceed the quota limits.
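 * If the claim fails, the request is halved and retried until it
 * either succeeds or drops to zero, in which case we return -ENOSPC.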
*/
6164 while (ar->len &&
6165 ext4_claim_free_clusters(sbi, ar->len, ar->flags)) {
6166
6167 /* let others free the space */
6168 cond_resched();
6169 ar->len = ar->len >> 1;
6170 }
6171 if (!ar->len) {
6172 ext4_mb_show_pa(sb);
6173 *errp = -ENOSPC;
6174 return 0;
6175 }
6176 reserv_clstrs = ar->len;
6177 if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) {
6178 dquot_alloc_block_nofail(ar->inode,
6179 EXT4_C2B(sbi, ar->len));
6180 } else {
6181 while (ar->len &&
6182 dquot_alloc_block(ar->inode,
6183 EXT4_C2B(sbi, ar->len))) {
6184
6185 ar->flags |= EXT4_MB_HINT_NOPREALLOC;
6186 ar->len--;
6187 }
6188 }
6189 inquota = ar->len;
6190 if (ar->len == 0) {
6191 *errp = -EDQUOT;
6192 goto out;
6193 }
6194 }
6195
6196 ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS);
6197 if (!ac) {
6198 ar->len = 0;
6199 *errp = -ENOMEM;
6200 goto out;
6201 }
6202
6203 ext4_mb_initialize_context(ac, ar);
6204
6205 ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
6206 seq = this_cpu_read(discard_pa_seq);
6207 if (!ext4_mb_use_preallocated(ac)) {
6208 ac->ac_op = EXT4_MB_HISTORY_ALLOC;
6209 ext4_mb_normalize_request(ac, ar);
6210
6211 *errp = ext4_mb_pa_alloc(ac);
6212 if (*errp)
6213 goto errout;
6214 repeat:
6215 /* allocate space in core */
6216 *errp = ext4_mb_regular_allocator(ac);
6217 /*
6218 * pa allocated above is added to grp->bb_prealloc_list only
6219 * when we were able to allocate some block i.e. when
6220 * ac->ac_status == AC_STATUS_FOUND.
6221 * An error from above means ac->ac_status != AC_STATUS_FOUND,
6222 * so we have to free this pa here itself.
6223 */
6224 if (*errp) {
6225 ext4_mb_pa_put_free(ac);
6226 ext4_discard_allocated_blocks(ac);
6227 goto errout;
6228 }
6229 if (ac->ac_status == AC_STATUS_FOUND &&
6230 ac->ac_o_ex.fe_len >= ac->ac_f_ex.fe_len)
6231 ext4_mb_pa_put_free(ac);
6232 }
6233 if (likely(ac->ac_status == AC_STATUS_FOUND)) {
6234 *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
6235 if (*errp) {
6236 ext4_discard_allocated_blocks(ac);
6237 goto errout;
6238 } else {
6239 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
6240 ar->len = ac->ac_b_ex.fe_len;
6241 }
6242 } else {
6243 if (++retries < 3 &&
6244 ext4_mb_discard_preallocations_should_retry(sb, ac, &seq))
6245 goto repeat;
6246 /*
6247 * If block allocation fails then the pa allocated above
6248 * needs to be freed here itself.
6249 */
6250 ext4_mb_pa_put_free(ac);
6251 *errp = -ENOSPC;
6252 }
6253
6254 if (*errp) {
6255 errout:
6256 ac->ac_b_ex.fe_len = 0;
6257 ar->len = 0;
6258 ext4_mb_show_ac(ac);
6259 }
6260 ext4_mb_release_context(ac);
6261 kmem_cache_free(ext4_ac_cachep, ac);
6262 out:
6263 if (inquota && ar->len < inquota)
6264 dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len));
6265 if (!ar->len) {
6266 if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0)
6267 /* release all the reserved blocks if non delalloc */
6268 percpu_counter_sub(&sbi->s_dirtyclusters_counter,
6269 reserv_clstrs);
6270 }
6271
6272 trace_ext4_allocate_blocks(ar, (unsigned long long)block);
6273
6274 return block;
6275 }
6276
6277 /*
6278 * We can merge two free data extents only if the physical blocks
6279 * are contiguous, AND the extents were freed by the same transaction,
6280 * AND the blocks are associated with the same group.
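 *
 * For example (illustrative numbers): an entry covering clusters
 * [100, 150) merges with a new entry covering [150, 180) into a single
 * entry covering [100, 180); the old entry is then removed and freed.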
6281 */
6282 static void ext4_try_merge_freed_extent(struct ext4_sb_info *sbi,
6283 struct ext4_free_data *entry,
6284 struct ext4_free_data *new_entry,
6285 struct rb_root *entry_rb_root)
6286 {
6287 if ((entry->efd_tid != new_entry->efd_tid) ||
6288 (entry->efd_group != new_entry->efd_group))
6289 return;
6290 if (entry->efd_start_cluster + entry->efd_count ==
6291 new_entry->efd_start_cluster) {
6292 new_entry->efd_start_cluster = entry->efd_start_cluster;
6293 new_entry->efd_count += entry->efd_count;
6294 } else if (new_entry->efd_start_cluster + new_entry->efd_count ==
6295 entry->efd_start_cluster) {
6296 new_entry->efd_count += entry->efd_count;
6297 } else
6298 return;
6299 spin_lock(&sbi->s_md_lock);
6300 list_del(&entry->efd_list);
6301 spin_unlock(&sbi->s_md_lock);
6302 rb_erase(&entry->efd_node, entry_rb_root);
6303 kmem_cache_free(ext4_free_data_cachep, entry);
6304 }
6305
6306 static noinline_for_stack void
6307 ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
6308 struct ext4_free_data *new_entry)
6309 {
6310 ext4_group_t group = e4b->bd_group;
6311 ext4_grpblk_t cluster;
6312 ext4_grpblk_t clusters = new_entry->efd_count;
6313 struct ext4_free_data *entry;
6314 struct ext4_group_info *db = e4b->bd_info;
6315 struct super_block *sb = e4b->bd_sb;
6316 struct ext4_sb_info *sbi = EXT4_SB(sb);
6317 struct rb_node **n = &db->bb_free_root.rb_node, *node;
6318 struct rb_node *parent = NULL, *new_node;
6319
6320 BUG_ON(!ext4_handle_valid(handle));
6321 BUG_ON(e4b->bd_bitmap_folio == NULL);
6322 BUG_ON(e4b->bd_buddy_folio == NULL);
6323
6324 new_node = &new_entry->efd_node;
6325 cluster = new_entry->efd_start_cluster;
6326
6327 if (!*n) {
6328 /* first free block extent. We need to
6329 * protect buddy cache from being freed,
6330 * otherwise we'll refresh it from
6331 * on-disk bitmap and lose not-yet-available
6332 * blocks */
6333 folio_get(e4b->bd_buddy_folio);
6334 folio_get(e4b->bd_bitmap_folio);
6335 }
6336 while (*n) {
6337 parent = *n;
6338 entry = rb_entry(parent, struct ext4_free_data, efd_node);
6339 if (cluster < entry->efd_start_cluster)
6340 n = &(*n)->rb_left;
6341 else if (cluster >= (entry->efd_start_cluster + entry->efd_count))
6342 n = &(*n)->rb_right;
6343 else {
6344 ext4_grp_locked_error(sb, group, 0,
6345 ext4_group_first_block_no(sb, group) +
6346 EXT4_C2B(sbi, cluster),
6347 "Block already on to-be-freed list");
6348 kmem_cache_free(ext4_free_data_cachep, new_entry);
6349 return;
6350 }
6351 }
6352
6353 rb_link_node(new_node, parent, n);
6354 rb_insert_color(new_node, &db->bb_free_root);
6355
6356 /* Now try to see if the extent can be merged to the left and right */
6357 node = rb_prev(new_node);
6358 if (node) {
6359 entry = rb_entry(node, struct ext4_free_data, efd_node);
6360 ext4_try_merge_freed_extent(sbi, entry, new_entry,
6361 &(db->bb_free_root));
6362 }
6363
6364 node = rb_next(new_node);
6365 if (node) {
6366 entry = rb_entry(node, struct ext4_free_data, efd_node);
6367 ext4_try_merge_freed_extent(sbi, entry, new_entry,
6368 &(db->bb_free_root));
6369 }
6370
6371 spin_lock(&sbi->s_md_lock);
6372 list_add_tail(&new_entry->efd_list, &sbi->s_freed_data_list[new_entry->efd_tid & 1]);
6373 sbi->s_mb_free_pending += clusters;
6374 spin_unlock(&sbi->s_md_lock);
6375 }
6376
6377 static void ext4_free_blocks_simple(struct inode *inode, ext4_fsblk_t block,
6378 unsigned long count)
6379 {
6380 struct super_block *sb = inode->i_sb;
6381 ext4_group_t group;
6382 ext4_grpblk_t blkoff;
6383
6384 ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
6385 ext4_mb_mark_context(NULL, sb, false, group, blkoff, count, 6386 EXT4_MB_BITMAP_MARKED_CHECK | 6387 EXT4_MB_SYNC_UPDATE, 6388 NULL); 6389 } 6390 6391 /** 6392 * ext4_mb_clear_bb() -- helper function for freeing blocks. 6393 * Used by ext4_free_blocks() 6394 * @handle: handle for this transaction 6395 * @inode: inode 6396 * @block: starting physical block to be freed 6397 * @count: number of blocks to be freed 6398 * @flags: flags used by ext4_free_blocks 6399 */ 6400 static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode, 6401 ext4_fsblk_t block, unsigned long count, 6402 int flags) 6403 { 6404 struct super_block *sb = inode->i_sb; 6405 struct ext4_group_info *grp; 6406 unsigned int overflow; 6407 ext4_grpblk_t bit; 6408 ext4_group_t block_group; 6409 struct ext4_sb_info *sbi; 6410 struct ext4_buddy e4b; 6411 unsigned int count_clusters; 6412 int err = 0; 6413 int mark_flags = 0; 6414 ext4_grpblk_t changed; 6415 6416 sbi = EXT4_SB(sb); 6417 6418 if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) && 6419 !ext4_inode_block_valid(inode, block, count)) { 6420 ext4_error(sb, "Freeing blocks in system zone - " 6421 "Block = %llu, count = %lu", block, count); 6422 /* err = 0. ext4_std_error should be a no op */ 6423 goto error_out; 6424 } 6425 flags |= EXT4_FREE_BLOCKS_VALIDATED; 6426 6427 do_more: 6428 overflow = 0; 6429 ext4_get_group_no_and_offset(sb, block, &block_group, &bit); 6430 6431 grp = ext4_get_group_info(sb, block_group); 6432 if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) 6433 return; 6434 6435 /* 6436 * Check to see if we are freeing blocks across a group 6437 * boundary. 6438 */ 6439 if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) { 6440 overflow = EXT4_C2B(sbi, bit) + count - 6441 EXT4_BLOCKS_PER_GROUP(sb); 6442 count -= overflow; 6443 /* The range changed so it's no longer validated */ 6444 flags &= ~EXT4_FREE_BLOCKS_VALIDATED; 6445 } 6446 count_clusters = EXT4_NUM_B2C(sbi, count); 6447 trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters); 6448 6449 /* __GFP_NOFAIL: retry infinitely, ignore TIF_MEMDIE and memcg limit. */ 6450 err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b, 6451 GFP_NOFS|__GFP_NOFAIL); 6452 if (err) 6453 goto error_out; 6454 6455 if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) && 6456 !ext4_inode_block_valid(inode, block, count)) { 6457 ext4_error(sb, "Freeing blocks in system zone - " 6458 "Block = %llu, count = %lu", block, count); 6459 /* err = 0. ext4_std_error should be a no op */ 6460 goto error_clean; 6461 } 6462 6463 #ifdef AGGRESSIVE_CHECK 6464 mark_flags |= EXT4_MB_BITMAP_MARKED_CHECK; 6465 #endif 6466 err = ext4_mb_mark_context(handle, sb, false, block_group, bit, 6467 count_clusters, mark_flags, &changed); 6468 6469 6470 if (err && changed == 0) 6471 goto error_clean; 6472 6473 #ifdef AGGRESSIVE_CHECK 6474 BUG_ON(changed != count_clusters); 6475 #endif 6476 6477 /* 6478 * We need to make sure we don't reuse the freed block until after the 6479 * transaction is committed. We make an exception if the inode is to be 6480 * written in writeback mode since writeback mode has weak data 6481 * consistency guarantees. 6482 */ 6483 if (ext4_handle_valid(handle) && 6484 ((flags & EXT4_FREE_BLOCKS_METADATA) || 6485 !ext4_should_writeback_data(inode))) { 6486 struct ext4_free_data *new_entry; 6487 /* 6488 * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed 6489 * to fail. 
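 * (by this point ext4_mb_mark_context() has already cleared the bits
 * in the on-disk bitmap within this transaction, so the free cannot
 * simply be backed out)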
6490 */ 6491 new_entry = kmem_cache_alloc(ext4_free_data_cachep, 6492 GFP_NOFS|__GFP_NOFAIL); 6493 new_entry->efd_start_cluster = bit; 6494 new_entry->efd_group = block_group; 6495 new_entry->efd_count = count_clusters; 6496 new_entry->efd_tid = handle->h_transaction->t_tid; 6497 6498 ext4_lock_group(sb, block_group); 6499 ext4_mb_free_metadata(handle, &e4b, new_entry); 6500 } else { 6501 if (test_opt(sb, DISCARD)) { 6502 err = ext4_issue_discard(sb, block_group, bit, 6503 count_clusters); 6504 /* 6505 * Ignore EOPNOTSUPP error. This is consistent with 6506 * what happens when using journal. 6507 */ 6508 if (err == -EOPNOTSUPP) 6509 err = 0; 6510 if (err) 6511 ext4_msg(sb, KERN_WARNING, "discard request in" 6512 " group:%u block:%d count:%lu failed" 6513 " with %d", block_group, bit, count, 6514 err); 6515 } 6516 6517 EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info); 6518 6519 ext4_lock_group(sb, block_group); 6520 mb_free_blocks(inode, &e4b, bit, count_clusters); 6521 } 6522 6523 ext4_unlock_group(sb, block_group); 6524 6525 /* 6526 * on a bigalloc file system, defer the s_freeclusters_counter 6527 * update to the caller (ext4_remove_space and friends) so they 6528 * can determine if a cluster freed here should be rereserved 6529 */ 6530 if (!(flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)) { 6531 if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE)) 6532 dquot_free_block(inode, EXT4_C2B(sbi, count_clusters)); 6533 percpu_counter_add(&sbi->s_freeclusters_counter, 6534 count_clusters); 6535 } 6536 6537 if (overflow && !err) { 6538 block += count; 6539 count = overflow; 6540 ext4_mb_unload_buddy(&e4b); 6541 /* The range changed so it's no longer validated */ 6542 flags &= ~EXT4_FREE_BLOCKS_VALIDATED; 6543 goto do_more; 6544 } 6545 6546 error_clean: 6547 ext4_mb_unload_buddy(&e4b); 6548 error_out: 6549 ext4_std_error(sb, err); 6550 } 6551 6552 /** 6553 * ext4_free_blocks() -- Free given blocks and update quota 6554 * @handle: handle for this transaction 6555 * @inode: inode 6556 * @bh: optional buffer of the block to be freed 6557 * @block: starting physical block to be freed 6558 * @count: number of blocks to be freed 6559 * @flags: flags used by ext4_free_blocks 6560 */ 6561 void ext4_free_blocks(handle_t *handle, struct inode *inode, 6562 struct buffer_head *bh, ext4_fsblk_t block, 6563 unsigned long count, int flags) 6564 { 6565 struct super_block *sb = inode->i_sb; 6566 unsigned int overflow; 6567 struct ext4_sb_info *sbi; 6568 6569 sbi = EXT4_SB(sb); 6570 6571 if (bh) { 6572 if (block) 6573 BUG_ON(block != bh->b_blocknr); 6574 else 6575 block = bh->b_blocknr; 6576 } 6577 6578 if (sbi->s_mount_state & EXT4_FC_REPLAY) { 6579 ext4_free_blocks_simple(inode, block, EXT4_NUM_B2C(sbi, count)); 6580 return; 6581 } 6582 6583 might_sleep(); 6584 6585 if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) && 6586 !ext4_inode_block_valid(inode, block, count)) { 6587 ext4_error(sb, "Freeing blocks not in datazone - " 6588 "block = %llu, count = %lu", block, count); 6589 return; 6590 } 6591 flags |= EXT4_FREE_BLOCKS_VALIDATED; 6592 6593 ext4_debug("freeing block %llu\n", block); 6594 trace_ext4_free_blocks(inode, block, count, flags); 6595 6596 if (bh && (flags & EXT4_FREE_BLOCKS_FORGET)) { 6597 BUG_ON(count > 1); 6598 6599 ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA, 6600 inode, bh, block); 6601 } 6602 6603 /* 6604 * If the extent to be freed does not begin on a cluster 6605 * boundary, we need to deal with partial clusters at the 6606 * beginning and end of the extent. 
Normally we will free 6607 * blocks at the beginning or the end unless we are explicitly 6608 * requested to avoid doing so. 6609 */ 6610 overflow = EXT4_PBLK_COFF(sbi, block); 6611 if (overflow) { 6612 if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) { 6613 overflow = sbi->s_cluster_ratio - overflow; 6614 block += overflow; 6615 if (count > overflow) 6616 count -= overflow; 6617 else 6618 return; 6619 } else { 6620 block -= overflow; 6621 count += overflow; 6622 } 6623 /* The range changed so it's no longer validated */ 6624 flags &= ~EXT4_FREE_BLOCKS_VALIDATED; 6625 } 6626 overflow = EXT4_LBLK_COFF(sbi, count); 6627 if (overflow) { 6628 if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) { 6629 if (count > overflow) 6630 count -= overflow; 6631 else 6632 return; 6633 } else 6634 count += sbi->s_cluster_ratio - overflow; 6635 /* The range changed so it's no longer validated */ 6636 flags &= ~EXT4_FREE_BLOCKS_VALIDATED; 6637 } 6638 6639 if (!bh && (flags & EXT4_FREE_BLOCKS_FORGET)) { 6640 int i; 6641 int is_metadata = flags & EXT4_FREE_BLOCKS_METADATA; 6642 6643 for (i = 0; i < count; i++) { 6644 cond_resched(); 6645 if (is_metadata) 6646 bh = sb_find_get_block(inode->i_sb, block + i); 6647 ext4_forget(handle, is_metadata, inode, bh, block + i); 6648 } 6649 } 6650 6651 ext4_mb_clear_bb(handle, inode, block, count, flags); 6652 } 6653 6654 /** 6655 * ext4_group_add_blocks() -- Add given blocks to an existing group 6656 * @handle: handle to this transaction 6657 * @sb: super block 6658 * @block: start physical block to add to the block group 6659 * @count: number of blocks to free 6660 * 6661 * This marks the blocks as free in the bitmap and buddy. 6662 */ 6663 int ext4_group_add_blocks(handle_t *handle, struct super_block *sb, 6664 ext4_fsblk_t block, unsigned long count) 6665 { 6666 ext4_group_t block_group; 6667 ext4_grpblk_t bit; 6668 struct ext4_sb_info *sbi = EXT4_SB(sb); 6669 struct ext4_buddy e4b; 6670 int err = 0; 6671 ext4_fsblk_t first_cluster = EXT4_B2C(sbi, block); 6672 ext4_fsblk_t last_cluster = EXT4_B2C(sbi, block + count - 1); 6673 unsigned long cluster_count = last_cluster - first_cluster + 1; 6674 ext4_grpblk_t changed; 6675 6676 ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1); 6677 6678 if (cluster_count == 0) 6679 return 0; 6680 6681 ext4_get_group_no_and_offset(sb, block, &block_group, &bit); 6682 /* 6683 * Check to see if we are freeing blocks across a group 6684 * boundary. 
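 * Unlike ext4_mb_clear_bb(), which splits a range that crosses the
 * group boundary and loops, such a range is rejected here with
 * -EINVAL.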
6685 */
6686 if (bit + cluster_count > EXT4_CLUSTERS_PER_GROUP(sb)) {
6687 ext4_warning(sb, "too many blocks added to group %u",
6688 block_group);
6689 err = -EINVAL;
6690 goto error_out;
6691 }
6692
6693 err = ext4_mb_load_buddy(sb, block_group, &e4b);
6694 if (err)
6695 goto error_out;
6696
6697 if (!ext4_sb_block_valid(sb, NULL, block, count)) {
6698 ext4_error(sb, "Adding blocks in system zones - "
6699 "Block = %llu, count = %lu",
6700 block, count);
6701 err = -EINVAL;
6702 goto error_clean;
6703 }
6704
6705 err = ext4_mb_mark_context(handle, sb, false, block_group, bit,
6706 cluster_count, EXT4_MB_BITMAP_MARKED_CHECK,
6707 &changed);
6708 if (err && changed == 0)
6709 goto error_clean;
6710
6711 if (changed != cluster_count)
6712 ext4_error(sb, "bit already cleared in group %u", block_group);
6713
6714 ext4_lock_group(sb, block_group);
6715 mb_free_blocks(NULL, &e4b, bit, cluster_count);
6716 ext4_unlock_group(sb, block_group);
6717 percpu_counter_add(&sbi->s_freeclusters_counter,
6718 changed);
6719
6720 error_clean:
6721 ext4_mb_unload_buddy(&e4b);
6722 error_out:
6723 ext4_std_error(sb, err);
6724 return err;
6725 }
6726
6727 /**
6728 * ext4_trim_extent -- function to TRIM one single free extent in the group
6729 * @sb: super block for the file system
6730 * @start: starting block of the free extent in the alloc. group
6731 * @count: number of blocks to TRIM
6732 * @e4b: ext4 buddy for the group
6733 *
6734 * Trim "count" blocks starting at "start" in the "group". To ensure that no
6735 * one will allocate those blocks, mark them as used in the buddy bitmap. This
6736 * must be called under the group lock.
6737 */
6738 static int ext4_trim_extent(struct super_block *sb,
6739 int start, int count, struct ext4_buddy *e4b)
6740 __releases(bitlock)
6741 __acquires(bitlock)
6742 {
6743 struct ext4_free_extent ex;
6744 ext4_group_t group = e4b->bd_group;
6745 int ret = 0;
6746
6747 trace_ext4_trim_extent(sb, group, start, count);
6748
6749 assert_spin_locked(ext4_group_lock_ptr(sb, group));
6750
6751 ex.fe_start = start;
6752 ex.fe_group = group;
6753 ex.fe_len = count;
6754
6755 /*
6756 * Mark blocks used, so no one can reuse them while
6757 * being trimmed.
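 * The group lock is dropped while the discard request is issued,
 * since the discard may block; the blocks are freed back to the
 * buddy afterwards.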
6758 */ 6759 mb_mark_used(e4b, &ex); 6760 ext4_unlock_group(sb, group); 6761 ret = ext4_issue_discard(sb, group, start, count); 6762 ext4_lock_group(sb, group); 6763 mb_free_blocks(NULL, e4b, start, ex.fe_len); 6764 return ret; 6765 } 6766 6767 static ext4_grpblk_t ext4_last_grp_cluster(struct super_block *sb, 6768 ext4_group_t grp) 6769 { 6770 unsigned long nr_clusters_in_group; 6771 6772 if (grp < (ext4_get_groups_count(sb) - 1)) 6773 nr_clusters_in_group = EXT4_CLUSTERS_PER_GROUP(sb); 6774 else 6775 nr_clusters_in_group = (ext4_blocks_count(EXT4_SB(sb)->s_es) - 6776 ext4_group_first_block_no(sb, grp)) 6777 >> EXT4_CLUSTER_BITS(sb); 6778 6779 return nr_clusters_in_group - 1; 6780 } 6781 6782 static bool ext4_trim_interrupted(void) 6783 { 6784 return fatal_signal_pending(current) || freezing(current); 6785 } 6786 6787 static int ext4_try_to_trim_range(struct super_block *sb, 6788 struct ext4_buddy *e4b, ext4_grpblk_t start, 6789 ext4_grpblk_t max, ext4_grpblk_t minblocks) 6790 __acquires(ext4_group_lock_ptr(sb, e4b->bd_group)) 6791 __releases(ext4_group_lock_ptr(sb, e4b->bd_group)) 6792 { 6793 ext4_grpblk_t next, count, free_count, last, origin_start; 6794 bool set_trimmed = false; 6795 void *bitmap; 6796 6797 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) 6798 return 0; 6799 6800 last = ext4_last_grp_cluster(sb, e4b->bd_group); 6801 bitmap = e4b->bd_bitmap; 6802 if (start == 0 && max >= last) 6803 set_trimmed = true; 6804 origin_start = start; 6805 start = max(e4b->bd_info->bb_first_free, start); 6806 count = 0; 6807 free_count = 0; 6808 6809 while (start <= max) { 6810 start = mb_find_next_zero_bit(bitmap, max + 1, start); 6811 if (start > max) 6812 break; 6813 6814 next = mb_find_next_bit(bitmap, last + 1, start); 6815 if (origin_start == 0 && next >= last) 6816 set_trimmed = true; 6817 6818 if ((next - start) >= minblocks) { 6819 int ret = ext4_trim_extent(sb, start, next - start, e4b); 6820 6821 if (ret && ret != -EOPNOTSUPP) 6822 return count; 6823 count += next - start; 6824 } 6825 free_count += next - start; 6826 start = next + 1; 6827 6828 if (ext4_trim_interrupted()) 6829 return count; 6830 6831 if (need_resched()) { 6832 ext4_unlock_group(sb, e4b->bd_group); 6833 cond_resched(); 6834 ext4_lock_group(sb, e4b->bd_group); 6835 } 6836 6837 if ((e4b->bd_info->bb_free - free_count) < minblocks) 6838 break; 6839 } 6840 6841 if (set_trimmed) 6842 EXT4_MB_GRP_SET_TRIMMED(e4b->bd_info); 6843 6844 return count; 6845 } 6846 6847 /** 6848 * ext4_trim_all_free -- function to trim all free space in alloc. group 6849 * @sb: super block for file system 6850 * @group: group to be trimmed 6851 * @start: first group block to examine 6852 * @max: last group block to examine 6853 * @minblocks: minimum extent block count 6854 * 6855 * ext4_trim_all_free walks through group's block bitmap searching for free 6856 * extents. When the free extent is found, mark it as used in group buddy 6857 * bitmap. Then issue a TRIM command on this extent and free the extent in 6858 * the group buddy bitmap. 
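 *
 * A group that was already trimmed is scanned again only when a
 * smaller minimum extent length than the one used last time (tracked
 * in s_last_trim_minblks) is requested.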
6859 */
6860 static ext4_grpblk_t
6861 ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
6862 ext4_grpblk_t start, ext4_grpblk_t max,
6863 ext4_grpblk_t minblocks)
6864 {
6865 struct ext4_buddy e4b;
6866 int ret;
6867
6868 trace_ext4_trim_all_free(sb, group, start, max);
6869
6870 ret = ext4_mb_load_buddy(sb, group, &e4b);
6871 if (ret) {
6872 ext4_warning(sb, "Error %d loading buddy information for %u",
6873 ret, group);
6874 return ret;
6875 }
6876
6877 ext4_lock_group(sb, group);
6878
6879 if (!EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) ||
6880 minblocks < EXT4_SB(sb)->s_last_trim_minblks)
6881 ret = ext4_try_to_trim_range(sb, &e4b, start, max, minblocks);
6882 else
6883 ret = 0;
6884
6885 ext4_unlock_group(sb, group);
6886 ext4_mb_unload_buddy(&e4b);
6887
6888 ext4_debug("trimmed %d blocks in the group %d\n",
6889 ret, group);
6890
6891 return ret;
6892 }
6893
6894 /**
6895 * ext4_trim_fs() -- trim ioctl handle function
6896 * @sb: superblock for filesystem
6897 * @range: fstrim_range structure
6898 *
6899 * start: First Byte to trim
6900 * len: number of Bytes to trim from start
6901 * minlen: minimum extent length in Bytes
6902 * ext4_trim_fs goes through all allocation groups containing Bytes from
6903 * start to start+len. For each such group the ext4_trim_all_free function
6904 * is invoked to trim all free space.
6905 */
6906 int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
6907 {
6908 unsigned int discard_granularity = bdev_discard_granularity(sb->s_bdev);
6909 struct ext4_group_info *grp;
6910 ext4_group_t group, first_group, last_group;
6911 ext4_grpblk_t cnt = 0, first_cluster, last_cluster;
6912 uint64_t start, end, minlen, trimmed = 0;
6913 ext4_fsblk_t first_data_blk =
6914 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
6915 ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es);
6916 int ret = 0;
6917
6918 start = range->start >> sb->s_blocksize_bits;
6919 end = start + (range->len >> sb->s_blocksize_bits) - 1;
6920 minlen = EXT4_NUM_B2C(EXT4_SB(sb),
6921 range->minlen >> sb->s_blocksize_bits);
6922
6923 if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) ||
6924 start >= max_blks ||
6925 range->len < sb->s_blocksize)
6926 return -EINVAL;
6927 /* No point in trying to trim less than the discard granularity */
6928 if (range->minlen < discard_granularity) {
6929 minlen = EXT4_NUM_B2C(EXT4_SB(sb),
6930 discard_granularity >> sb->s_blocksize_bits);
6931 if (minlen > EXT4_CLUSTERS_PER_GROUP(sb))
6932 goto out;
6933 }
6934 if (end >= max_blks - 1)
6935 end = max_blks - 1;
6936 if (end <= first_data_blk)
6937 goto out;
6938 if (start < first_data_blk)
6939 start = first_data_blk;
6940
6941 /* Determine first and last group to examine based on start and end */
6942 ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
6943 &first_group, &first_cluster);
6944 ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end,
6945 &last_group, &last_cluster);
6946
6947 /* end now represents the last cluster to discard in this group */
6948 end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
6949
6950 for (group = first_group; group <= last_group; group++) {
6951 if (ext4_trim_interrupted())
6952 break;
6953 grp = ext4_get_group_info(sb, group);
6954 if (!grp)
6955 continue;
6956 /* We only do this if the grp has never been initialized */
6957 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
6958 ret = ext4_mb_init_group(sb, group, GFP_NOFS);
6959 if (ret)
6960 break;
6961 }
6962
6963 /*
6964 * For all the groups except the last one, last cluster will
6965 * always be
EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need to 6966 * change it for the last group, note that last_cluster is 6967 * already computed earlier by ext4_get_group_no_and_offset() 6968 */ 6969 if (group == last_group) 6970 end = last_cluster; 6971 if (grp->bb_free >= minlen) { 6972 cnt = ext4_trim_all_free(sb, group, first_cluster, 6973 end, minlen); 6974 if (cnt < 0) { 6975 ret = cnt; 6976 break; 6977 } 6978 trimmed += cnt; 6979 } 6980 6981 /* 6982 * For every group except the first one, we are sure 6983 * that the first cluster to discard will be cluster #0. 6984 */ 6985 first_cluster = 0; 6986 } 6987 6988 if (!ret) 6989 EXT4_SB(sb)->s_last_trim_minblks = minlen; 6990 6991 out: 6992 range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits; 6993 return ret; 6994 } 6995 6996 /* Iterate all the free extents in the group. */ 6997 int 6998 ext4_mballoc_query_range( 6999 struct super_block *sb, 7000 ext4_group_t group, 7001 ext4_grpblk_t start, 7002 ext4_grpblk_t end, 7003 ext4_mballoc_query_range_fn formatter, 7004 void *priv) 7005 { 7006 void *bitmap; 7007 ext4_grpblk_t next; 7008 struct ext4_buddy e4b; 7009 int error; 7010 7011 error = ext4_mb_load_buddy(sb, group, &e4b); 7012 if (error) 7013 return error; 7014 bitmap = e4b.bd_bitmap; 7015 7016 ext4_lock_group(sb, group); 7017 7018 start = max(e4b.bd_info->bb_first_free, start); 7019 if (end >= EXT4_CLUSTERS_PER_GROUP(sb)) 7020 end = EXT4_CLUSTERS_PER_GROUP(sb) - 1; 7021 7022 while (start <= end) { 7023 start = mb_find_next_zero_bit(bitmap, end + 1, start); 7024 if (start > end) 7025 break; 7026 next = mb_find_next_bit(bitmap, end + 1, start); 7027 7028 ext4_unlock_group(sb, group); 7029 error = formatter(sb, group, start, next - start, priv); 7030 if (error) 7031 goto out_unload; 7032 ext4_lock_group(sb, group); 7033 7034 start = next + 1; 7035 } 7036 7037 ext4_unlock_group(sb, group); 7038 out_unload: 7039 ext4_mb_unload_buddy(&e4b); 7040 7041 return error; 7042 } 7043 7044 #ifdef CONFIG_EXT4_KUNIT_TESTS 7045 #include "mballoc-test.c" 7046 #endif 7047
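
/*
 * Illustrative sketch (not part of the kernel source): a minimal
 * ext4_mballoc_query_range_fn callback matching the formatter
 * signature used above. The callback name and this usage are
 * hypothetical and shown only to document the interface.
 *
 *	static int print_free_extent(struct super_block *sb,
 *				     ext4_group_t group, ext4_grpblk_t start,
 *				     ext4_grpblk_t len, void *priv)
 *	{
 *		pr_info("group %u: free extent at %u, %u clusters\n",
 *			group, start, len);
 *		return 0;
 *	}
 *
 *	int err = ext4_mballoc_query_range(sb, group, 0,
 *					   EXT4_CLUSTERS_PER_GROUP(sb) - 1,
 *					   print_free_extent, NULL);
 */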