// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/fiemap.h>
#include <linux/iomap.h>
#include <linux/sched/mm.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"
#include "xattr.h"

#include <trace/events/ext4.h>

/*
 * used by extent splitting.
 */
#define EXT4_EXT_MAY_ZEROOUT	0x1  /* safe to zeroout if split fails \
					due to ENOSPC */
#define EXT4_EXT_MARK_UNWRIT1	0x2  /* mark first half unwritten */
#define EXT4_EXT_MARK_UNWRIT2	0x4  /* mark second half unwritten */

#define EXT4_EXT_DATA_VALID1	0x8  /* first half contains valid data */
#define EXT4_EXT_DATA_VALID2	0x10 /* second half contains valid data */

static __le32 ext4_extent_block_csum(struct inode *inode,
				     struct ext4_extent_header *eh)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 csum;

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
			   EXT4_EXTENT_TAIL_OFFSET(eh));
	return cpu_to_le32(csum);
}

static int ext4_extent_block_csum_verify(struct inode *inode,
					 struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!ext4_has_metadata_csum(inode->i_sb))
		return 1;

	et = find_ext4_extent_tail(eh);
	if (et->et_checksum != ext4_extent_block_csum(inode, eh))
		return 0;
	return 1;
}

static void ext4_extent_block_csum_set(struct inode *inode,
				       struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!ext4_has_metadata_csum(inode->i_sb))
		return;

	et = find_ext4_extent_tail(eh);
	et->et_checksum = ext4_extent_block_csum(inode, eh);
}

static struct ext4_ext_path *ext4_split_extent_at(handle_t *handle,
						  struct inode *inode,
						  struct ext4_ext_path *path,
						  ext4_lblk_t split,
						  int split_flag, int flags);

static int ext4_ext_trunc_restart_fn(struct inode *inode, int *dropped)
{
	/*
	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * page cache has been already dropped and writes are blocked by
	 * i_rwsem. So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	ext4_discard_preallocations(inode);
	up_write(&EXT4_I(inode)->i_data_sem);
	*dropped = 1;
	return 0;
}

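/*
 * The small helpers below centralise releasing the buffer heads
 * referenced by a path array: ext4_ext_path_brelse() drops one level's
 * bh and clears the pointer so a later release cannot double-drop it,
 * and ext4_ext_drop_refs() applies that to every level of the path.
 */
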
static inline void ext4_ext_path_brelse(struct ext4_ext_path *path)
{
	brelse(path->p_bh);
	path->p_bh = NULL;
}

static void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
	int depth, i;

	if (IS_ERR_OR_NULL(path))
		return;
	depth = path->p_depth;
	for (i = 0; i <= depth; i++, path++)
		ext4_ext_path_brelse(path);
}

void ext4_free_ext_path(struct ext4_ext_path *path)
{
	if (IS_ERR_OR_NULL(path))
		return;
	ext4_ext_drop_refs(path);
	kfree(path);
}

/*
 * Make sure 'handle' has at least 'check_cred' credits. If not, restart
 * transaction with 'restart_cred' credits. The function drops i_data_sem
 * when restarting transaction and gets it after transaction is restarted.
 *
 * The function returns 0 on success, 1 if transaction had to be restarted,
 * and < 0 in case of fatal error.
 */
int ext4_datasem_ensure_credits(handle_t *handle, struct inode *inode,
				int check_cred, int restart_cred,
				int revoke_cred)
{
	int ret;
	int dropped = 0;

	ret = ext4_journal_ensure_credits_fn(handle, check_cred, restart_cred,
		revoke_cred, ext4_ext_trunc_restart_fn(inode, &dropped));
	if (dropped)
		down_write(&EXT4_I(inode)->i_data_sem);
	return ret;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	int err = 0;

	if (path->p_bh) {
		/* path points to block */
		BUFFER_TRACE(path->p_bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, inode->i_sb,
						    path->p_bh, EXT4_JTR_NONE);
		/*
		 * The extent buffer's verified bit will be set again in
		 * __ext4_ext_dirty(). We could leave an inconsistent
		 * buffer if the extents updating procedure breaks off
		 * due to some error, so force it to be checked again.
		 */
		if (!err)
			clear_buffer_verified(path->p_bh);
	}
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */
	return err;
}

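/*
 * Note: every on-disk modification of an extent node pairs
 * ext4_ext_get_access() above (journal write access, verified bit
 * cleared) with __ext4_ext_dirty() below, which recomputes the block
 * checksum and sets the verified bit again before dirtying the buffer.
 */
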
/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 *  - EIO
 */
static int __ext4_ext_dirty(const char *where, unsigned int line,
			    handle_t *handle, struct inode *inode,
			    struct ext4_ext_path *path)
{
	int err;

	WARN_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
	if (path->p_bh) {
		ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
		/* path points to block */
		err = __ext4_handle_dirty_metadata(where, line, handle,
						   inode, path->p_bh);
		/* Extents updating done, re-set verified flag */
		if (!err)
			set_buffer_verified(path->p_bh);
	} else {
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);
	}
	return err;
}

#define ext4_ext_dirty(handle, inode, path) \
		__ext4_ext_dirty(__func__, __LINE__, (handle), (inode), (path))

static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
				       struct ext4_ext_path *path,
				       ext4_lblk_t block)
{
	if (path) {
		int depth = path->p_depth;
		struct ext4_extent *ex;

		/*
		 * Try to predict block placement assuming that we are
		 * filling in a file which will eventually be
		 * non-sparse --- i.e., in the case of libbfd writing
		 * an ELF object sections out-of-order but in a way
		 * that eventually results in a contiguous object or
		 * executable file, or some database extending a table
		 * space file.  However, this is actually somewhat
		 * non-ideal if we are writing a sparse file such as
		 * qemu or KVM writing a raw image file that is going
		 * to stay fairly sparse, since it will end up
		 * fragmenting the file system's free space.  Maybe we
		 * should have some heuristics or some way to allow
		 * userspace to pass a hint to the file system,
		 * especially if the latter case turns out to be
		 * common.
		 */
		ex = path[depth].p_ext;
		if (ex) {
			ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
			ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);

			if (block > ext_block)
				return ext_pblk + (block - ext_block);
			else
				return ext_pblk - (ext_block - block);
		}

		/* it looks like index is empty;
		 * try to find starting block from index itself */
		if (path[depth].p_bh)
			return path[depth].p_bh->b_blocknr;
	}

	/* OK. use inode's group */
	return ext4_inode_to_goal_block(inode);
}

/*
 * Allocation for a meta data block
 */
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
			struct ext4_extent *ex, int *err, unsigned int flags)
{
	ext4_fsblk_t goal, newblock;

	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
					NULL, err);
	return newblock;
}

static inline int ext4_ext_space_block(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 6)
		size = 6;
#endif
	return size;
}

static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 5)
		size = 5;
#endif
	return size;
}

static inline int ext4_ext_space_root(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 3)
		size = 3;
#endif
	return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 4)
		size = 4;
#endif
	return size;
}

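/*
 * Worked example for the four capacity helpers above (illustrative,
 * assuming a 4096-byte block size): the extent header, an extent entry
 * and an index entry are 12 bytes each, so a non-root tree block holds
 * (4096 - 12) / 12 = 340 entries, while the root in the inode's
 * 60-byte i_data area holds (60 - 12) / 12 = 4.
 */
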
static inline struct ext4_ext_path *
ext4_force_split_extent_at(handle_t *handle, struct inode *inode,
			   struct ext4_ext_path *path, ext4_lblk_t lblk,
			   int nofail)
{
	int unwritten = ext4_ext_is_unwritten(path[path->p_depth].p_ext);
	int flags = EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO;

	if (nofail)
		flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL | EXT4_EX_NOFAIL;

	return ext4_split_extent_at(handle, inode, path, lblk, unwritten ?
			EXT4_EXT_MARK_UNWRIT1|EXT4_EXT_MARK_UNWRIT2 : 0,
			flags);
}

static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
	int max;

	if (depth == ext_depth(inode)) {
		if (depth == 0)
			max = ext4_ext_space_root(inode, 1);
		else
			max = ext4_ext_space_root_idx(inode, 1);
	} else {
		if (depth == 0)
			max = ext4_ext_space_block(inode, 1);
		else
			max = ext4_ext_space_block_idx(inode, 1);
	}

	return max;
}

static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
	ext4_fsblk_t block = ext4_ext_pblock(ext);
	int len = ext4_ext_get_actual_len(ext);
	ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);

	/*
	 * We allow neither:
	 *  - zero length
	 *  - overflow/wrap-around
	 */
	if (lblock + len <= lblock)
		return 0;
	return ext4_inode_block_valid(inode, block, len);
}

static int ext4_valid_extent_idx(struct inode *inode,
				 struct ext4_extent_idx *ext_idx)
{
	ext4_fsblk_t block = ext4_idx_pblock(ext_idx);

	return ext4_inode_block_valid(inode, block, 1);
}

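/*
 * ext4_valid_extent_entries() below combines the two per-entry checks
 * above and additionally requires the entries of a node to be sorted
 * and non-overlapping: each entry must start at or after the end of
 * the previous one ('cur' tracks that end as the scan advances).
 */
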
static int ext4_valid_extent_entries(struct inode *inode,
				     struct ext4_extent_header *eh,
				     ext4_lblk_t lblk, ext4_fsblk_t *pblk,
				     int depth)
{
	unsigned short entries;
	ext4_lblk_t lblock = 0;
	ext4_lblk_t cur = 0;

	if (eh->eh_entries == 0)
		return 1;

	entries = le16_to_cpu(eh->eh_entries);

	if (depth == 0) {
		/* leaf entries */
		struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);

		/*
		 * The logical block in the first entry should equal
		 * the number in the index block.
		 */
		if (depth != ext_depth(inode) &&
		    lblk != le32_to_cpu(ext->ee_block))
			return 0;
		while (entries) {
			if (!ext4_valid_extent(inode, ext))
				return 0;

			/* Check for overlapping extents */
			lblock = le32_to_cpu(ext->ee_block);
			if (lblock < cur) {
				*pblk = ext4_ext_pblock(ext);
				return 0;
			}
			cur = lblock + ext4_ext_get_actual_len(ext);
			ext++;
			entries--;
		}
	} else {
		struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);

		/*
		 * The logical block in the first entry should equal
		 * the number in the parent index block.
		 */
		if (depth != ext_depth(inode) &&
		    lblk != le32_to_cpu(ext_idx->ei_block))
			return 0;
		while (entries) {
			if (!ext4_valid_extent_idx(inode, ext_idx))
				return 0;

			/* Check for overlapping index extents */
			lblock = le32_to_cpu(ext_idx->ei_block);
			if (lblock < cur) {
				*pblk = ext4_idx_pblock(ext_idx);
				return 0;
			}
			ext_idx++;
			entries--;
			cur = lblock + 1;
		}
	}
	return 1;
}

static int __ext4_ext_check(const char *function, unsigned int line,
			    struct inode *inode, struct ext4_extent_header *eh,
			    int depth, ext4_fsblk_t pblk, ext4_lblk_t lblk)
{
	const char *error_msg;
	int max = 0, err = -EFSCORRUPTED;

	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
		error_msg = "unexpected eh_depth";
		goto corrupted;
	}
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
		goto corrupted;
	}
	max = ext4_ext_max_entries(inode, depth);
	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
		error_msg = "too large eh_max";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
		goto corrupted;
	}
	if (unlikely((eh->eh_entries == 0) && (depth > 0))) {
		error_msg = "eh_entries is 0 but eh_depth is > 0";
		goto corrupted;
	}
	if (!ext4_valid_extent_entries(inode, eh, lblk, &pblk, depth)) {
		error_msg = "invalid extent entries";
		goto corrupted;
	}
	if (unlikely(depth > 32)) {
		error_msg = "too large eh_depth";
		goto corrupted;
	}
	/* Verify checksum on non-root extent tree nodes */
	if (ext_depth(inode) != depth &&
	    !ext4_extent_block_csum_verify(inode, eh)) {
		error_msg = "extent tree corrupted";
		err = -EFSBADCRC;
		goto corrupted;
	}
	return 0;

corrupted:
	ext4_error_inode_err(inode, function, line, 0, -err,
			     "pblk %llu bad header/extent: %s - magic %x, "
			     "entries %u, max %u(%u), depth %u(%u)",
			     (unsigned long long) pblk, error_msg,
			     le16_to_cpu(eh->eh_magic),
			     le16_to_cpu(eh->eh_entries),
			     le16_to_cpu(eh->eh_max),
			     max, le16_to_cpu(eh->eh_depth), depth);
	return err;
}

#define ext4_ext_check(inode, eh, depth, pblk)			\
	__ext4_ext_check(__func__, __LINE__, (inode), (eh), (depth), (pblk), 0)

int ext4_ext_check_inode(struct inode *inode)
{
	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0);
}

static void ext4_cache_extents(struct inode *inode,
			       struct ext4_extent_header *eh)
{
	struct ext4_extent *ex = EXT_FIRST_EXTENT(eh);
	ext4_lblk_t prev = 0;
	int i;

	for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) {
		unsigned int status = EXTENT_STATUS_WRITTEN;
		ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
		int len = ext4_ext_get_actual_len(ex);

		if (prev && (prev != lblk))
			ext4_es_cache_extent(inode, prev, lblk - prev, ~0,
					     EXTENT_STATUS_HOLE);

		if (ext4_ext_is_unwritten(ex))
			status = EXTENT_STATUS_UNWRITTEN;
		ext4_es_cache_extent(inode, lblk, len,
				     ext4_ext_pblock(ex), status);
		prev = lblk + len;
	}
}

static struct buffer_head *
__read_extent_tree_block(const char *function, unsigned int line,
			 struct inode *inode, struct ext4_extent_idx *idx,
			 int depth, int flags)
{
	struct buffer_head	*bh;
	int			err;
	gfp_t			gfp_flags = __GFP_MOVABLE | GFP_NOFS;
	ext4_fsblk_t		pblk;

	if (flags & EXT4_EX_NOFAIL)
		gfp_flags |= __GFP_NOFAIL;

	pblk = ext4_idx_pblock(idx);
	bh = sb_getblk_gfp(inode->i_sb, pblk, gfp_flags);
	if (unlikely(!bh))
		return ERR_PTR(-ENOMEM);

	if (!bh_uptodate_or_lock(bh)) {
		trace_ext4_ext_load_extent(inode, pblk, _RET_IP_);
		err = ext4_read_bh(bh, 0, NULL);
		if (err < 0)
			goto errout;
	}
	if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE))
		return bh;
	err = __ext4_ext_check(function, line, inode, ext_block_hdr(bh),
			       depth, pblk, le32_to_cpu(idx->ei_block));
	if (err)
		goto errout;
	set_buffer_verified(bh);
	/*
	 * If this is a leaf block, cache all of its entries
	 */
	if (!(flags & EXT4_EX_NOCACHE) && depth == 0) {
		struct ext4_extent_header *eh = ext_block_hdr(bh);
		ext4_cache_extents(inode, eh);
	}
	return bh;
errout:
	put_bh(bh);
	return ERR_PTR(err);
}

#define read_extent_tree_block(inode, idx, depth, flags)		\
	__read_extent_tree_block(__func__, __LINE__, (inode), (idx),	\
				 (depth), (flags))

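/*
 * ext4_ext_precache() below walks the whole tree iteratively, using
 * the path array as an explicit stack instead of recursion, and reads
 * each index block with EXT4_EX_FORCE_CACHE so that every leaf visited
 * populates the extent status tree.
 */
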
/*
 * This function is called to cache a file's extent information in the
 * extent status tree
 */
int ext4_ext_precache(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_ext_path *path = NULL;
	struct buffer_head *bh;
	int i = 0, depth, ret = 0;

	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return 0;	/* not an extent-mapped inode */

	down_read(&ei->i_data_sem);
	depth = ext_depth(inode);

	/* Don't cache anything if there are no external extent blocks */
	if (!depth) {
		up_read(&ei->i_data_sem);
		return ret;
	}

	path = kcalloc(depth + 1, sizeof(struct ext4_ext_path),
		       GFP_NOFS);
	if (path == NULL) {
		up_read(&ei->i_data_sem);
		return -ENOMEM;
	}

	path[0].p_hdr = ext_inode_hdr(inode);
	ret = ext4_ext_check(inode, path[0].p_hdr, depth, 0);
	if (ret)
		goto out;
	path[0].p_idx = EXT_FIRST_INDEX(path[0].p_hdr);
	while (i >= 0) {
		/*
		 * If this is a leaf block or we've reached the end of
		 * the index block, go up
		 */
		if ((i == depth) ||
		    path[i].p_idx > EXT_LAST_INDEX(path[i].p_hdr)) {
			ext4_ext_path_brelse(path + i);
			i--;
			continue;
		}
		bh = read_extent_tree_block(inode, path[i].p_idx++,
					    depth - i - 1,
					    EXT4_EX_FORCE_CACHE);
		if (IS_ERR(bh)) {
			ret = PTR_ERR(bh);
			break;
		}
		i++;
		path[i].p_bh = bh;
		path[i].p_hdr = ext_block_hdr(bh);
		path[i].p_idx = EXT_FIRST_INDEX(path[i].p_hdr);
	}
	ext4_set_inode_state(inode, EXT4_STATE_EXT_PRECACHED);
out:
	up_read(&ei->i_data_sem);
	ext4_free_ext_path(path);
	return ret;
}

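/*
 * The helpers in the EXT_DEBUG block below dump the current path, leaf
 * and move decisions; without EXT_DEBUG they compile away to the empty
 * macros in the #else branch.
 */
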
#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
	int k, l = path->p_depth;

	ext_debug(inode, "path:");
	for (k = 0; k <= l; k++, path++) {
		if (path->p_idx) {
			ext_debug(inode, "  %d->%llu",
				  le32_to_cpu(path->p_idx->ei_block),
				  ext4_idx_pblock(path->p_idx));
		} else if (path->p_ext) {
			ext_debug(inode, "  %d:[%d]%d:%llu ",
				  le32_to_cpu(path->p_ext->ee_block),
				  ext4_ext_is_unwritten(path->p_ext),
				  ext4_ext_get_actual_len(path->p_ext),
				  ext4_ext_pblock(path->p_ext));
		} else
			ext_debug(inode, "  []");
	}
	ext_debug(inode, "\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
	int depth = ext_depth(inode);
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;
	int i;

	if (IS_ERR_OR_NULL(path))
		return;

	eh = path[depth].p_hdr;
	ex = EXT_FIRST_EXTENT(eh);

	ext_debug(inode, "Displaying leaf extents\n");

	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
		ext_debug(inode, "%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
			  ext4_ext_is_unwritten(ex),
			  ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
	}
	ext_debug(inode, "\n");
}

static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
			       ext4_fsblk_t newblock, int level)
{
	int depth = ext_depth(inode);
	struct ext4_extent *ex;

	if (depth != level) {
		struct ext4_extent_idx *idx;
		idx = path[level].p_idx;
		while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
			ext_debug(inode, "%d: move %d:%llu in new index %llu\n",
				  level, le32_to_cpu(idx->ei_block),
				  ext4_idx_pblock(idx), newblock);
			idx++;
		}

		return;
	}

	ex = path[depth].p_ext;
	while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug(inode, "move %d:%llu:[%d]%d in new leaf %llu\n",
			  le32_to_cpu(ex->ee_block),
			  ext4_ext_pblock(ex),
			  ext4_ext_is_unwritten(ex),
			  ext4_ext_get_actual_len(ex),
			  newblock);
		ex++;
	}
}

#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#define ext4_ext_show_move(inode, path, newblock, level)
#endif

/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch_idx(struct inode *inode,
		       struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;

	ext_debug(inode, "binsearch for %u(idx):  ", block);

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_LAST_INDEX(eh);
	while (l <= r) {
		m = l + (r - l) / 2;
		ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l,
			  le32_to_cpu(l->ei_block), m, le32_to_cpu(m->ei_block),
			  r, le32_to_cpu(r->ei_block));

		if (block < le32_to_cpu(m->ei_block))
			r = m - 1;
		else
			l = m + 1;
	}

	path->p_idx = l - 1;
	ext_debug(inode, "  -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
		  ext4_idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent_idx *chix, *ix;
		int k;

		chix = ix = EXT_FIRST_INDEX(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
			if (k != 0 && le32_to_cpu(ix->ei_block) <=
			    le32_to_cpu(ix[-1].ei_block)) {
				printk(KERN_DEBUG "k=%d, ix=0x%p, "
				       "first=0x%p\n", k,
				       ix, EXT_FIRST_INDEX(eh));
				printk(KERN_DEBUG "%u <= %u\n",
				       le32_to_cpu(ix->ei_block),
				       le32_to_cpu(ix[-1].ei_block));
			}
			BUG_ON(k && le32_to_cpu(ix->ei_block)
					   <= le32_to_cpu(ix[-1].ei_block));
			if (block < le32_to_cpu(ix->ei_block))
				break;
			chix = ix;
		}
		BUG_ON(chix != path->p_idx);
	}
#endif

}

"%p(%u):%p(%u):%p(%u) ", l, 768 le32_to_cpu(l->ei_bl 765 le32_to_cpu(l->ei_block), m, le32_to_cpu(m->ei_block), 769 r, le32_to_cpu(r->ei 766 r, le32_to_cpu(r->ei_block)); 770 767 771 if (block < le32_to_cpu(m->ei_ 768 if (block < le32_to_cpu(m->ei_block)) 772 r = m - 1; 769 r = m - 1; 773 else 770 else 774 l = m + 1; 771 l = m + 1; 775 } 772 } 776 773 777 path->p_idx = l - 1; 774 path->p_idx = l - 1; 778 ext_debug(inode, " -> %u->%lld ", le3 775 ext_debug(inode, " -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block), 779 ext4_idx_pblock(path->p_idx) 776 ext4_idx_pblock(path->p_idx)); 780 777 781 #ifdef CHECK_BINSEARCH 778 #ifdef CHECK_BINSEARCH 782 { 779 { 783 struct ext4_extent_idx *chix, 780 struct ext4_extent_idx *chix, *ix; 784 int k; 781 int k; 785 782 786 chix = ix = EXT_FIRST_INDEX(eh 783 chix = ix = EXT_FIRST_INDEX(eh); 787 for (k = 0; k < le16_to_cpu(eh 784 for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) { 788 if (k != 0 && le32_to_ 785 if (k != 0 && le32_to_cpu(ix->ei_block) <= 789 le32_to_cpu(ix[-1] 786 le32_to_cpu(ix[-1].ei_block)) { 790 printk(KERN_DE 787 printk(KERN_DEBUG "k=%d, ix=0x%p, " 791 "first= 788 "first=0x%p\n", k, 792 ix, EXT 789 ix, EXT_FIRST_INDEX(eh)); 793 printk(KERN_DE 790 printk(KERN_DEBUG "%u <= %u\n", 794 le32_to 791 le32_to_cpu(ix->ei_block), 795 le32_to 792 le32_to_cpu(ix[-1].ei_block)); 796 } 793 } 797 BUG_ON(k && le32_to_cp 794 BUG_ON(k && le32_to_cpu(ix->ei_block) 798 <= 795 <= le32_to_cpu(ix[-1].ei_block)); 799 if (block < le32_to_cp 796 if (block < le32_to_cpu(ix->ei_block)) 800 break; 797 break; 801 chix = ix; 798 chix = ix; 802 } 799 } 803 BUG_ON(chix != path->p_idx); 800 BUG_ON(chix != path->p_idx); 804 } 801 } 805 #endif 802 #endif 806 803 807 } 804 } 808 805 809 /* 806 /* 810 * ext4_ext_binsearch: 807 * ext4_ext_binsearch: 811 * binary search for closest extent of the giv 808 * binary search for closest extent of the given block 812 * the header must be checked before calling t 809 * the header must be checked before calling this 813 */ 810 */ 814 static void 811 static void 815 ext4_ext_binsearch(struct inode *inode, 812 ext4_ext_binsearch(struct inode *inode, 816 struct ext4_ext_path *path, ex 813 struct ext4_ext_path *path, ext4_lblk_t block) 817 { 814 { 818 struct ext4_extent_header *eh = path-> 815 struct ext4_extent_header *eh = path->p_hdr; 819 struct ext4_extent *r, *l, *m; 816 struct ext4_extent *r, *l, *m; 820 817 821 if (eh->eh_entries == 0) { 818 if (eh->eh_entries == 0) { 822 /* 819 /* 823 * this leaf is empty: 820 * this leaf is empty: 824 * we get such a leaf in split 821 * we get such a leaf in split/add case 825 */ 822 */ 826 return; 823 return; 827 } 824 } 828 825 829 ext_debug(inode, "binsearch for %u: " 826 ext_debug(inode, "binsearch for %u: ", block); 830 827 831 l = EXT_FIRST_EXTENT(eh) + 1; 828 l = EXT_FIRST_EXTENT(eh) + 1; 832 r = EXT_LAST_EXTENT(eh); 829 r = EXT_LAST_EXTENT(eh); 833 830 834 while (l <= r) { 831 while (l <= r) { 835 m = l + (r - l) / 2; 832 m = l + (r - l) / 2; 836 ext_debug(inode, "%p(%u):%p(%u 833 ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l, 837 le32_to_cpu(l->ee_bl 834 le32_to_cpu(l->ee_block), m, le32_to_cpu(m->ee_block), 838 r, le32_to_cpu(r->ee 835 r, le32_to_cpu(r->ee_block)); 839 836 840 if (block < le32_to_cpu(m->ee_ 837 if (block < le32_to_cpu(m->ee_block)) 841 r = m - 1; 838 r = m - 1; 842 else 839 else 843 l = m + 1; 840 l = m + 1; 844 } 841 } 845 842 846 path->p_ext = l - 1; 843 path->p_ext = l - 1; 847 ext_debug(inode, " -> %d:%llu:[%d]%d 844 ext_debug(inode, " -> %d:%llu:[%d]%d ", 848 
void ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
	struct ext4_extent_header *eh;

	eh = ext_inode_hdr(inode);
	eh->eh_depth = 0;
	eh->eh_entries = 0;
	eh->eh_magic = EXT4_EXT_MAGIC;
	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
	eh->eh_generation = 0;
	ext4_mark_inode_dirty(handle, inode);
}

struct ext4_ext_path *
ext4_find_extent(struct inode *inode, ext4_lblk_t block,
		 struct ext4_ext_path *path, int flags)
{
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	short int depth, i, ppos = 0;
	int ret;
	gfp_t gfp_flags = GFP_NOFS;

	if (flags & EXT4_EX_NOFAIL)
		gfp_flags |= __GFP_NOFAIL;

	eh = ext_inode_hdr(inode);
	depth = ext_depth(inode);
	if (depth < 0 || depth > EXT4_MAX_EXTENT_DEPTH) {
		EXT4_ERROR_INODE(inode, "inode has invalid extent depth: %d",
				 depth);
		ret = -EFSCORRUPTED;
		goto err;
	}

	if (path) {
		ext4_ext_drop_refs(path);
		if (depth > path[0].p_maxdepth) {
			kfree(path);
			path = NULL;
		}
	}
	if (!path) {
		/* account possible depth increase */
		path = kcalloc(depth + 2, sizeof(struct ext4_ext_path),
			       gfp_flags);
		if (unlikely(!path))
			return ERR_PTR(-ENOMEM);
		path[0].p_maxdepth = depth + 1;
	}
	path[0].p_hdr = eh;
	path[0].p_bh = NULL;

	i = depth;
	if (!(flags & EXT4_EX_NOCACHE) && depth == 0)
		ext4_cache_extents(inode, eh);
	/* walk through the tree */
	while (i) {
		ext_debug(inode, "depth %d: num %d, max %d\n",
			  ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;

		bh = read_extent_tree_block(inode, path[ppos].p_idx, --i, flags);
		if (IS_ERR(bh)) {
			ret = PTR_ERR(bh);
			goto err;
		}

		eh = ext_block_hdr(bh);
		ppos++;
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
	}

	path[ppos].p_depth = i;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;

	/* find extent */
	ext4_ext_binsearch(inode, path + ppos, block);
	/* if not an empty leaf */
	if (path[ppos].p_ext)
		path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);

	ext4_ext_show_path(inode, path);

	return path;

err:
	ext4_free_ext_path(path);
	return ERR_PTR(ret);
}

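/*
 * Typical usage of ext4_find_extent() (hypothetical caller, shown for
 * illustration). The returned array has one element per tree level:
 * path[0] describes the root in i_data and path[ext_depth(inode)] the
 * leaf, whose p_ext is NULL only when the leaf is empty. The caller
 * must release the array with ext4_free_ext_path():
 *
 *	path = ext4_find_extent(inode, lblk, NULL, 0);
 *	if (IS_ERR(path))
 *		return PTR_ERR(path);
 *	ex = path[ext_depth(inode)].p_ext;
 *	ext4_free_ext_path(path);
 */
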
/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
				 struct ext4_ext_path *curp,
				 int logical, ext4_fsblk_t ptr)
{
	struct ext4_extent_idx *ix;
	int len, err;

	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		return err;

	if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d == ei_block %d!",
				 logical, le32_to_cpu(curp->p_idx->ei_block));
		return -EFSCORRUPTED;
	}

	if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
			     >= le16_to_cpu(curp->p_hdr->eh_max))) {
		EXT4_ERROR_INODE(inode,
				 "eh_entries %d >= eh_max %d!",
				 le16_to_cpu(curp->p_hdr->eh_entries),
				 le16_to_cpu(curp->p_hdr->eh_max));
		return -EFSCORRUPTED;
	}

	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		/* insert after */
		ext_debug(inode, "insert new index %d after: %llu\n",
			  logical, ptr);
		ix = curp->p_idx + 1;
	} else {
		/* insert before */
		ext_debug(inode, "insert new index %d before: %llu\n",
			  logical, ptr);
		ix = curp->p_idx;
	}

	if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
		return -EFSCORRUPTED;
	}

	len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
	BUG_ON(len < 0);
	if (len > 0) {
		ext_debug(inode, "insert new index %d: "
				"move %d indices from 0x%p to 0x%p\n",
				logical, len, ix, ix + 1);
		memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
	}

	ix->ei_block = cpu_to_le32(logical);
	ext4_idx_store_pblock(ix, ptr);
	le16_add_cpu(&curp->p_hdr->eh_entries, 1);

	if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
		return -EFSCORRUPTED;
	}

	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);

	return err;
}

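/*
 * Example for ext4_ext_insert_index() (illustrative): inserting logical
 * block 70 into an index node holding [10|50|90] with curp->p_idx at
 * the 50 entry takes the "insert after" branch; the 90 entry is shifted
 * right by the memmove and the new 70 entry fills the gap.
 */
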
/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
			  unsigned int flags,
			  struct ext4_ext_path *path,
			  struct ext4_extent *newext, int at)
{
	struct buffer_head *bh = NULL;
	int depth = ext_depth(inode);
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	int i = at, k, m, a;
	ext4_fsblk_t newblock, oldblock;
	__le32 border;
	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
	gfp_t gfp_flags = GFP_NOFS;
	int err = 0;
	size_t ext_size = 0;

	if (flags & EXT4_EX_NOFAIL)
		gfp_flags |= __GFP_NOFAIL;

	/* make decision: where to split? */
	/* FIXME: now decision is simplest: at current extent */

	/* if current leaf will be split, then we should use
	 * border from split point */
	if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
		EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
		return -EFSCORRUPTED;
	}
	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
		border = path[depth].p_ext[1].ee_block;
		ext_debug(inode, "leaf will be split."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	} else {
		border = newext->ee_block;
		ext_debug(inode, "leaf will be added."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	}

	/*
	 * If an error occurs, we break processing and mark the
	 * filesystem read-only. The index won't be inserted and the
	 * tree will be left in a consistent state. The next mount
	 * will repair buffers too.
	 */

	/*
	 * Get array to track all allocated blocks.
	 * We need this to handle errors and free blocks
	 * upon them.
	 */
	ablocks = kcalloc(depth, sizeof(ext4_fsblk_t), gfp_flags);
	if (!ablocks)
		return -ENOMEM;

	/* allocate all needed blocks */
	ext_debug(inode, "allocate %d blocks for indexes/leaf\n", depth - at);
	for (a = 0; a < depth - at; a++) {
		newblock = ext4_ext_new_meta_block(handle, inode, path,
						   newext, &err, flags);
		if (newblock == 0)
			goto cleanup;
		ablocks[a] = newblock;
	}

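	/*
	 * All blocks needed for the split have now been allocated
	 * before anything is linked into the tree, so a failure below
	 * can simply free the blocks recorded in ablocks[] during
	 * cleanup and leave the on-disk tree untouched.
	 */
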
	/* initialize new leaf */
	newblock = ablocks[--a];
	if (unlikely(newblock == 0)) {
		EXT4_ERROR_INODE(inode, "newblock == 0!");
		err = -EFSCORRUPTED;
		goto cleanup;
	}
	bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
	if (unlikely(!bh)) {
		err = -ENOMEM;
		goto cleanup;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, inode->i_sb, bh,
					     EXT4_JTR_NONE);
	if (err)
		goto cleanup;

	neh = ext_block_hdr(bh);
	neh->eh_entries = 0;
	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	neh->eh_depth = 0;
	neh->eh_generation = 0;

	/* move remainder of path[depth] to the new leaf */
	if (unlikely(path[depth].p_hdr->eh_entries !=
		     path[depth].p_hdr->eh_max)) {
		EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
				 path[depth].p_hdr->eh_entries,
				 path[depth].p_hdr->eh_max);
		err = -EFSCORRUPTED;
		goto cleanup;
	}
1150 path[depth].p_hdr->eh_max); 1151 err = -EFSCORRUPTED; 1151 err = -EFSCORRUPTED; 1152 goto cleanup; 1152 goto cleanup; 1153 } 1153 } 1154 /* start copy from next extent */ 1154 /* start copy from next extent */ 1155 m = EXT_MAX_EXTENT(path[depth].p_hdr) 1155 m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++; 1156 ext4_ext_show_move(inode, path, newbl 1156 ext4_ext_show_move(inode, path, newblock, depth); 1157 if (m) { 1157 if (m) { 1158 struct ext4_extent *ex; 1158 struct ext4_extent *ex; 1159 ex = EXT_FIRST_EXTENT(neh); 1159 ex = EXT_FIRST_EXTENT(neh); 1160 memmove(ex, path[depth].p_ext 1160 memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m); 1161 le16_add_cpu(&neh->eh_entries 1161 le16_add_cpu(&neh->eh_entries, m); 1162 } 1162 } 1163 1163 1164 /* zero out unused area in the extent 1164 /* zero out unused area in the extent block */ 1165 ext_size = sizeof(struct ext4_extent_ 1165 ext_size = sizeof(struct ext4_extent_header) + 1166 sizeof(struct ext4_extent) * 1166 sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries); 1167 memset(bh->b_data + ext_size, 0, inod 1167 memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size); 1168 ext4_extent_block_csum_set(inode, neh 1168 ext4_extent_block_csum_set(inode, neh); 1169 set_buffer_uptodate(bh); 1169 set_buffer_uptodate(bh); 1170 unlock_buffer(bh); 1170 unlock_buffer(bh); 1171 1171 1172 err = ext4_handle_dirty_metadata(hand 1172 err = ext4_handle_dirty_metadata(handle, inode, bh); 1173 if (err) 1173 if (err) 1174 goto cleanup; 1174 goto cleanup; 1175 brelse(bh); 1175 brelse(bh); 1176 bh = NULL; 1176 bh = NULL; 1177 1177 1178 /* correct old leaf */ 1178 /* correct old leaf */ 1179 if (m) { 1179 if (m) { 1180 err = ext4_ext_get_access(han 1180 err = ext4_ext_get_access(handle, inode, path + depth); 1181 if (err) 1181 if (err) 1182 goto cleanup; 1182 goto cleanup; 1183 le16_add_cpu(&path[depth].p_h 1183 le16_add_cpu(&path[depth].p_hdr->eh_entries, -m); 1184 err = ext4_ext_dirty(handle, 1184 err = ext4_ext_dirty(handle, inode, path + depth); 1185 if (err) 1185 if (err) 1186 goto cleanup; 1186 goto cleanup; 1187 1187 1188 } 1188 } 1189 1189 1190 /* create intermediate indexes */ 1190 /* create intermediate indexes */ 1191 k = depth - at - 1; 1191 k = depth - at - 1; 1192 if (unlikely(k < 0)) { 1192 if (unlikely(k < 0)) { 1193 EXT4_ERROR_INODE(inode, "k %d 1193 EXT4_ERROR_INODE(inode, "k %d < 0!", k); 1194 err = -EFSCORRUPTED; 1194 err = -EFSCORRUPTED; 1195 goto cleanup; 1195 goto cleanup; 1196 } 1196 } 1197 if (k) 1197 if (k) 1198 ext_debug(inode, "create %d i 1198 ext_debug(inode, "create %d intermediate indices\n", k); 1199 /* insert new index into current inde 1199 /* insert new index into current index block */ 1200 /* current depth stored in i var */ 1200 /* current depth stored in i var */ 1201 i = depth - 1; 1201 i = depth - 1; 1202 while (k--) { 1202 while (k--) { 1203 oldblock = newblock; 1203 oldblock = newblock; 1204 newblock = ablocks[--a]; 1204 newblock = ablocks[--a]; 1205 bh = sb_getblk(inode->i_sb, n 1205 bh = sb_getblk(inode->i_sb, newblock); 1206 if (unlikely(!bh)) { 1206 if (unlikely(!bh)) { 1207 err = -ENOMEM; 1207 err = -ENOMEM; 1208 goto cleanup; 1208 goto cleanup; 1209 } 1209 } 1210 lock_buffer(bh); 1210 lock_buffer(bh); 1211 1211 1212 err = ext4_journal_get_create 1212 err = ext4_journal_get_create_access(handle, inode->i_sb, bh, 1213 1213 EXT4_JTR_NONE); 1214 if (err) 1214 if (err) 1215 goto cleanup; 1215 goto cleanup; 1216 1216 1217 neh = ext_block_hdr(bh); 1217 neh = 
ext_block_hdr(bh); 1218 neh->eh_entries = cpu_to_le16 1218 neh->eh_entries = cpu_to_le16(1); 1219 neh->eh_magic = EXT4_EXT_MAGI 1219 neh->eh_magic = EXT4_EXT_MAGIC; 1220 neh->eh_max = cpu_to_le16(ext 1220 neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0)); 1221 neh->eh_depth = cpu_to_le16(d 1221 neh->eh_depth = cpu_to_le16(depth - i); 1222 neh->eh_generation = 0; 1222 neh->eh_generation = 0; 1223 fidx = EXT_FIRST_INDEX(neh); 1223 fidx = EXT_FIRST_INDEX(neh); 1224 fidx->ei_block = border; 1224 fidx->ei_block = border; 1225 ext4_idx_store_pblock(fidx, o 1225 ext4_idx_store_pblock(fidx, oldblock); 1226 1226 1227 ext_debug(inode, "int.index a 1227 ext_debug(inode, "int.index at %d (block %llu): %u -> %llu\n", 1228 i, newblock, 1228 i, newblock, le32_to_cpu(border), oldblock); 1229 1229 1230 /* move remainder of path[i] 1230 /* move remainder of path[i] to the new index block */ 1231 if (unlikely(EXT_MAX_INDEX(pa 1231 if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) != 1232 EXT_L 1232 EXT_LAST_INDEX(path[i].p_hdr))) { 1233 EXT4_ERROR_INODE(inod 1233 EXT4_ERROR_INODE(inode, 1234 "EXT 1234 "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!", 1235 le32 1235 le32_to_cpu(path[i].p_ext->ee_block)); 1236 err = -EFSCORRUPTED; 1236 err = -EFSCORRUPTED; 1237 goto cleanup; 1237 goto cleanup; 1238 } 1238 } 1239 /* start copy indexes */ 1239 /* start copy indexes */ 1240 m = EXT_MAX_INDEX(path[i].p_h 1240 m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++; 1241 ext_debug(inode, "cur 0x%p, l 1241 ext_debug(inode, "cur 0x%p, last 0x%p\n", path[i].p_idx, 1242 EXT_MAX_INDEX 1242 EXT_MAX_INDEX(path[i].p_hdr)); 1243 ext4_ext_show_move(inode, pat 1243 ext4_ext_show_move(inode, path, newblock, i); 1244 if (m) { 1244 if (m) { 1245 memmove(++fidx, path[ 1245 memmove(++fidx, path[i].p_idx, 1246 sizeof(struct 1246 sizeof(struct ext4_extent_idx) * m); 1247 le16_add_cpu(&neh->eh 1247 le16_add_cpu(&neh->eh_entries, m); 1248 } 1248 } 1249 /* zero out unused area in th 1249 /* zero out unused area in the extent block */ 1250 ext_size = sizeof(struct ext4 1250 ext_size = sizeof(struct ext4_extent_header) + 1251 (sizeof(struct ext4_extent 1251 (sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries)); 1252 memset(bh->b_data + ext_size, 1252 memset(bh->b_data + ext_size, 0, 1253 inode->i_sb->s_blocks 1253 inode->i_sb->s_blocksize - ext_size); 1254 ext4_extent_block_csum_set(in 1254 ext4_extent_block_csum_set(inode, neh); 1255 set_buffer_uptodate(bh); 1255 set_buffer_uptodate(bh); 1256 unlock_buffer(bh); 1256 unlock_buffer(bh); 1257 1257 1258 err = ext4_handle_dirty_metad 1258 err = ext4_handle_dirty_metadata(handle, inode, bh); 1259 if (err) 1259 if (err) 1260 goto cleanup; 1260 goto cleanup; 1261 brelse(bh); 1261 brelse(bh); 1262 bh = NULL; 1262 bh = NULL; 1263 1263 1264 /* correct old index */ 1264 /* correct old index */ 1265 if (m) { 1265 if (m) { 1266 err = ext4_ext_get_ac 1266 err = ext4_ext_get_access(handle, inode, path + i); 1267 if (err) 1267 if (err) 1268 goto cleanup; 1268 goto cleanup; 1269 le16_add_cpu(&path[i] 1269 le16_add_cpu(&path[i].p_hdr->eh_entries, -m); 1270 err = ext4_ext_dirty( 1270 err = ext4_ext_dirty(handle, inode, path + i); 1271 if (err) 1271 if (err) 1272 goto cleanup; 1272 goto cleanup; 1273 } 1273 } 1274 1274 1275 i--; 1275 i--; 1276 } 1276 } 1277 1277 1278 /* insert new index */ 1278 /* insert new index */ 1279 err = ext4_ext_insert_index(handle, i 1279 err = ext4_ext_insert_index(handle, inode, path + at, 1280 le32_to_c 1280 le32_to_cpu(border), newblock); 1281 1281 1282 cleanup: 1282 
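/*
 * Illustrative sketch, not part of the original file: both move steps
 * in ext4_ext_split() above (extents out of the full leaf, then
 * indexes out of each full index node) are the same operation --
 * everything after the split point migrates to the fresh block, and
 * both entry counts are adjusted by the same amount "m". The types
 * and names below are simplified stand-ins, assuming the old node is
 * full (entries == max), which the code above has already verified.
 */
#include <string.h>

struct toy_entry { unsigned int first_lblk; unsigned long long ptr; };

static void toy_move_tail(struct toy_entry *old, unsigned int *old_cnt,
                          unsigned int split_pos,  /* first entry to move */
                          struct toy_entry *dst, unsigned int *dst_cnt)
{
        unsigned int m = *old_cnt - split_pos;  /* entries after the split */

        if (m) {
                memmove(dst + *dst_cnt, old + split_pos, m * sizeof(*old));
                *dst_cnt += m;          /* new node gained m entries */
                *old_cnt -= m;          /* old node keeps [0, split_pos) */
        }
}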
cleanup: 1283 if (bh) { 1283 if (bh) { 1284 if (buffer_locked(bh)) 1284 if (buffer_locked(bh)) 1285 unlock_buffer(bh); 1285 unlock_buffer(bh); 1286 brelse(bh); 1286 brelse(bh); 1287 } 1287 } 1288 1288 1289 if (err) { 1289 if (err) { 1290 /* free all allocated blocks 1290 /* free all allocated blocks in error case */ 1291 for (i = 0; i < depth; i++) { 1291 for (i = 0; i < depth; i++) { 1292 if (!ablocks[i]) 1292 if (!ablocks[i]) 1293 continue; 1293 continue; 1294 ext4_free_blocks(hand 1294 ext4_free_blocks(handle, inode, NULL, ablocks[i], 1, 1295 EXT4 1295 EXT4_FREE_BLOCKS_METADATA); 1296 } 1296 } 1297 } 1297 } 1298 kfree(ablocks); 1298 kfree(ablocks); 1299 1299 1300 return err; 1300 return err; 1301 } 1301 } 1302 1302 1303 /* 1303 /* 1304 * ext4_ext_grow_indepth: 1304 * ext4_ext_grow_indepth: 1305 * implements tree growing procedure: 1305 * implements tree growing procedure: 1306 * - allocates new block 1306 * - allocates new block 1307 * - moves top-level data (index block or lea 1307 * - moves top-level data (index block or leaf) into the new block 1308 * - initializes new top-level, creating inde 1308 * - initializes new top-level, creating index that points to the 1309 * just created block 1309 * just created block 1310 */ 1310 */ 1311 static int ext4_ext_grow_indepth(handle_t *ha 1311 static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode, 1312 unsigned int 1312 unsigned int flags) 1313 { 1313 { 1314 struct ext4_extent_header *neh; 1314 struct ext4_extent_header *neh; 1315 struct buffer_head *bh; 1315 struct buffer_head *bh; 1316 ext4_fsblk_t newblock, goal = 0; 1316 ext4_fsblk_t newblock, goal = 0; 1317 struct ext4_super_block *es = EXT4_SB 1317 struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es; 1318 int err = 0; 1318 int err = 0; 1319 size_t ext_size = 0; 1319 size_t ext_size = 0; 1320 1320 1321 /* Try to prepend new index to old on 1321 /* Try to prepend new index to old one */ 1322 if (ext_depth(inode)) 1322 if (ext_depth(inode)) 1323 goal = ext4_idx_pblock(EXT_FI 1323 goal = ext4_idx_pblock(EXT_FIRST_INDEX(ext_inode_hdr(inode))); 1324 if (goal > le32_to_cpu(es->s_first_da 1324 if (goal > le32_to_cpu(es->s_first_data_block)) { 1325 flags |= EXT4_MB_HINT_TRY_GOA 1325 flags |= EXT4_MB_HINT_TRY_GOAL; 1326 goal--; 1326 goal--; 1327 } else 1327 } else 1328 goal = ext4_inode_to_goal_blo 1328 goal = ext4_inode_to_goal_block(inode); 1329 newblock = ext4_new_meta_blocks(handl 1329 newblock = ext4_new_meta_blocks(handle, inode, goal, flags, 1330 NULL, 1330 NULL, &err); 1331 if (newblock == 0) 1331 if (newblock == 0) 1332 return err; 1332 return err; 1333 1333 1334 bh = sb_getblk_gfp(inode->i_sb, newbl 1334 bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS); 1335 if (unlikely(!bh)) 1335 if (unlikely(!bh)) 1336 return -ENOMEM; 1336 return -ENOMEM; 1337 lock_buffer(bh); 1337 lock_buffer(bh); 1338 1338 1339 err = ext4_journal_get_create_access( 1339 err = ext4_journal_get_create_access(handle, inode->i_sb, bh, 1340 1340 EXT4_JTR_NONE); 1341 if (err) { 1341 if (err) { 1342 unlock_buffer(bh); 1342 unlock_buffer(bh); 1343 goto out; 1343 goto out; 1344 } 1344 } 1345 1345 1346 ext_size = sizeof(EXT4_I(inode)->i_da 1346 ext_size = sizeof(EXT4_I(inode)->i_data); 1347 /* move top-level index/leaf into new 1347 /* move top-level index/leaf into new block */ 1348 memmove(bh->b_data, EXT4_I(inode)->i_ 1348 memmove(bh->b_data, EXT4_I(inode)->i_data, ext_size); 1349 /* zero out unused area in the extent 1349 /* zero out unused area in the extent block */ 1350 
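/*
 * Illustrative sketch, not part of the original file: the reshaping
 * performed by ext4_ext_grow_indepth(), begun above and continued
 * below. The root always stays inside the inode's i_data area, so the
 * tree grows by copying the root's contents into a fresh block and
 * leaving behind a one-entry index:
 *
 *   before:  root(depth N):   [ e0 e1 ... ek ]
 *   after:   root(depth N+1): [ idx ] --> new block: [ e0 e1 ... ek ]
 *
 * Miniature model with invented types; the real code also recomputes
 * eh_max because the in-inode root and a full block differ in capacity.
 */
#include <string.h>

struct toy_entry { unsigned int first_lblk; unsigned long long ptr; };
struct toy_root { struct toy_entry e[4]; int nr; int depth; };

static void toy_grow_indepth(struct toy_root *root, struct toy_entry *newblk,
                             int *newblk_nr, unsigned long long newblk_addr)
{
        /* old top level (leaf or index node) moves wholesale */
        memcpy(newblk, root->e, root->nr * sizeof(root->e[0]));
        *newblk_nr = root->nr;
        /* root shrinks to a single index entry pointing at the copy */
        root->e[0].first_lblk = newblk[0].first_lblk;
        root->e[0].ptr = newblk_addr;
        root->nr = 1;
        root->depth++;  /* tree is one level deeper; the root never moved */
}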
memset(bh->b_data + ext_size, 0, inod 1350 memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size); 1351 1351 1352 /* set size of new block */ 1352 /* set size of new block */ 1353 neh = ext_block_hdr(bh); 1353 neh = ext_block_hdr(bh); 1354 /* old root could have indexes or lea 1354 /* old root could have indexes or leaves 1355 * so calculate e_max right way */ 1355 * so calculate e_max right way */ 1356 if (ext_depth(inode)) 1356 if (ext_depth(inode)) 1357 neh->eh_max = cpu_to_le16(ext 1357 neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0)); 1358 else 1358 else 1359 neh->eh_max = cpu_to_le16(ext 1359 neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0)); 1360 neh->eh_magic = EXT4_EXT_MAGIC; 1360 neh->eh_magic = EXT4_EXT_MAGIC; 1361 ext4_extent_block_csum_set(inode, neh 1361 ext4_extent_block_csum_set(inode, neh); 1362 set_buffer_uptodate(bh); 1362 set_buffer_uptodate(bh); 1363 set_buffer_verified(bh); 1363 set_buffer_verified(bh); 1364 unlock_buffer(bh); 1364 unlock_buffer(bh); 1365 1365 1366 err = ext4_handle_dirty_metadata(hand 1366 err = ext4_handle_dirty_metadata(handle, inode, bh); 1367 if (err) 1367 if (err) 1368 goto out; 1368 goto out; 1369 1369 1370 /* Update top-level index: num,max,po 1370 /* Update top-level index: num,max,pointer */ 1371 neh = ext_inode_hdr(inode); 1371 neh = ext_inode_hdr(inode); 1372 neh->eh_entries = cpu_to_le16(1); 1372 neh->eh_entries = cpu_to_le16(1); 1373 ext4_idx_store_pblock(EXT_FIRST_INDEX 1373 ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock); 1374 if (neh->eh_depth == 0) { 1374 if (neh->eh_depth == 0) { 1375 /* Root extent block becomes 1375 /* Root extent block becomes index block */ 1376 neh->eh_max = cpu_to_le16(ext 1376 neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0)); 1377 EXT_FIRST_INDEX(neh)->ei_bloc 1377 EXT_FIRST_INDEX(neh)->ei_block = 1378 EXT_FIRST_EXTENT(neh) 1378 EXT_FIRST_EXTENT(neh)->ee_block; 1379 } 1379 } 1380 ext_debug(inode, "new root: num %d(%d 1380 ext_debug(inode, "new root: num %d(%d), lblock %d, ptr %llu\n", 1381 le16_to_cpu(neh->eh_entries 1381 le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max), 1382 le32_to_cpu(EXT_FIRST_INDEX 1382 le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block), 1383 ext4_idx_pblock(EXT_FIRST_I 1383 ext4_idx_pblock(EXT_FIRST_INDEX(neh))); 1384 1384 1385 le16_add_cpu(&neh->eh_depth, 1); 1385 le16_add_cpu(&neh->eh_depth, 1); 1386 err = ext4_mark_inode_dirty(handle, i 1386 err = ext4_mark_inode_dirty(handle, inode); 1387 out: 1387 out: 1388 brelse(bh); 1388 brelse(bh); 1389 1389 1390 return err; 1390 return err; 1391 } 1391 } 1392 1392 1393 /* 1393 /* 1394 * ext4_ext_create_new_leaf: 1394 * ext4_ext_create_new_leaf: 1395 * finds empty index and adds new leaf. 1395 * finds empty index and adds new leaf. 1396 * if no free index is found, then it request 1396 * if no free index is found, then it requests in-depth growing. 1397 */ 1397 */ 1398 static struct ext4_ext_path * !! 1398 static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode, 1399 ext4_ext_create_new_leaf(handle_t *handle, st !! 1399 unsigned int mb_flags, 1400 unsigned int mb_flag !! 1400 unsigned int gb_flags, 1401 struct ext4_ext_path !! 1401 struct ext4_ext_path **ppath, 1402 struct ext4_extent * !! 
1402 struct ext4_extent *newext) 1403 { 1403 { >> 1404 struct ext4_ext_path *path = *ppath; 1404 struct ext4_ext_path *curp; 1405 struct ext4_ext_path *curp; 1405 int depth, i, err = 0; 1406 int depth, i, err = 0; 1406 ext4_lblk_t ee_block = le32_to_cpu(ne << 1407 1407 1408 repeat: 1408 repeat: 1409 i = depth = ext_depth(inode); 1409 i = depth = ext_depth(inode); 1410 1410 1411 /* walk up to the tree and look for f 1411 /* walk up to the tree and look for free index entry */ 1412 curp = path + depth; 1412 curp = path + depth; 1413 while (i > 0 && !EXT_HAS_FREE_INDEX(c 1413 while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) { 1414 i--; 1414 i--; 1415 curp--; 1415 curp--; 1416 } 1416 } 1417 1417 1418 /* we use already allocated block for 1418 /* we use already allocated block for index block, 1419 * so subsequent data blocks should b 1419 * so subsequent data blocks should be contiguous */ 1420 if (EXT_HAS_FREE_INDEX(curp)) { 1420 if (EXT_HAS_FREE_INDEX(curp)) { 1421 /* if we found index with fre 1421 /* if we found index with free entry, then use that 1422 * entry: create all needed s 1422 * entry: create all needed subtree and add new leaf */ 1423 err = ext4_ext_split(handle, 1423 err = ext4_ext_split(handle, inode, mb_flags, path, newext, i); 1424 if (err) 1424 if (err) 1425 goto errout; !! 1425 goto out; 1426 1426 1427 /* refill path */ 1427 /* refill path */ 1428 path = ext4_find_extent(inode !! 1428 path = ext4_find_extent(inode, 1429 return path; !! 1429 (ext4_lblk_t)le32_to_cpu(newext->ee_block), 1430 } !! 1430 ppath, gb_flags); 1431 !! 1431 if (IS_ERR(path)) 1432 /* tree is full, time to grow in dept !! 1432 err = PTR_ERR(path); 1433 err = ext4_ext_grow_indepth(handle, i !! 1433 } else { 1434 if (err) !! 1434 /* tree is full, time to grow in depth */ 1435 goto errout; !! 1435 err = ext4_ext_grow_indepth(handle, inode, mb_flags); >> 1436 if (err) >> 1437 goto out; 1436 1438 1437 /* refill path */ !! 1439 /* refill path */ 1438 path = ext4_find_extent(inode, ee_blo !! 1440 path = ext4_find_extent(inode, 1439 if (IS_ERR(path)) !! 1441 (ext4_lblk_t)le32_to_cpu(newext->ee_block), 1440 return path; !! 1442 ppath, gb_flags); >> 1443 if (IS_ERR(path)) { >> 1444 err = PTR_ERR(path); >> 1445 goto out; >> 1446 } 1441 1447 1442 /* !! 1448 /* 1443 * only first (depth 0 -> 1) produces !! 1449 * only first (depth 0 -> 1) produces free space; 1444 * in all other cases we have to spli !! 1450 * in all other cases we have to split the grown tree 1445 */ !! 1451 */ 1446 depth = ext_depth(inode); !! 1452 depth = ext_depth(inode); 1447 if (path[depth].p_hdr->eh_entries == !! 1453 if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) { 1448 /* now we need to split */ !! 1454 /* now we need to split */ 1449 goto repeat; !! 1455 goto repeat; >> 1456 } 1450 } 1457 } 1451 1458 1452 return path; !! 1459 out: 1453 !! 
1460 return err; 1454 errout: << 1455 ext4_free_ext_path(path); << 1456 return ERR_PTR(err); << 1457 } 1461 } 1458 1462 1459 /* 1463 /* 1460 * search the closest allocated block to the 1464 * search the closest allocated block to the left for *logical 1461 * and returns it at @logical + it's physical 1465 * and returns it at @logical + it's physical address at @phys 1462 * if *logical is the smallest allocated bloc 1466 * if *logical is the smallest allocated block, the function 1463 * returns 0 at @phys 1467 * returns 0 at @phys 1464 * return value contains 0 (success) or error 1468 * return value contains 0 (success) or error code 1465 */ 1469 */ 1466 static int ext4_ext_search_left(struct inode 1470 static int ext4_ext_search_left(struct inode *inode, 1467 struct ext4_e 1471 struct ext4_ext_path *path, 1468 ext4_lblk_t * 1472 ext4_lblk_t *logical, ext4_fsblk_t *phys) 1469 { 1473 { 1470 struct ext4_extent_idx *ix; 1474 struct ext4_extent_idx *ix; 1471 struct ext4_extent *ex; 1475 struct ext4_extent *ex; 1472 int depth, ee_len; 1476 int depth, ee_len; 1473 1477 1474 if (unlikely(path == NULL)) { 1478 if (unlikely(path == NULL)) { 1475 EXT4_ERROR_INODE(inode, "path 1479 EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical); 1476 return -EFSCORRUPTED; 1480 return -EFSCORRUPTED; 1477 } 1481 } 1478 depth = path->p_depth; 1482 depth = path->p_depth; 1479 *phys = 0; 1483 *phys = 0; 1480 1484 1481 if (depth == 0 && path->p_ext == NULL 1485 if (depth == 0 && path->p_ext == NULL) 1482 return 0; 1486 return 0; 1483 1487 1484 /* usually extent in the path covers 1488 /* usually extent in the path covers blocks smaller 1485 * then *logical, but it can be that 1489 * then *logical, but it can be that extent is the 1486 * first one in the file */ 1490 * first one in the file */ 1487 1491 1488 ex = path[depth].p_ext; 1492 ex = path[depth].p_ext; 1489 ee_len = ext4_ext_get_actual_len(ex); 1493 ee_len = ext4_ext_get_actual_len(ex); 1490 if (*logical < le32_to_cpu(ex->ee_blo 1494 if (*logical < le32_to_cpu(ex->ee_block)) { 1491 if (unlikely(EXT_FIRST_EXTENT 1495 if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) { 1492 EXT4_ERROR_INODE(inod 1496 EXT4_ERROR_INODE(inode, 1493 "EXT 1497 "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!", 1494 *log 1498 *logical, le32_to_cpu(ex->ee_block)); 1495 return -EFSCORRUPTED; 1499 return -EFSCORRUPTED; 1496 } 1500 } 1497 while (--depth >= 0) { 1501 while (--depth >= 0) { 1498 ix = path[depth].p_id 1502 ix = path[depth].p_idx; 1499 if (unlikely(ix != EX 1503 if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) { 1500 EXT4_ERROR_IN 1504 EXT4_ERROR_INODE(inode, 1501 "ix (%d) != 1505 "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!", 1502 ix != NULL 1506 ix != NULL ? 
le32_to_cpu(ix->ei_block) : 0, 1503 le32_to_cpu 1507 le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block), 1504 depth); 1508 depth); 1505 return -EFSCO 1509 return -EFSCORRUPTED; 1506 } 1510 } 1507 } 1511 } 1508 return 0; 1512 return 0; 1509 } 1513 } 1510 1514 1511 if (unlikely(*logical < (le32_to_cpu( 1515 if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) { 1512 EXT4_ERROR_INODE(inode, 1516 EXT4_ERROR_INODE(inode, 1513 "logical %d 1517 "logical %d < ee_block %d + ee_len %d!", 1514 *logical, le 1518 *logical, le32_to_cpu(ex->ee_block), ee_len); 1515 return -EFSCORRUPTED; 1519 return -EFSCORRUPTED; 1516 } 1520 } 1517 1521 1518 *logical = le32_to_cpu(ex->ee_block) 1522 *logical = le32_to_cpu(ex->ee_block) + ee_len - 1; 1519 *phys = ext4_ext_pblock(ex) + ee_len 1523 *phys = ext4_ext_pblock(ex) + ee_len - 1; 1520 return 0; 1524 return 0; 1521 } 1525 } 1522 1526 1523 /* 1527 /* 1524 * Search the closest allocated block to the 1528 * Search the closest allocated block to the right for *logical 1525 * and returns it at @logical + its physical 1529 * and returns it at @logical + its physical address at @phys. 1526 * If none exists, return 0 and @phys is set t 1530 * If none exists, return 0 and @phys is set to 0. We will return 1527 * 1 which means we found an allocated block 1531 * 1 which means we found an allocated block and ret_ex is valid. 1528 * Or return a (< 0) error code. 1532 * Or return a (< 0) error code. 1529 */ 1533 */ 1530 static int ext4_ext_search_right(struct inode 1534 static int ext4_ext_search_right(struct inode *inode, 1531 struct ext4_ 1535 struct ext4_ext_path *path, 1532 ext4_lblk_t 1536 ext4_lblk_t *logical, ext4_fsblk_t *phys, 1533 struct ext4_ 1537 struct ext4_extent *ret_ex) 1534 { 1538 { 1535 struct buffer_head *bh = NULL; 1539 struct buffer_head *bh = NULL; 1536 struct ext4_extent_header *eh; 1540 struct ext4_extent_header *eh; 1537 struct ext4_extent_idx *ix; 1541 struct ext4_extent_idx *ix; 1538 struct ext4_extent *ex; 1542 struct ext4_extent *ex; 1539 int depth; /* Note, NOT eh_depth 1543 int depth; /* Note, NOT eh_depth; depth from top of tree */ 1540 int ee_len; 1544 int ee_len; 1541 1545 1542 if (unlikely(path == NULL)) { 1546 if (unlikely(path == NULL)) { 1543 EXT4_ERROR_INODE(inode, "path 1547 EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical); 1544 return -EFSCORRUPTED; 1548 return -EFSCORRUPTED; 1545 } 1549 } 1546 depth = path->p_depth; 1550 depth = path->p_depth; 1547 *phys = 0; 1551 *phys = 0; 1548 1552 1549 if (depth == 0 && path->p_ext == NULL 1553 if (depth == 0 && path->p_ext == NULL) 1550 return 0; 1554 return 0; 1551 1555 1552 /* usually extent in the path covers 1556 /* usually extent in the path covers blocks smaller 1553 * than *logical, but it can be that 1557 * than *logical, but it can be that extent is the 1554 * first one in the file */ 1558 * first one in the file */ 1555 1559 1556 ex = path[depth].p_ext; 1560 ex = path[depth].p_ext; 1557 ee_len = ext4_ext_get_actual_len(ex); 1561 ee_len = ext4_ext_get_actual_len(ex); 1558 if (*logical < le32_to_cpu(ex->ee_blo 1562 if (*logical < le32_to_cpu(ex->ee_block)) { 1559 if (unlikely(EXT_FIRST_EXTENT 1563 if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) { 1560 EXT4_ERROR_INODE(inod 1564 EXT4_ERROR_INODE(inode, 1561 "fir 1565 "first_extent(path[%d].p_hdr) != ex", 1562 dept 1566 depth); 1563 return -EFSCORRUPTED; 1567 return -EFSCORRUPTED; 1564 } 1568 } 1565 while (--depth >= 0) { 1569 while (--depth >= 0) { 1566 ix = path[depth].p_id 1570 ix = path[depth].p_idx;
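/*
 * Illustrative sketch, not part of the original file: the answer
 * ext4_ext_search_left() computes once its sanity checks pass -- the
 * closest allocated block on the left of *logical is simply the last
 * block of the extent the path landed on. Simplified host-endian
 * types; the kernel additionally verifies that *logical sits at or
 * past the end of that extent before doing this arithmetic.
 */
#include <stdint.h>

struct toy_extent { uint32_t lblk; uint64_t pblk; uint16_t len; };

/* returns 1 and fills the results if a block exists to the left */
static int toy_search_left(const struct toy_extent *ex, uint32_t lblk,
                           uint32_t *left_lblk, uint64_t *left_pblk)
{
        if (!ex || lblk < ex->lblk)     /* nothing allocated left of lblk */
                return 0;
        *left_lblk = ex->lblk + ex->len - 1;    /* last logical block */
        *left_pblk = ex->pblk + ex->len - 1;    /* matching physical block */
        return 1;
}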
1567 if (unlikely(ix != EX 1571 if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) { 1568 EXT4_ERROR_IN 1572 EXT4_ERROR_INODE(inode, 1569 1573 "ix != EXT_FIRST_INDEX *logical %d!", 1570 1574 *logical); 1571 return -EFSCO 1575 return -EFSCORRUPTED; 1572 } 1576 } 1573 } 1577 } 1574 goto found_extent; 1578 goto found_extent; 1575 } 1579 } 1576 1580 1577 if (unlikely(*logical < (le32_to_cpu( 1581 if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) { 1578 EXT4_ERROR_INODE(inode, 1582 EXT4_ERROR_INODE(inode, 1579 "logical %d 1583 "logical %d < ee_block %d + ee_len %d!", 1580 *logical, le 1584 *logical, le32_to_cpu(ex->ee_block), ee_len); 1581 return -EFSCORRUPTED; 1585 return -EFSCORRUPTED; 1582 } 1586 } 1583 1587 1584 if (ex != EXT_LAST_EXTENT(path[depth] 1588 if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) { 1585 /* next allocated block in th 1589 /* next allocated block in this leaf */ 1586 ex++; 1590 ex++; 1587 goto found_extent; 1591 goto found_extent; 1588 } 1592 } 1589 1593 1590 /* go up and search for index to the 1594 /* go up and search for index to the right */ 1591 while (--depth >= 0) { 1595 while (--depth >= 0) { 1592 ix = path[depth].p_idx; 1596 ix = path[depth].p_idx; 1593 if (ix != EXT_LAST_INDEX(path 1597 if (ix != EXT_LAST_INDEX(path[depth].p_hdr)) 1594 goto got_index; 1598 goto got_index; 1595 } 1599 } 1596 1600 1597 /* we've gone up to the root and foun 1601 /* we've gone up to the root and found no index to the right */ 1598 return 0; 1602 return 0; 1599 1603 1600 got_index: 1604 got_index: 1601 /* we've found index to the right, le 1605 /* we've found index to the right, let's 1602 * follow it and find the closest all 1606 * follow it and find the closest allocated 1603 * block to the right */ 1607 * block to the right */ 1604 ix++; 1608 ix++; 1605 while (++depth < path->p_depth) { 1609 while (++depth < path->p_depth) { 1606 /* subtract from p_depth to g 1610 /* subtract from p_depth to get proper eh_depth */ 1607 bh = read_extent_tree_block(i 1611 bh = read_extent_tree_block(inode, ix, path->p_depth - depth, 0); 1608 if (IS_ERR(bh)) 1612 if (IS_ERR(bh)) 1609 return PTR_ERR(bh); 1613 return PTR_ERR(bh); 1610 eh = ext_block_hdr(bh); 1614 eh = ext_block_hdr(bh); 1611 ix = EXT_FIRST_INDEX(eh); 1615 ix = EXT_FIRST_INDEX(eh); 1612 put_bh(bh); 1616 put_bh(bh); 1613 } 1617 } 1614 1618 1615 bh = read_extent_tree_block(inode, ix 1619 bh = read_extent_tree_block(inode, ix, path->p_depth - depth, 0); 1616 if (IS_ERR(bh)) 1620 if (IS_ERR(bh)) 1617 return PTR_ERR(bh); 1621 return PTR_ERR(bh); 1618 eh = ext_block_hdr(bh); 1622 eh = ext_block_hdr(bh); 1619 ex = EXT_FIRST_EXTENT(eh); 1623 ex = EXT_FIRST_EXTENT(eh); 1620 found_extent: 1624 found_extent: 1621 *logical = le32_to_cpu(ex->ee_block); 1625 *logical = le32_to_cpu(ex->ee_block); 1622 *phys = ext4_ext_pblock(ex); 1626 *phys = ext4_ext_pblock(ex); 1623 if (ret_ex) 1627 if (ret_ex) 1624 *ret_ex = *ex; 1628 *ret_ex = *ex; 1625 if (bh) 1629 if (bh) 1626 put_bh(bh); 1630 put_bh(bh); 1627 return 1; 1631 return 1; 1628 } 1632 } 1629 1633 1630 /* 1634 /* 1631 * ext4_ext_next_allocated_block: 1635 * ext4_ext_next_allocated_block: 1632 * returns allocated block in subsequent exte 1636 * returns allocated block in subsequent extent or EXT_MAX_BLOCKS. 1633 * NOTE: it considers block number from index 1637 * NOTE: it considers block number from index entry as 1634 * allocated block. Thus, index entries have 1638 * allocated block. Thus, index entries have to be consistent 1635 * with leaves. 1639 * with leaves. 
1636 */ 1640 */ 1637 ext4_lblk_t 1641 ext4_lblk_t 1638 ext4_ext_next_allocated_block(struct ext4_ext 1642 ext4_ext_next_allocated_block(struct ext4_ext_path *path) 1639 { 1643 { 1640 int depth; 1644 int depth; 1641 1645 1642 BUG_ON(path == NULL); 1646 BUG_ON(path == NULL); 1643 depth = path->p_depth; 1647 depth = path->p_depth; 1644 1648 1645 if (depth == 0 && path->p_ext == NULL 1649 if (depth == 0 && path->p_ext == NULL) 1646 return EXT_MAX_BLOCKS; 1650 return EXT_MAX_BLOCKS; 1647 1651 1648 while (depth >= 0) { 1652 while (depth >= 0) { 1649 struct ext4_ext_path *p = &pa 1653 struct ext4_ext_path *p = &path[depth]; 1650 1654 1651 if (depth == path->p_depth) { 1655 if (depth == path->p_depth) { 1652 /* leaf */ 1656 /* leaf */ 1653 if (p->p_ext && p->p_ 1657 if (p->p_ext && p->p_ext != EXT_LAST_EXTENT(p->p_hdr)) 1654 return le32_t 1658 return le32_to_cpu(p->p_ext[1].ee_block); 1655 } else { 1659 } else { 1656 /* index */ 1660 /* index */ 1657 if (p->p_idx != EXT_L 1661 if (p->p_idx != EXT_LAST_INDEX(p->p_hdr)) 1658 return le32_t 1662 return le32_to_cpu(p->p_idx[1].ei_block); 1659 } 1663 } 1660 depth--; 1664 depth--; 1661 } 1665 } 1662 1666 1663 return EXT_MAX_BLOCKS; 1667 return EXT_MAX_BLOCKS; 1664 } 1668 } 1665 1669 1666 /* 1670 /* 1667 * ext4_ext_next_leaf_block: 1671 * ext4_ext_next_leaf_block: 1668 * returns first allocated block from next le 1672 * returns first allocated block from next leaf or EXT_MAX_BLOCKS 1669 */ 1673 */ 1670 static ext4_lblk_t ext4_ext_next_leaf_block(s 1674 static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path) 1671 { 1675 { 1672 int depth; 1676 int depth; 1673 1677 1674 BUG_ON(path == NULL); 1678 BUG_ON(path == NULL); 1675 depth = path->p_depth; 1679 depth = path->p_depth; 1676 1680 1677 /* zero-tree has no leaf blocks at al 1681 /* zero-tree has no leaf blocks at all */ 1678 if (depth == 0) 1682 if (depth == 0) 1679 return EXT_MAX_BLOCKS; 1683 return EXT_MAX_BLOCKS; 1680 1684 1681 /* go to index block */ 1685 /* go to index block */ 1682 depth--; 1686 depth--; 1683 1687 1684 while (depth >= 0) { 1688 while (depth >= 0) { 1685 if (path[depth].p_idx != 1689 if (path[depth].p_idx != 1686 EXT_LAST_INDE 1690 EXT_LAST_INDEX(path[depth].p_hdr)) 1687 return (ext4_lblk_t) 1691 return (ext4_lblk_t) 1688 le32_to_cpu(p 1692 le32_to_cpu(path[depth].p_idx[1].ei_block); 1689 depth--; 1693 depth--; 1690 } 1694 } 1691 1695 1692 return EXT_MAX_BLOCKS; 1696 return EXT_MAX_BLOCKS; 1693 } 1697 } 1694 1698 1695 /* 1699 /* 1696 * ext4_ext_correct_indexes: 1700 * ext4_ext_correct_indexes: 1697 * if leaf gets modified and modified extent 1701 * if leaf gets modified and modified extent is first in the leaf, 1698 * then we have to correct all indexes above. 1702 * then we have to correct all indexes above. 1699 * TODO: do we need to correct tree in all ca 1703 * TODO: do we need to correct tree in all cases? 
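/*
 * Illustrative sketch, not part of the original file: the walk shared
 * by ext4_ext_next_allocated_block() and ext4_ext_next_leaf_block()
 * above -- climb from the leaf toward the root and stop at the first
 * level whose current entry has a right sibling; that sibling's key
 * is the next allocation. pos[], cnt[] and key() are invented
 * stand-ins for the p_ext/p_idx cursors kept in the real path array.
 */
static unsigned int toy_next_allocated(int depth, const int *pos,
                                       const int *cnt,
                                       unsigned int (*key)(int d, int i),
                                       unsigned int max_blocks)
{
        int d;

        for (d = depth; d >= 0; d--)            /* leaf first, then indexes */
                if (pos[d] + 1 < cnt[d])        /* right sibling exists */
                        return key(d, pos[d] + 1);
        return max_blocks;                      /* nothing allocated beyond */
}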
1700 */ 1704 */ 1701 static int ext4_ext_correct_indexes(handle_t 1705 static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode, 1702 struct ext4_e 1706 struct ext4_ext_path *path) 1703 { 1707 { 1704 struct ext4_extent_header *eh; 1708 struct ext4_extent_header *eh; 1705 int depth = ext_depth(inode); 1709 int depth = ext_depth(inode); 1706 struct ext4_extent *ex; 1710 struct ext4_extent *ex; 1707 __le32 border; 1711 __le32 border; 1708 int k, err = 0; 1712 int k, err = 0; 1709 1713 1710 eh = path[depth].p_hdr; 1714 eh = path[depth].p_hdr; 1711 ex = path[depth].p_ext; 1715 ex = path[depth].p_ext; 1712 1716 1713 if (unlikely(ex == NULL || eh == NULL 1717 if (unlikely(ex == NULL || eh == NULL)) { 1714 EXT4_ERROR_INODE(inode, 1718 EXT4_ERROR_INODE(inode, 1715 "ex %p == NU 1719 "ex %p == NULL or eh %p == NULL", ex, eh); 1716 return -EFSCORRUPTED; 1720 return -EFSCORRUPTED; 1717 } 1721 } 1718 1722 1719 if (depth == 0) { 1723 if (depth == 0) { 1720 /* there is no tree at all */ 1724 /* there is no tree at all */ 1721 return 0; 1725 return 0; 1722 } 1726 } 1723 1727 1724 if (ex != EXT_FIRST_EXTENT(eh)) { 1728 if (ex != EXT_FIRST_EXTENT(eh)) { 1725 /* we correct tree if first l 1729 /* we correct tree if first leaf got modified only */ 1726 return 0; 1730 return 0; 1727 } 1731 } 1728 1732 1729 /* 1733 /* 1730 * TODO: we need correction if border 1734 * TODO: we need correction if border is smaller than current one 1731 */ 1735 */ 1732 k = depth - 1; 1736 k = depth - 1; 1733 border = path[depth].p_ext->ee_block; 1737 border = path[depth].p_ext->ee_block; 1734 err = ext4_ext_get_access(handle, ino 1738 err = ext4_ext_get_access(handle, inode, path + k); 1735 if (err) 1739 if (err) 1736 return err; 1740 return err; 1737 path[k].p_idx->ei_block = border; 1741 path[k].p_idx->ei_block = border; 1738 err = ext4_ext_dirty(handle, inode, p 1742 err = ext4_ext_dirty(handle, inode, path + k); 1739 if (err) 1743 if (err) 1740 return err; 1744 return err; 1741 1745 1742 while (k--) { 1746 while (k--) { 1743 /* change all left-side index 1747 /* change all left-side indexes */ 1744 if (path[k+1].p_idx != EXT_FI 1748 if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr)) 1745 break; 1749 break; 1746 err = ext4_ext_get_access(han 1750 err = ext4_ext_get_access(handle, inode, path + k); 1747 if (err) 1751 if (err) 1748 goto clean; !! 1752 break; 1749 path[k].p_idx->ei_block = bor 1753 path[k].p_idx->ei_block = border; 1750 err = ext4_ext_dirty(handle, 1754 err = ext4_ext_dirty(handle, inode, path + k); 1751 if (err) 1755 if (err) 1752 goto clean; !! 1756 break; 1753 } 1757 } 1754 return 0; << 1755 << 1756 clean: << 1757 /* << 1758 * The path[k].p_bh is either unmodif << 1759 * set (see ext4_ext_get_access()). 
S << 1760 * of the successfully modified exten << 1761 * these extents to be checked to avo << 1762 */ << 1763 while (++k < depth) << 1764 clear_buffer_verified(path[k] << 1765 1758 1766 return err; 1759 return err; 1767 } 1760 } 1768 1761 1769 static int ext4_can_extents_be_merged(struct 1762 static int ext4_can_extents_be_merged(struct inode *inode, 1770 struct 1763 struct ext4_extent *ex1, 1771 struct 1764 struct ext4_extent *ex2) 1772 { 1765 { 1773 unsigned short ext1_ee_len, ext2_ee_l 1766 unsigned short ext1_ee_len, ext2_ee_len; 1774 1767 1775 if (ext4_ext_is_unwritten(ex1) != ext 1768 if (ext4_ext_is_unwritten(ex1) != ext4_ext_is_unwritten(ex2)) 1776 return 0; 1769 return 0; 1777 1770 1778 ext1_ee_len = ext4_ext_get_actual_len 1771 ext1_ee_len = ext4_ext_get_actual_len(ex1); 1779 ext2_ee_len = ext4_ext_get_actual_len 1772 ext2_ee_len = ext4_ext_get_actual_len(ex2); 1780 1773 1781 if (le32_to_cpu(ex1->ee_block) + ext1 1774 if (le32_to_cpu(ex1->ee_block) + ext1_ee_len != 1782 le32_to_cpu(ex2->ee_b 1775 le32_to_cpu(ex2->ee_block)) 1783 return 0; 1776 return 0; 1784 1777 1785 if (ext1_ee_len + ext2_ee_len > EXT_I 1778 if (ext1_ee_len + ext2_ee_len > EXT_INIT_MAX_LEN) 1786 return 0; 1779 return 0; 1787 1780 1788 if (ext4_ext_is_unwritten(ex1) && 1781 if (ext4_ext_is_unwritten(ex1) && 1789 ext1_ee_len + ext2_ee_len > EXT_U 1782 ext1_ee_len + ext2_ee_len > EXT_UNWRITTEN_MAX_LEN) 1790 return 0; 1783 return 0; 1791 #ifdef AGGRESSIVE_TEST 1784 #ifdef AGGRESSIVE_TEST 1792 if (ext1_ee_len >= 4) 1785 if (ext1_ee_len >= 4) 1793 return 0; 1786 return 0; 1794 #endif 1787 #endif 1795 1788 1796 if (ext4_ext_pblock(ex1) + ext1_ee_le 1789 if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2)) 1797 return 1; 1790 return 1; 1798 return 0; 1791 return 0; 1799 } 1792 } 1800 1793 1801 /* 1794 /* 1802 * This function tries to merge the "ex" exte 1795 * This function tries to merge the "ex" extent to the next extent in the tree. 1803 * It always tries to merge towards right. If 1796 * It always tries to merge towards right. If you want to merge towards 1804 * left, pass "ex - 1" as argument instead of 1797 * left, pass "ex - 1" as argument instead of "ex". 1805 * Returns 0 if the extents (ex and ex+1) wer 1798 * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns 1806 * 1 if they got merged. 1799 * 1 if they got merged. 1807 */ 1800 */ 1808 static int ext4_ext_try_to_merge_right(struct 1801 static int ext4_ext_try_to_merge_right(struct inode *inode, 1809 struct ext4_ 1802 struct ext4_ext_path *path, 1810 struct ext4_ 1803 struct ext4_extent *ex) 1811 { 1804 { 1812 struct ext4_extent_header *eh; 1805 struct ext4_extent_header *eh; 1813 unsigned int depth, len; 1806 unsigned int depth, len; 1814 int merge_done = 0, unwritten; 1807 int merge_done = 0, unwritten; 1815 1808 1816 depth = ext_depth(inode); 1809 depth = ext_depth(inode); 1817 BUG_ON(path[depth].p_hdr == NULL); 1810 BUG_ON(path[depth].p_hdr == NULL); 1818 eh = path[depth].p_hdr; 1811 eh = path[depth].p_hdr; 1819 1812 1820 while (ex < EXT_LAST_EXTENT(eh)) { 1813 while (ex < EXT_LAST_EXTENT(eh)) { 1821 if (!ext4_can_extents_be_merg 1814 if (!ext4_can_extents_be_merged(inode, ex, ex + 1)) 1822 break; 1815 break; 1823 /* merge with next extent! */ 1816 /* merge with next extent! 
*/ 1824 unwritten = ext4_ext_is_unwri 1817 unwritten = ext4_ext_is_unwritten(ex); 1825 ex->ee_len = cpu_to_le16(ext4 1818 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) 1826 + ext4_ext_ge 1819 + ext4_ext_get_actual_len(ex + 1)); 1827 if (unwritten) 1820 if (unwritten) 1828 ext4_ext_mark_unwritt 1821 ext4_ext_mark_unwritten(ex); 1829 1822 1830 if (ex + 1 < EXT_LAST_EXTENT( 1823 if (ex + 1 < EXT_LAST_EXTENT(eh)) { 1831 len = (EXT_LAST_EXTEN 1824 len = (EXT_LAST_EXTENT(eh) - ex - 1) 1832 * sizeof(stru 1825 * sizeof(struct ext4_extent); 1833 memmove(ex + 1, ex + 1826 memmove(ex + 1, ex + 2, len); 1834 } 1827 } 1835 le16_add_cpu(&eh->eh_entries, 1828 le16_add_cpu(&eh->eh_entries, -1); 1836 merge_done = 1; 1829 merge_done = 1; 1837 WARN_ON(eh->eh_entries == 0); 1830 WARN_ON(eh->eh_entries == 0); 1838 if (!eh->eh_entries) 1831 if (!eh->eh_entries) 1839 EXT4_ERROR_INODE(inod 1832 EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!"); 1840 } 1833 } 1841 1834 1842 return merge_done; 1835 return merge_done; 1843 } 1836 } 1844 1837 1845 /* 1838 /* 1846 * This function does a very simple check to 1839 * This function does a very simple check to see if we can collapse 1847 * an extent tree with a single extent tree l 1840 * an extent tree with a single extent tree leaf block into the inode. 1848 */ 1841 */ 1849 static void ext4_ext_try_to_merge_up(handle_t 1842 static void ext4_ext_try_to_merge_up(handle_t *handle, 1850 struct i 1843 struct inode *inode, 1851 struct e 1844 struct ext4_ext_path *path) 1852 { 1845 { 1853 size_t s; 1846 size_t s; 1854 unsigned max_root = ext4_ext_space_ro 1847 unsigned max_root = ext4_ext_space_root(inode, 0); 1855 ext4_fsblk_t blk; 1848 ext4_fsblk_t blk; 1856 1849 1857 if ((path[0].p_depth != 1) || 1850 if ((path[0].p_depth != 1) || 1858 (le16_to_cpu(path[0].p_hdr->eh_en 1851 (le16_to_cpu(path[0].p_hdr->eh_entries) != 1) || 1859 (le16_to_cpu(path[1].p_hdr->eh_en 1852 (le16_to_cpu(path[1].p_hdr->eh_entries) > max_root)) 1860 return; 1853 return; 1861 1854 1862 /* 1855 /* 1863 * We need to modify the block alloca 1856 * We need to modify the block allocation bitmap and the block 1864 * group descriptor to release the ex 1857 * group descriptor to release the extent tree block. If we 1865 * can't get the journal credits, giv 1858 * can't get the journal credits, give up. 
1866 */ 1859 */ 1867 if (ext4_journal_extend(handle, 2, 1860 if (ext4_journal_extend(handle, 2, 1868 ext4_free_metadata_re 1861 ext4_free_metadata_revoke_credits(inode->i_sb, 1))) 1869 return; 1862 return; 1870 1863 1871 /* 1864 /* 1872 * Copy the extent data up to the ino 1865 * Copy the extent data up to the inode 1873 */ 1866 */ 1874 blk = ext4_idx_pblock(path[0].p_idx); 1867 blk = ext4_idx_pblock(path[0].p_idx); 1875 s = le16_to_cpu(path[1].p_hdr->eh_ent 1868 s = le16_to_cpu(path[1].p_hdr->eh_entries) * 1876 sizeof(struct ext4_extent_idx 1869 sizeof(struct ext4_extent_idx); 1877 s += sizeof(struct ext4_extent_header 1870 s += sizeof(struct ext4_extent_header); 1878 1871 1879 path[1].p_maxdepth = path[0].p_maxdep 1872 path[1].p_maxdepth = path[0].p_maxdepth; 1880 memcpy(path[0].p_hdr, path[1].p_hdr, 1873 memcpy(path[0].p_hdr, path[1].p_hdr, s); 1881 path[0].p_depth = 0; 1874 path[0].p_depth = 0; 1882 path[0].p_ext = EXT_FIRST_EXTENT(path 1875 path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) + 1883 (path[1].p_ext - EXT_FIRST_EX 1876 (path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr)); 1884 path[0].p_hdr->eh_max = cpu_to_le16(m 1877 path[0].p_hdr->eh_max = cpu_to_le16(max_root); 1885 1878 1886 ext4_ext_path_brelse(path + 1); !! 1879 brelse(path[1].p_bh); 1887 ext4_free_blocks(handle, inode, NULL, 1880 ext4_free_blocks(handle, inode, NULL, blk, 1, 1888 EXT4_FREE_BLOCKS_MET 1881 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); 1889 } 1882 } 1890 1883 1891 /* 1884 /* 1892 * This function tries to merge the @ex exten 1885 * This function tries to merge the @ex extent to neighbours in the tree, then 1893 * tries to collapse the extent tree into the 1886 * tries to collapse the extent tree into the inode. 1894 */ 1887 */ 1895 static void ext4_ext_try_to_merge(handle_t *h 1888 static void ext4_ext_try_to_merge(handle_t *handle, 1896 struct inod 1889 struct inode *inode, 1897 struct ext4 1890 struct ext4_ext_path *path, 1898 struct ext4 1891 struct ext4_extent *ex) 1899 { 1892 { 1900 struct ext4_extent_header *eh; 1893 struct ext4_extent_header *eh; 1901 unsigned int depth; 1894 unsigned int depth; 1902 int merge_done = 0; 1895 int merge_done = 0; 1903 1896 1904 depth = ext_depth(inode); 1897 depth = ext_depth(inode); 1905 BUG_ON(path[depth].p_hdr == NULL); 1898 BUG_ON(path[depth].p_hdr == NULL); 1906 eh = path[depth].p_hdr; 1899 eh = path[depth].p_hdr; 1907 1900 1908 if (ex > EXT_FIRST_EXTENT(eh)) 1901 if (ex > EXT_FIRST_EXTENT(eh)) 1909 merge_done = ext4_ext_try_to_ 1902 merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1); 1910 1903 1911 if (!merge_done) 1904 if (!merge_done) 1912 (void) ext4_ext_try_to_merge_ 1905 (void) ext4_ext_try_to_merge_right(inode, path, ex); 1913 1906 1914 ext4_ext_try_to_merge_up(handle, inod 1907 ext4_ext_try_to_merge_up(handle, inode, path); 1915 } 1908 } 1916 1909 1917 /* 1910 /* 1918 * check if a portion of the "newext" extent 1911 * check if a portion of the "newext" extent overlaps with an 1919 * existing extent. 1912 * existing extent. 1920 * 1913 * 1921 * If there is an overlap discovered, it upda 1914 * If there is an overlap discovered, it updates the length of the newext 1922 * such that there will be no overlap, and th 1915 * such that there will be no overlap, and then returns 1. 1923 * If there is no overlap found, it returns 0 1916 * If there is no overlap found, it returns 0. 
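/*
 * Illustrative sketch, not part of the original file: the collapse
 * test in ext4_ext_try_to_merge_up() above, reduced to its three
 * conditions -- a root exactly one level above a single child whose
 * entries fit in the root's smaller in-inode capacity. Simplified
 * fields; the real code must also secure journal credits for freeing
 * the child block before it commits to the collapse.
 */
struct toy_node { int depth; int entries; };

static int toy_can_collapse(const struct toy_node *root, int root_capacity,
                            const struct toy_node *child)
{
        return root->depth == 1 &&              /* exactly one level below */
               root->entries == 1 &&            /* a single child to absorb */
               child->entries <= root_capacity; /* child fits in the inode */
}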
1924 */ 1917 */ 1925 static unsigned int ext4_ext_check_overlap(st 1918 static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi, 1926 st 1919 struct inode *inode, 1927 st 1920 struct ext4_extent *newext, 1928 st 1921 struct ext4_ext_path *path) 1929 { 1922 { 1930 ext4_lblk_t b1, b2; 1923 ext4_lblk_t b1, b2; 1931 unsigned int depth, len1; 1924 unsigned int depth, len1; 1932 unsigned int ret = 0; 1925 unsigned int ret = 0; 1933 1926 1934 b1 = le32_to_cpu(newext->ee_block); 1927 b1 = le32_to_cpu(newext->ee_block); 1935 len1 = ext4_ext_get_actual_len(newext 1928 len1 = ext4_ext_get_actual_len(newext); 1936 depth = ext_depth(inode); 1929 depth = ext_depth(inode); 1937 if (!path[depth].p_ext) 1930 if (!path[depth].p_ext) 1938 goto out; 1931 goto out; 1939 b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu 1932 b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block)); 1940 1933 1941 /* 1934 /* 1942 * get the next allocated block if th 1935 * get the next allocated block if the extent in the path 1943 * is before the requested block(s) 1936 * is before the requested block(s) 1944 */ 1937 */ 1945 if (b2 < b1) { 1938 if (b2 < b1) { 1946 b2 = ext4_ext_next_allocated_ 1939 b2 = ext4_ext_next_allocated_block(path); 1947 if (b2 == EXT_MAX_BLOCKS) 1940 if (b2 == EXT_MAX_BLOCKS) 1948 goto out; 1941 goto out; 1949 b2 = EXT4_LBLK_CMASK(sbi, b2) 1942 b2 = EXT4_LBLK_CMASK(sbi, b2); 1950 } 1943 } 1951 1944 1952 /* check for wrap through zero on ext 1945 /* check for wrap through zero on extent logical start block*/ 1953 if (b1 + len1 < b1) { 1946 if (b1 + len1 < b1) { 1954 len1 = EXT_MAX_BLOCKS - b1; 1947 len1 = EXT_MAX_BLOCKS - b1; 1955 newext->ee_len = cpu_to_le16( 1948 newext->ee_len = cpu_to_le16(len1); 1956 ret = 1; 1949 ret = 1; 1957 } 1950 } 1958 1951 1959 /* check for overlap */ 1952 /* check for overlap */ 1960 if (b1 + len1 > b2) { 1953 if (b1 + len1 > b2) { 1961 newext->ee_len = cpu_to_le16( 1954 newext->ee_len = cpu_to_le16(b2 - b1); 1962 ret = 1; 1955 ret = 1; 1963 } 1956 } 1964 out: 1957 out: 1965 return ret; 1958 return ret; 1966 } 1959 } 1967 1960 1968 /* 1961 /* 1969 * ext4_ext_insert_extent: 1962 * ext4_ext_insert_extent: 1970 * tries to merge requested extent into the e 1963 * tries to merge requested extent into the existing extent or 1971 * inserts requested extent as new one into t 1964 * inserts requested extent as new one into the tree, 1972 * creating new leaf in the no-space case. 1965 * creating new leaf in the no-space case. 1973 */ 1966 */ 1974 struct ext4_ext_path * !! 1967 int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, 1975 ext4_ext_insert_extent(handle_t *handle, stru !! 1968 struct ext4_ext_path **ppath, 1976 struct ext4_ext_path * !! 1969 struct ext4_extent *newext, int gb_flags) 1977 struct ext4_extent *ne << 1978 { 1970 { >> 1971 struct ext4_ext_path *path = *ppath; 1979 struct ext4_extent_header *eh; 1972 struct ext4_extent_header *eh; 1980 struct ext4_extent *ex, *fex; 1973 struct ext4_extent *ex, *fex; 1981 struct ext4_extent *nearex; /* neares 1974 struct ext4_extent *nearex; /* nearest extent */ 1982 int depth, len, err = 0; !! 
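/*
 * Illustrative sketch, not part of the original file: the two clamps
 * applied by ext4_ext_check_overlap() above, with plain integers.
 * b1/len1 describe the proposed extent, b2 is the start of the next
 * allocation at or after b1, and max_blocks stands in for
 * EXT_MAX_BLOCKS. Returns 1 when the length had to shrink.
 */
#include <stdint.h>

static int toy_clamp_new_extent(uint32_t b1, uint32_t *len1, uint32_t b2,
                                uint32_t max_blocks)
{
        int trimmed = 0;

        if (b1 + *len1 < b1) {          /* wraps past the last logical block */
                *len1 = max_blocks - b1;
                trimmed = 1;
        }
        if (b1 + *len1 > b2) {          /* would run into the next allocation */
                *len1 = b2 - b1;
                trimmed = 1;
        }
        return trimmed;
}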
1975 struct ext4_ext_path *npath = NULL; >> 1976 int depth, len, err; 1983 ext4_lblk_t next; 1977 ext4_lblk_t next; 1984 int mb_flags = 0, unwritten; 1978 int mb_flags = 0, unwritten; 1985 1979 1986 if (gb_flags & EXT4_GET_BLOCKS_DELALL 1980 if (gb_flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) 1987 mb_flags |= EXT4_MB_DELALLOC_ 1981 mb_flags |= EXT4_MB_DELALLOC_RESERVED; 1988 if (unlikely(ext4_ext_get_actual_len( 1982 if (unlikely(ext4_ext_get_actual_len(newext) == 0)) { 1989 EXT4_ERROR_INODE(inode, "ext4 1983 EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0"); 1990 err = -EFSCORRUPTED; !! 1984 return -EFSCORRUPTED; 1991 goto errout; << 1992 } 1985 } 1993 depth = ext_depth(inode); 1986 depth = ext_depth(inode); 1994 ex = path[depth].p_ext; 1987 ex = path[depth].p_ext; 1995 eh = path[depth].p_hdr; 1988 eh = path[depth].p_hdr; 1996 if (unlikely(path[depth].p_hdr == NUL 1989 if (unlikely(path[depth].p_hdr == NULL)) { 1997 EXT4_ERROR_INODE(inode, "path 1990 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); 1998 err = -EFSCORRUPTED; !! 1991 return -EFSCORRUPTED; 1999 goto errout; << 2000 } 1992 } 2001 1993 2002 /* try to insert block into found ext 1994 /* try to insert block into found extent and return */ 2003 if (ex && !(gb_flags & EXT4_GET_BLOCK 1995 if (ex && !(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) { 2004 1996 2005 /* 1997 /* 2006 * Try to see whether we shou 1998 * Try to see whether we should rather test the extent on 2007 * right from ex, or from the 1999 * right from ex, or from the left of ex. This is because 2008 * ext4_find_extent() can ret 2000 * ext4_find_extent() can return either extent on the 2009 * left, or on the right from 2001 * left, or on the right from the searched position. This 2010 * will make merging more eff 2002 * will make merging more effective. 2011 */ 2003 */ 2012 if (ex < EXT_LAST_EXTENT(eh) 2004 if (ex < EXT_LAST_EXTENT(eh) && 2013 (le32_to_cpu(ex->ee_block 2005 (le32_to_cpu(ex->ee_block) + 2014 ext4_ext_get_actual_len(e 2006 ext4_ext_get_actual_len(ex) < 2015 le32_to_cpu(newext->ee_bl 2007 le32_to_cpu(newext->ee_block))) { 2016 ex += 1; 2008 ex += 1; 2017 goto prepend; 2009 goto prepend; 2018 } else if ((ex > EXT_FIRST_EX 2010 } else if ((ex > EXT_FIRST_EXTENT(eh)) && 2019 (le32_to_cpu(newex 2011 (le32_to_cpu(newext->ee_block) + 2020 ext4_ext_get_actua 2012 ext4_ext_get_actual_len(newext) < 2021 le32_to_cpu(ex->ee 2013 le32_to_cpu(ex->ee_block))) 2022 ex -= 1; 2014 ex -= 1; 2023 2015 2024 /* Try to append newex to the 2016 /* Try to append newex to the ex */ 2025 if (ext4_can_extents_be_merge 2017 if (ext4_can_extents_be_merged(inode, ex, newext)) { 2026 ext_debug(inode, "app 2018 ext_debug(inode, "append [%d]%d block to %u:[%d]%d" 2027 "(from %llu 2019 "(from %llu)\n", 2028 ext4_ext_is 2020 ext4_ext_is_unwritten(newext), 2029 ext4_ext_ge 2021 ext4_ext_get_actual_len(newext), 2030 le32_to_cpu 2022 le32_to_cpu(ex->ee_block), 2031 ext4_ext_is 2023 ext4_ext_is_unwritten(ex), 2032 ext4_ext_ge 2024 ext4_ext_get_actual_len(ex), 2033 ext4_ext_pb 2025 ext4_ext_pblock(ex)); 2034 err = ext4_ext_get_ac 2026 err = ext4_ext_get_access(handle, inode, 2035 2027 path + depth); 2036 if (err) 2028 if (err) 2037 goto errout; !! 
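/*
 * Illustrative sketch, not part of the original file: why the append
 * path above saves "unwritten" and re-marks it after updating the
 * length. The unwritten state is encoded inside the same 16-bit
 * ee_len field (stored as length + 32768), so writing a new length
 * clears it. Simplified model that ignores the one edge case of a
 * 32768-block initialized extent:
 */
#include <stdint.h>

#define TOY_UNWRIT_BIT  0x8000u         /* high bit of the length field */

static void toy_extend_extent(uint16_t *ee_len, uint16_t extra)
{
        int unwritten = *ee_len & TOY_UNWRIT_BIT;
        uint16_t actual = *ee_len & ~TOY_UNWRIT_BIT;    /* current length */

        *ee_len = actual + extra;       /* this alone would lose the flag */
        if (unwritten)
                *ee_len |= TOY_UNWRIT_BIT;      /* re-mark, as the code above does */
}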
2029 return err; 2038 unwritten = ext4_ext_ 2030 unwritten = ext4_ext_is_unwritten(ex); 2039 ex->ee_len = cpu_to_l 2031 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) 2040 + ext 2032 + ext4_ext_get_actual_len(newext)); 2041 if (unwritten) 2033 if (unwritten) 2042 ext4_ext_mark 2034 ext4_ext_mark_unwritten(ex); 2043 nearex = ex; 2035 nearex = ex; 2044 goto merge; 2036 goto merge; 2045 } 2037 } 2046 2038 2047 prepend: 2039 prepend: 2048 /* Try to prepend newex to th 2040 /* Try to prepend newex to the ex */ 2049 if (ext4_can_extents_be_merge 2041 if (ext4_can_extents_be_merged(inode, newext, ex)) { 2050 ext_debug(inode, "pre 2042 ext_debug(inode, "prepend %u[%d]%d block to %u:[%d]%d" 2051 "(from %llu 2043 "(from %llu)\n", 2052 le32_to_cpu 2044 le32_to_cpu(newext->ee_block), 2053 ext4_ext_is 2045 ext4_ext_is_unwritten(newext), 2054 ext4_ext_ge 2046 ext4_ext_get_actual_len(newext), 2055 le32_to_cpu 2047 le32_to_cpu(ex->ee_block), 2056 ext4_ext_is 2048 ext4_ext_is_unwritten(ex), 2057 ext4_ext_ge 2049 ext4_ext_get_actual_len(ex), 2058 ext4_ext_pb 2050 ext4_ext_pblock(ex)); 2059 err = ext4_ext_get_ac 2051 err = ext4_ext_get_access(handle, inode, 2060 2052 path + depth); 2061 if (err) 2053 if (err) 2062 goto errout; !! 2054 return err; 2063 2055 2064 unwritten = ext4_ext_ 2056 unwritten = ext4_ext_is_unwritten(ex); 2065 ex->ee_block = newext 2057 ex->ee_block = newext->ee_block; 2066 ext4_ext_store_pblock 2058 ext4_ext_store_pblock(ex, ext4_ext_pblock(newext)); 2067 ex->ee_len = cpu_to_l 2059 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) 2068 + ext 2060 + ext4_ext_get_actual_len(newext)); 2069 if (unwritten) 2061 if (unwritten) 2070 ext4_ext_mark 2062 ext4_ext_mark_unwritten(ex); 2071 nearex = ex; 2063 nearex = ex; 2072 goto merge; 2064 goto merge; 2073 } 2065 } 2074 } 2066 } 2075 2067 2076 depth = ext_depth(inode); 2068 depth = ext_depth(inode); 2077 eh = path[depth].p_hdr; 2069 eh = path[depth].p_hdr; 2078 if (le16_to_cpu(eh->eh_entries) < le1 2070 if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) 2079 goto has_space; 2071 goto has_space; 2080 2072 2081 /* probably next leaf has space for u 2073 /* probably next leaf has space for us? */ 2082 fex = EXT_LAST_EXTENT(eh); 2074 fex = EXT_LAST_EXTENT(eh); 2083 next = EXT_MAX_BLOCKS; 2075 next = EXT_MAX_BLOCKS; 2084 if (le32_to_cpu(newext->ee_block) > l 2076 if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)) 2085 next = ext4_ext_next_leaf_blo 2077 next = ext4_ext_next_leaf_block(path); 2086 if (next != EXT_MAX_BLOCKS) { 2078 if (next != EXT_MAX_BLOCKS) { 2087 struct ext4_ext_path *npath; << 2088 << 2089 ext_debug(inode, "next leaf b 2079 ext_debug(inode, "next leaf block - %u\n", next); >> 2080 BUG_ON(npath != NULL); 2090 npath = ext4_find_extent(inod 2081 npath = ext4_find_extent(inode, next, NULL, gb_flags); 2091 if (IS_ERR(npath)) { !! 2082 if (IS_ERR(npath)) 2092 err = PTR_ERR(npath); !! 
2083 return PTR_ERR(npath); 2093 goto errout; << 2094 } << 2095 BUG_ON(npath->p_depth != path 2084 BUG_ON(npath->p_depth != path->p_depth); 2096 eh = npath[depth].p_hdr; 2085 eh = npath[depth].p_hdr; 2097 if (le16_to_cpu(eh->eh_entrie 2086 if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) { 2098 ext_debug(inode, "nex 2087 ext_debug(inode, "next leaf isn't full(%d)\n", 2099 le16_to_cpu 2088 le16_to_cpu(eh->eh_entries)); 2100 ext4_free_ext_path(pa << 2101 path = npath; 2089 path = npath; 2102 goto has_space; 2090 goto has_space; 2103 } 2091 } 2104 ext_debug(inode, "next leaf h 2092 ext_debug(inode, "next leaf has no free space(%d,%d)\n", 2105 le16_to_cpu(eh->eh_ 2093 le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max)); 2106 ext4_free_ext_path(npath); << 2107 } 2094 } 2108 2095 2109 /* 2096 /* 2110 * There is no free space in the foun 2097 * There is no free space in the found leaf. 2111 * We're gonna add a new leaf in the 2098 * We're gonna add a new leaf in the tree. 2112 */ 2099 */ 2113 if (gb_flags & EXT4_GET_BLOCKS_METADA 2100 if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL) 2114 mb_flags |= EXT4_MB_USE_RESER 2101 mb_flags |= EXT4_MB_USE_RESERVED; 2115 path = ext4_ext_create_new_leaf(handl !! 2102 err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags, 2116 path, !! 2103 ppath, newext); 2117 if (IS_ERR(path)) !! 2104 if (err) 2118 return path; !! 2105 goto cleanup; 2119 depth = ext_depth(inode); 2106 depth = ext_depth(inode); 2120 eh = path[depth].p_hdr; 2107 eh = path[depth].p_hdr; 2121 2108 2122 has_space: 2109 has_space: 2123 nearex = path[depth].p_ext; 2110 nearex = path[depth].p_ext; 2124 2111 2125 err = ext4_ext_get_access(handle, ino 2112 err = ext4_ext_get_access(handle, inode, path + depth); 2126 if (err) 2113 if (err) 2127 goto errout; !! 
2114 goto cleanup; 2128 2115 2129 if (!nearex) { 2116 if (!nearex) { 2130 /* there is no extent in this 2117 /* there is no extent in this leaf, create first one */ 2131 ext_debug(inode, "first exten 2118 ext_debug(inode, "first extent in the leaf: %u:%llu:[%d]%d\n", 2132 le32_to_cpu(n 2119 le32_to_cpu(newext->ee_block), 2133 ext4_ext_pblo 2120 ext4_ext_pblock(newext), 2134 ext4_ext_is_u 2121 ext4_ext_is_unwritten(newext), 2135 ext4_ext_get_ 2122 ext4_ext_get_actual_len(newext)); 2136 nearex = EXT_FIRST_EXTENT(eh) 2123 nearex = EXT_FIRST_EXTENT(eh); 2137 } else { 2124 } else { 2138 if (le32_to_cpu(newext->ee_bl 2125 if (le32_to_cpu(newext->ee_block) 2139 > le32_to_cpu(near 2126 > le32_to_cpu(nearex->ee_block)) { 2140 /* Insert after */ 2127 /* Insert after */ 2141 ext_debug(inode, "ins 2128 ext_debug(inode, "insert %u:%llu:[%d]%d before: " 2142 "near 2129 "nearest %p\n", 2143 le32_ 2130 le32_to_cpu(newext->ee_block), 2144 ext4_ 2131 ext4_ext_pblock(newext), 2145 ext4_ 2132 ext4_ext_is_unwritten(newext), 2146 ext4_ 2133 ext4_ext_get_actual_len(newext), 2147 neare 2134 nearex); 2148 nearex++; 2135 nearex++; 2149 } else { 2136 } else { 2150 /* Insert before */ 2137 /* Insert before */ 2151 BUG_ON(newext->ee_blo 2138 BUG_ON(newext->ee_block == nearex->ee_block); 2152 ext_debug(inode, "ins 2139 ext_debug(inode, "insert %u:%llu:[%d]%d after: " 2153 "near 2140 "nearest %p\n", 2154 le32_ 2141 le32_to_cpu(newext->ee_block), 2155 ext4_ 2142 ext4_ext_pblock(newext), 2156 ext4_ 2143 ext4_ext_is_unwritten(newext), 2157 ext4_ 2144 ext4_ext_get_actual_len(newext), 2158 neare 2145 nearex); 2159 } 2146 } 2160 len = EXT_LAST_EXTENT(eh) - n 2147 len = EXT_LAST_EXTENT(eh) - nearex + 1; 2161 if (len > 0) { 2148 if (len > 0) { 2162 ext_debug(inode, "ins 2149 ext_debug(inode, "insert %u:%llu:[%d]%d: " 2163 "move 2150 "move %d extents from 0x%p to 0x%p\n", 2164 le32_ 2151 le32_to_cpu(newext->ee_block), 2165 ext4_ 2152 ext4_ext_pblock(newext), 2166 ext4_ 2153 ext4_ext_is_unwritten(newext), 2167 ext4_ 2154 ext4_ext_get_actual_len(newext), 2168 len, 2155 len, nearex, nearex + 1); 2169 memmove(nearex + 1, n 2156 memmove(nearex + 1, nearex, 2170 len * sizeof( 2157 len * sizeof(struct ext4_extent)); 2171 } 2158 } 2172 } 2159 } 2173 2160 2174 le16_add_cpu(&eh->eh_entries, 1); 2161 le16_add_cpu(&eh->eh_entries, 1); 2175 path[depth].p_ext = nearex; 2162 path[depth].p_ext = nearex; 2176 nearex->ee_block = newext->ee_block; 2163 nearex->ee_block = newext->ee_block; 2177 ext4_ext_store_pblock(nearex, ext4_ex 2164 ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext)); 2178 nearex->ee_len = newext->ee_len; 2165 nearex->ee_len = newext->ee_len; 2179 2166 2180 merge: 2167 merge: 2181 /* try to merge extents */ 2168 /* try to merge extents */ 2182 if (!(gb_flags & EXT4_GET_BLOCKS_PRE_ 2169 if (!(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) 2183 ext4_ext_try_to_merge(handle, 2170 ext4_ext_try_to_merge(handle, inode, path, nearex); 2184 2171 >> 2172 2185 /* time to correct all indexes above 2173 /* time to correct all indexes above */ 2186 err = ext4_ext_correct_indexes(handle 2174 err = ext4_ext_correct_indexes(handle, inode, path); 2187 if (err) 2175 if (err) 2188 goto errout; !! 2176 goto cleanup; 2189 2177 2190 err = ext4_ext_dirty(handle, inode, p 2178 err = ext4_ext_dirty(handle, inode, path + path->p_depth); 2191 if (err) << 2192 goto errout; << 2193 2179 2194 return path; !! 2180 cleanup: 2195 !! 2181 ext4_free_ext_path(npath); 2196 errout: !! 
2182 return err; 2197 ext4_free_ext_path(path); << 2198 return ERR_PTR(err); << 2199 } 2183 } 2200 2184 2201 static int ext4_fill_es_cache_info(struct ino 2185 static int ext4_fill_es_cache_info(struct inode *inode, 2202 ext4_lblk_ 2186 ext4_lblk_t block, ext4_lblk_t num, 2203 struct fie 2187 struct fiemap_extent_info *fieinfo) 2204 { 2188 { 2205 ext4_lblk_t next, end = block + num - 2189 ext4_lblk_t next, end = block + num - 1; 2206 struct extent_status es; 2190 struct extent_status es; 2207 unsigned char blksize_bits = inode->i 2191 unsigned char blksize_bits = inode->i_sb->s_blocksize_bits; 2208 unsigned int flags; 2192 unsigned int flags; 2209 int err; 2193 int err; 2210 2194 2211 while (block <= end) { 2195 while (block <= end) { 2212 next = 0; 2196 next = 0; 2213 flags = 0; 2197 flags = 0; 2214 if (!ext4_es_lookup_extent(in 2198 if (!ext4_es_lookup_extent(inode, block, &next, &es)) 2215 break; 2199 break; 2216 if (ext4_es_is_unwritten(&es) 2200 if (ext4_es_is_unwritten(&es)) 2217 flags |= FIEMAP_EXTEN 2201 flags |= FIEMAP_EXTENT_UNWRITTEN; 2218 if (ext4_es_is_delayed(&es)) 2202 if (ext4_es_is_delayed(&es)) 2219 flags |= (FIEMAP_EXTE 2203 flags |= (FIEMAP_EXTENT_DELALLOC | 2220 FIEMAP_EXTE 2204 FIEMAP_EXTENT_UNKNOWN); 2221 if (ext4_es_is_hole(&es)) 2205 if (ext4_es_is_hole(&es)) 2222 flags |= EXT4_FIEMAP_ 2206 flags |= EXT4_FIEMAP_EXTENT_HOLE; 2223 if (next == 0) 2207 if (next == 0) 2224 flags |= FIEMAP_EXTEN 2208 flags |= FIEMAP_EXTENT_LAST; 2225 if (flags & (FIEMAP_EXTENT_DE 2209 if (flags & (FIEMAP_EXTENT_DELALLOC| 2226 EXT4_FIEMAP_EXTE 2210 EXT4_FIEMAP_EXTENT_HOLE)) 2227 es.es_pblk = 0; 2211 es.es_pblk = 0; 2228 else 2212 else 2229 es.es_pblk = ext4_es_ 2213 es.es_pblk = ext4_es_pblock(&es); 2230 err = fiemap_fill_next_extent 2214 err = fiemap_fill_next_extent(fieinfo, 2231 (__u64)es.es_ 2215 (__u64)es.es_lblk << blksize_bits, 2232 (__u64)es.es_ 2216 (__u64)es.es_pblk << blksize_bits, 2233 (__u64)es.es_ 2217 (__u64)es.es_len << blksize_bits, 2234 flags); 2218 flags); 2235 if (next == 0) 2219 if (next == 0) 2236 break; 2220 break; 2237 block = next; 2221 block = next; 2238 if (err < 0) 2222 if (err < 0) 2239 return err; 2223 return err; 2240 if (err == 1) 2224 if (err == 1) 2241 return 0; 2225 return 0; 2242 } 2226 } 2243 return 0; 2227 return 0; 2244 } 2228 } 2245 2229 2246 2230 2247 /* 2231 /* 2248 * ext4_ext_find_hole - find hole around give !! 2232 * ext4_ext_determine_hole - determine hole around given block 2249 * @inode: inode we lookup in 2233 * @inode: inode we lookup in 2250 * @path: path in extent tree to @lblk 2234 * @path: path in extent tree to @lblk 2251 * @lblk: pointer to logical block arou 2235 * @lblk: pointer to logical block around which we want to determine hole 2252 * 2236 * 2253 * Determine hole length (and start if easily 2237 * Determine hole length (and start if easily possible) around given logical 2254 * block. We don't try too hard to find the b 2238 * block. We don't try too hard to find the beginning of the hole but @path 2255 * actually points to extent before @lblk, we 2239 * actually points to extent before @lblk, we provide it. 2256 * 2240 * 2257 * The function returns the length of a hole 2241 * The function returns the length of a hole starting at @lblk. We update @lblk 2258 * to the beginning of the hole if we managed 2242 * to the beginning of the hole if we managed to find it. 2259 */ 2243 */ 2260 static ext4_lblk_t ext4_ext_find_hole(struct !! 2244 static ext4_lblk_t ext4_ext_determine_hole(struct inode *inode, 2261 struct !! 
2245 struct ext4_ext_path *path, 2262 ext4_lb !! 2246 ext4_lblk_t *lblk) 2263 { 2247 { 2264 int depth = ext_depth(inode); 2248 int depth = ext_depth(inode); 2265 struct ext4_extent *ex; 2249 struct ext4_extent *ex; 2266 ext4_lblk_t len; 2250 ext4_lblk_t len; 2267 2251 2268 ex = path[depth].p_ext; 2252 ex = path[depth].p_ext; 2269 if (ex == NULL) { 2253 if (ex == NULL) { 2270 /* there is no extent yet, so 2254 /* there is no extent yet, so gap is [0;-] */ 2271 *lblk = 0; 2255 *lblk = 0; 2272 len = EXT_MAX_BLOCKS; 2256 len = EXT_MAX_BLOCKS; 2273 } else if (*lblk < le32_to_cpu(ex->ee 2257 } else if (*lblk < le32_to_cpu(ex->ee_block)) { 2274 len = le32_to_cpu(ex->ee_bloc 2258 len = le32_to_cpu(ex->ee_block) - *lblk; 2275 } else if (*lblk >= le32_to_cpu(ex->e 2259 } else if (*lblk >= le32_to_cpu(ex->ee_block) 2276 + ext4_ext_get_actual 2260 + ext4_ext_get_actual_len(ex)) { 2277 ext4_lblk_t next; 2261 ext4_lblk_t next; 2278 2262 2279 *lblk = le32_to_cpu(ex->ee_bl 2263 *lblk = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex); 2280 next = ext4_ext_next_allocate 2264 next = ext4_ext_next_allocated_block(path); 2281 BUG_ON(next == *lblk); 2265 BUG_ON(next == *lblk); 2282 len = next - *lblk; 2266 len = next - *lblk; 2283 } else { 2267 } else { 2284 BUG(); 2268 BUG(); 2285 } 2269 } 2286 return len; 2270 return len; 2287 } 2271 } 2288 2272 2289 /* 2273 /* >> 2274 * ext4_ext_put_gap_in_cache: >> 2275 * calculate boundaries of the gap that the requested block fits into >> 2276 * and cache this gap >> 2277 */ >> 2278 static void >> 2279 ext4_ext_put_gap_in_cache(struct inode *inode, ext4_lblk_t hole_start, >> 2280 ext4_lblk_t hole_len) >> 2281 { >> 2282 struct extent_status es; >> 2283 >> 2284 ext4_es_find_extent_range(inode, &ext4_es_is_delayed, hole_start, >> 2285 hole_start + hole_len - 1, &es); >> 2286 if (es.es_len) { >> 2287 /* There's delayed extent containing lblock? */ >> 2288 if (es.es_lblk <= hole_start) >> 2289 return; >> 2290 hole_len = min(es.es_lblk - hole_start, hole_len); >> 2291 } >> 2292 ext_debug(inode, " -> %u:%u\n", hole_start, hole_len); >> 2293 ext4_es_insert_extent(inode, hole_start, hole_len, ~0, >> 2294 EXTENT_STATUS_HOLE); >> 2295 } >> 2296 >> 2297 /* 2290 * ext4_ext_rm_idx: 2298 * ext4_ext_rm_idx: 2291 * removes index from the index block. 2299 * removes index from the index block. 2292 */ 2300 */ 2293 static int ext4_ext_rm_idx(handle_t *handle, 2301 static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode, 2294 struct ext4_ext_path 2302 struct ext4_ext_path *path, int depth) 2295 { 2303 { 2296 int err; 2304 int err; 2297 ext4_fsblk_t leaf; 2305 ext4_fsblk_t leaf; 2298 int k = depth - 1; << 2299 2306 2300 /* free index block */ 2307 /* free index block */ 2301 leaf = ext4_idx_pblock(path[k].p_idx) !! 2308 depth--; 2302 if (unlikely(path[k].p_hdr->eh_entrie !! 2309 path = path + depth; 2303 EXT4_ERROR_INODE(inode, "path !! 2310 leaf = ext4_idx_pblock(path->p_idx); >> 2311 if (unlikely(path->p_hdr->eh_entries == 0)) { >> 2312 EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0"); 2304 return -EFSCORRUPTED; 2313 return -EFSCORRUPTED; 2305 } 2314 } 2306 err = ext4_ext_get_access(handle, ino !! 2315 err = ext4_ext_get_access(handle, inode, path); 2307 if (err) 2316 if (err) 2308 return err; 2317 return err; 2309 2318 2310 if (path[k].p_idx != EXT_LAST_INDEX(p !! 2319 if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) { 2311 int len = EXT_LAST_INDEX(path !! 
2320 int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx; 2312 len *= sizeof(struct ext4_ext 2321 len *= sizeof(struct ext4_extent_idx); 2313 memmove(path[k].p_idx, path[k !! 2322 memmove(path->p_idx, path->p_idx + 1, len); 2314 } 2323 } 2315 2324 2316 le16_add_cpu(&path[k].p_hdr->eh_entri !! 2325 le16_add_cpu(&path->p_hdr->eh_entries, -1); 2317 err = ext4_ext_dirty(handle, inode, p !! 2326 err = ext4_ext_dirty(handle, inode, path); 2318 if (err) 2327 if (err) 2319 return err; 2328 return err; 2320 ext_debug(inode, "index is empty, rem 2329 ext_debug(inode, "index is empty, remove it, free block %llu\n", leaf); 2321 trace_ext4_ext_rm_idx(inode, leaf); 2330 trace_ext4_ext_rm_idx(inode, leaf); 2322 2331 2323 ext4_free_blocks(handle, inode, NULL, 2332 ext4_free_blocks(handle, inode, NULL, leaf, 1, 2324 EXT4_FREE_BLOCKS_MET 2333 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); 2325 2334 2326 while (--k >= 0) { !! 2335 while (--depth >= 0) { 2327 if (path[k + 1].p_idx != EXT_ !! 2336 if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr)) 2328 break; 2337 break; 2329 err = ext4_ext_get_access(han !! 2338 path--; >> 2339 err = ext4_ext_get_access(handle, inode, path); 2330 if (err) 2340 if (err) 2331 goto clean; !! 2341 break; 2332 path[k].p_idx->ei_block = pat !! 2342 path->p_idx->ei_block = (path+1)->p_idx->ei_block; 2333 err = ext4_ext_dirty(handle, !! 2343 err = ext4_ext_dirty(handle, inode, path); 2334 if (err) 2344 if (err) 2335 goto clean; !! 2345 break; 2336 } 2346 } 2337 return 0; << 2338 << 2339 clean: << 2340 /* << 2341 * The path[k].p_bh is either unmodif << 2342 * set (see ext4_ext_get_access()). S << 2343 * of the successfully modified exten << 2344 * these extents to be checked to avo << 2345 */ << 2346 while (++k < depth) << 2347 clear_buffer_verified(path[k] << 2348 << 2349 return err; 2347 return err; 2350 } 2348 } 2351 2349 2352 /* 2350 /* 2353 * ext4_ext_calc_credits_for_single_extent: 2351 * ext4_ext_calc_credits_for_single_extent: 2354 * This routine returns max. credits that nee 2352 * This routine returns max. credits that needed to insert an extent 2355 * to the extent tree. 2353 * to the extent tree. 2356 * When pass the actual path, the caller shou 2354 * When pass the actual path, the caller should calculate credits 2357 * under i_data_sem. 2355 * under i_data_sem. 2358 */ 2356 */ 2359 int ext4_ext_calc_credits_for_single_extent(s 2357 int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks, 2360 2358 struct ext4_ext_path *path) 2361 { 2359 { 2362 if (path) { 2360 if (path) { 2363 int depth = ext_depth(inode); 2361 int depth = ext_depth(inode); 2364 int ret = 0; 2362 int ret = 0; 2365 2363 2366 /* probably there is space in 2364 /* probably there is space in leaf? */ 2367 if (le16_to_cpu(path[depth].p 2365 if (le16_to_cpu(path[depth].p_hdr->eh_entries) 2368 < le16_to_cpu 2366 < le16_to_cpu(path[depth].p_hdr->eh_max)) { 2369 2367 2370 /* 2368 /* 2371 * There are some sp 2369 * There are some space in the leaf tree, no 2372 * need to account f 2370 * need to account for leaf block credit 2373 * 2371 * 2374 * bitmaps and block 2372 * bitmaps and block group descriptor blocks 2375 * and other metadat 2373 * and other metadata blocks still need to be 2376 * accounted. 2374 * accounted. 
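 *
 * Editor's note (illustrative, not in the source): when the
 * leaf still has a free slot the charge below reduces to
 *
 *	ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
 *
 * i.e. one bitmap plus one group descriptor on top of the
 * generic metadata budget; only the case where the leaf is
 * full falls back to ext4_chunk_trans_blocks(inode, nrblocks).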
2377 */ 2375 */ 2378 /* 1 bitmap, 1 block 2376 /* 1 bitmap, 1 block group descriptor */ 2379 ret = 2 + EXT4_META_T 2377 ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb); 2380 return ret; 2378 return ret; 2381 } 2379 } 2382 } 2380 } 2383 2381 2384 return ext4_chunk_trans_blocks(inode, 2382 return ext4_chunk_trans_blocks(inode, nrblocks); 2385 } 2383 } 2386 2384 2387 /* 2385 /* 2388 * How many index/leaf blocks need to change/ 2386 * How many index/leaf blocks need to change/allocate to add @extents extents? 2389 * 2387 * 2390 * If we add a single extent, then in the wor 2388 * If we add a single extent, then in the worse case, each tree level 2391 * index/leaf need to be changed in case of t 2389 * index/leaf need to be changed in case of the tree split. 2392 * 2390 * 2393 * If more extents are inserted, they could c 2391 * If more extents are inserted, they could cause the whole tree split more 2394 * than once, but this is really rare. 2392 * than once, but this is really rare. 2395 */ 2393 */ 2396 int ext4_ext_index_trans_blocks(struct inode 2394 int ext4_ext_index_trans_blocks(struct inode *inode, int extents) 2397 { 2395 { 2398 int index; 2396 int index; 2399 int depth; 2397 int depth; 2400 2398 2401 /* If we are converting the inline da 2399 /* If we are converting the inline data, only one is needed here. */ 2402 if (ext4_has_inline_data(inode)) 2400 if (ext4_has_inline_data(inode)) 2403 return 1; 2401 return 1; 2404 2402 2405 depth = ext_depth(inode); 2403 depth = ext_depth(inode); 2406 2404 2407 if (extents <= 1) 2405 if (extents <= 1) 2408 index = depth * 2; 2406 index = depth * 2; 2409 else 2407 else 2410 index = depth * 3; 2408 index = depth * 3; 2411 2409 2412 return index; 2410 return index; 2413 } 2411 } 2414 2412 2415 static inline int get_default_free_blocks_fla 2413 static inline int get_default_free_blocks_flags(struct inode *inode) 2416 { 2414 { 2417 if (S_ISDIR(inode->i_mode) || S_ISLNK 2415 if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode) || 2418 ext4_test_inode_flag(inode, EXT4_ 2416 ext4_test_inode_flag(inode, EXT4_INODE_EA_INODE)) 2419 return EXT4_FREE_BLOCKS_METAD 2417 return EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET; 2420 else if (ext4_should_journal_data(ino 2418 else if (ext4_should_journal_data(inode)) 2421 return EXT4_FREE_BLOCKS_FORGE 2419 return EXT4_FREE_BLOCKS_FORGET; 2422 return 0; 2420 return 0; 2423 } 2421 } 2424 2422 2425 /* 2423 /* 2426 * ext4_rereserve_cluster - increment the res 2424 * ext4_rereserve_cluster - increment the reserved cluster count when 2427 * freeing a cluster 2425 * freeing a cluster with a pending reservation 2428 * 2426 * 2429 * @inode - file containing the cluster 2427 * @inode - file containing the cluster 2430 * @lblk - logical block in cluster to be res 2428 * @lblk - logical block in cluster to be reserved 2431 * 2429 * 2432 * Increments the reserved cluster count and 2430 * Increments the reserved cluster count and adjusts quota in a bigalloc 2433 * file system when freeing a partial cluster 2431 * file system when freeing a partial cluster containing at least one 2434 * delayed and unwritten block. A partial cl 2432 * delayed and unwritten block. A partial cluster meeting that 2435 * requirement will have a pending reservatio 2433 * requirement will have a pending reservation. 
If so, the 2436 * RERESERVE_CLUSTER flag is used when callin 2434 * RERESERVE_CLUSTER flag is used when calling ext4_free_blocks() to 2437 * defer reserved and allocated space account 2435 * defer reserved and allocated space accounting to a subsequent call 2438 * to this function. 2436 * to this function. 2439 */ 2437 */ 2440 static void ext4_rereserve_cluster(struct ino 2438 static void ext4_rereserve_cluster(struct inode *inode, ext4_lblk_t lblk) 2441 { 2439 { 2442 struct ext4_sb_info *sbi = EXT4_SB(in 2440 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2443 struct ext4_inode_info *ei = EXT4_I(i 2441 struct ext4_inode_info *ei = EXT4_I(inode); 2444 2442 2445 dquot_reclaim_block(inode, EXT4_C2B(s 2443 dquot_reclaim_block(inode, EXT4_C2B(sbi, 1)); 2446 2444 2447 spin_lock(&ei->i_block_reservation_lo 2445 spin_lock(&ei->i_block_reservation_lock); 2448 ei->i_reserved_data_blocks++; 2446 ei->i_reserved_data_blocks++; 2449 percpu_counter_add(&sbi->s_dirtyclust 2447 percpu_counter_add(&sbi->s_dirtyclusters_counter, 1); 2450 spin_unlock(&ei->i_block_reservation_ 2448 spin_unlock(&ei->i_block_reservation_lock); 2451 2449 2452 percpu_counter_add(&sbi->s_freecluste 2450 percpu_counter_add(&sbi->s_freeclusters_counter, 1); 2453 ext4_remove_pending(inode, lblk); 2451 ext4_remove_pending(inode, lblk); 2454 } 2452 } 2455 2453 2456 static int ext4_remove_blocks(handle_t *handl 2454 static int ext4_remove_blocks(handle_t *handle, struct inode *inode, 2457 struct ext4_ext 2455 struct ext4_extent *ex, 2458 struct partial_ 2456 struct partial_cluster *partial, 2459 ext4_lblk_t fro 2457 ext4_lblk_t from, ext4_lblk_t to) 2460 { 2458 { 2461 struct ext4_sb_info *sbi = EXT4_SB(in 2459 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2462 unsigned short ee_len = ext4_ext_get_ 2460 unsigned short ee_len = ext4_ext_get_actual_len(ex); 2463 ext4_fsblk_t last_pblk, pblk; 2461 ext4_fsblk_t last_pblk, pblk; 2464 ext4_lblk_t num; 2462 ext4_lblk_t num; 2465 int flags; 2463 int flags; 2466 2464 2467 /* only extent tail removal is allowe 2465 /* only extent tail removal is allowed */ 2468 if (from < le32_to_cpu(ex->ee_block) 2466 if (from < le32_to_cpu(ex->ee_block) || 2469 to != le32_to_cpu(ex->ee_block) + 2467 to != le32_to_cpu(ex->ee_block) + ee_len - 1) { 2470 ext4_error(sbi->s_sb, 2468 ext4_error(sbi->s_sb, 2471 "strange request: 2469 "strange request: removal(2) %u-%u from %u:%u", 2472 from, to, le32_to_ 2470 from, to, le32_to_cpu(ex->ee_block), ee_len); 2473 return 0; 2471 return 0; 2474 } 2472 } 2475 2473 2476 #ifdef EXTENTS_STATS 2474 #ifdef EXTENTS_STATS 2477 spin_lock(&sbi->s_ext_stats_lock); 2475 spin_lock(&sbi->s_ext_stats_lock); 2478 sbi->s_ext_blocks += ee_len; 2476 sbi->s_ext_blocks += ee_len; 2479 sbi->s_ext_extents++; 2477 sbi->s_ext_extents++; 2480 if (ee_len < sbi->s_ext_min) 2478 if (ee_len < sbi->s_ext_min) 2481 sbi->s_ext_min = ee_len; 2479 sbi->s_ext_min = ee_len; 2482 if (ee_len > sbi->s_ext_max) 2480 if (ee_len > sbi->s_ext_max) 2483 sbi->s_ext_max = ee_len; 2481 sbi->s_ext_max = ee_len; 2484 if (ext_depth(inode) > sbi->s_depth_m 2482 if (ext_depth(inode) > sbi->s_depth_max) 2485 sbi->s_depth_max = ext_depth( 2483 sbi->s_depth_max = ext_depth(inode); 2486 spin_unlock(&sbi->s_ext_stats_lock); 2484 spin_unlock(&sbi->s_ext_stats_lock); 2487 #endif 2485 #endif 2488 2486 2489 trace_ext4_remove_blocks(inode, ex, f 2487 trace_ext4_remove_blocks(inode, ex, from, to, partial); 2490 2488 2491 /* 2489 /* 2492 * if we have a partial cluster, and 2490 * if we have a partial cluster, and it's different from 
the 2493 * cluster of the last block in the e 2491 * cluster of the last block in the extent, we free it 2494 */ 2492 */ 2495 last_pblk = ext4_ext_pblock(ex) + ee_ 2493 last_pblk = ext4_ext_pblock(ex) + ee_len - 1; 2496 2494 2497 if (partial->state != initial && 2495 if (partial->state != initial && 2498 partial->pclu != EXT4_B2C(sbi, la 2496 partial->pclu != EXT4_B2C(sbi, last_pblk)) { 2499 if (partial->state == tofree) 2497 if (partial->state == tofree) { 2500 flags = get_default_f 2498 flags = get_default_free_blocks_flags(inode); 2501 if (ext4_is_pending(i 2499 if (ext4_is_pending(inode, partial->lblk)) 2502 flags |= EXT4 2500 flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER; 2503 ext4_free_blocks(hand 2501 ext4_free_blocks(handle, inode, NULL, 2504 EXT4 2502 EXT4_C2B(sbi, partial->pclu), 2505 sbi- 2503 sbi->s_cluster_ratio, flags); 2506 if (flags & EXT4_FREE 2504 if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER) 2507 ext4_rereserv 2505 ext4_rereserve_cluster(inode, partial->lblk); 2508 } 2506 } 2509 partial->state = initial; 2507 partial->state = initial; 2510 } 2508 } 2511 2509 2512 num = le32_to_cpu(ex->ee_block) + ee_ 2510 num = le32_to_cpu(ex->ee_block) + ee_len - from; 2513 pblk = ext4_ext_pblock(ex) + ee_len - 2511 pblk = ext4_ext_pblock(ex) + ee_len - num; 2514 2512 2515 /* 2513 /* 2516 * We free the partial cluster at the 2514 * We free the partial cluster at the end of the extent (if any), 2517 * unless the cluster is used by anot 2515 * unless the cluster is used by another extent (partial_cluster 2518 * state is nofree). If a partial cl 2516 * state is nofree). If a partial cluster exists here, it must be 2519 * shared with the last block in the 2517 * shared with the last block in the extent. 2520 */ 2518 */ 2521 flags = get_default_free_blocks_flags 2519 flags = get_default_free_blocks_flags(inode); 2522 2520 2523 /* partial, left end cluster aligned, 2521 /* partial, left end cluster aligned, right end unaligned */ 2524 if ((EXT4_LBLK_COFF(sbi, to) != sbi-> 2522 if ((EXT4_LBLK_COFF(sbi, to) != sbi->s_cluster_ratio - 1) && 2525 (EXT4_LBLK_CMASK(sbi, to) >= from 2523 (EXT4_LBLK_CMASK(sbi, to) >= from) && 2526 (partial->state != nofree)) { 2524 (partial->state != nofree)) { 2527 if (ext4_is_pending(inode, to 2525 if (ext4_is_pending(inode, to)) 2528 flags |= EXT4_FREE_BL 2526 flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER; 2529 ext4_free_blocks(handle, inod 2527 ext4_free_blocks(handle, inode, NULL, 2530 EXT4_PBLK_CM 2528 EXT4_PBLK_CMASK(sbi, last_pblk), 2531 sbi->s_clust 2529 sbi->s_cluster_ratio, flags); 2532 if (flags & EXT4_FREE_BLOCKS_ 2530 if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER) 2533 ext4_rereserve_cluste 2531 ext4_rereserve_cluster(inode, to); 2534 partial->state = initial; 2532 partial->state = initial; 2535 flags = get_default_free_bloc 2533 flags = get_default_free_blocks_flags(inode); 2536 } 2534 } 2537 2535 2538 flags |= EXT4_FREE_BLOCKS_NOFREE_LAST 2536 flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER; 2539 2537 2540 /* 2538 /* 2541 * For bigalloc file systems, we neve 2539 * For bigalloc file systems, we never free a partial cluster 2542 * at the beginning of the extent. I 2540 * at the beginning of the extent. Instead, we check to see if we 2543 * need to free it on a subsequent ca 2541 * need to free it on a subsequent call to ext4_remove_blocks, 2544 * or at the end of ext4_ext_rm_leaf 2542 * or at the end of ext4_ext_rm_leaf or ext4_ext_remove_space. 
2545 */ 2543 */ 2546 flags |= EXT4_FREE_BLOCKS_NOFREE_FIRS 2544 flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER; 2547 ext4_free_blocks(handle, inode, NULL, 2545 ext4_free_blocks(handle, inode, NULL, pblk, num, flags); 2548 2546 2549 /* reset the partial cluster if we've 2547 /* reset the partial cluster if we've freed past it */ 2550 if (partial->state != initial && part 2548 if (partial->state != initial && partial->pclu != EXT4_B2C(sbi, pblk)) 2551 partial->state = initial; 2549 partial->state = initial; 2552 2550 2553 /* 2551 /* 2554 * If we've freed the entire extent b 2552 * If we've freed the entire extent but the beginning is not left 2555 * cluster aligned and is not marked 2553 * cluster aligned and is not marked as ineligible for freeing we 2556 * record the partial cluster at the 2554 * record the partial cluster at the beginning of the extent. It 2557 * wasn't freed by the preceding ext4 2555 * wasn't freed by the preceding ext4_free_blocks() call, and we 2558 * need to look farther to the left t 2556 * need to look farther to the left to determine if it's to be freed 2559 * (not shared with another extent). 2557 * (not shared with another extent). Else, reset the partial 2560 * cluster - we're either done freei 2558 * cluster - we're either done freeing or the beginning of the 2561 * extent is left cluster aligned. 2559 * extent is left cluster aligned. 2562 */ 2560 */ 2563 if (EXT4_LBLK_COFF(sbi, from) && num 2561 if (EXT4_LBLK_COFF(sbi, from) && num == ee_len) { 2564 if (partial->state == initial 2562 if (partial->state == initial) { 2565 partial->pclu = EXT4_ 2563 partial->pclu = EXT4_B2C(sbi, pblk); 2566 partial->lblk = from; 2564 partial->lblk = from; 2567 partial->state = tofr 2565 partial->state = tofree; 2568 } 2566 } 2569 } else { 2567 } else { 2570 partial->state = initial; 2568 partial->state = initial; 2571 } 2569 } 2572 2570 2573 return 0; 2571 return 0; 2574 } 2572 } 2575 2573 2576 /* 2574 /* 2577 * ext4_ext_rm_leaf() Removes the extents ass 2575 * ext4_ext_rm_leaf() Removes the extents associated with the 2578 * blocks appearing between "start" and "end" 2576 * blocks appearing between "start" and "end". Both "start" 2579 * and "end" must appear in the same extent o 2577 * and "end" must appear in the same extent or EIO is returned. 2580 * 2578 * 2581 * @handle: The journal handle 2579 * @handle: The journal handle 2582 * @inode: The files inode 2580 * @inode: The files inode 2583 * @path: The path to the leaf 2581 * @path: The path to the leaf 2584 * @partial_cluster: The cluster which we'll 2582 * @partial_cluster: The cluster which we'll have to free if all extents 2585 * has been released from i 2583 * has been released from it. However, if this value is 2586 * negative, it's a cluster 2584 * negative, it's a cluster just to the right of the 2587 * punched region and it mu 2585 * punched region and it must not be freed. 
2588 * @start: The first block to remove 2586 * @start: The first block to remove 2589 * @end: The last block to remove 2587 * @end: The last block to remove 2590 */ 2588 */ 2591 static int 2589 static int 2592 ext4_ext_rm_leaf(handle_t *handle, struct ino 2590 ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, 2593 struct ext4_ext_path *path, 2591 struct ext4_ext_path *path, 2594 struct partial_cluster *part 2592 struct partial_cluster *partial, 2595 ext4_lblk_t start, ext4_lblk 2593 ext4_lblk_t start, ext4_lblk_t end) 2596 { 2594 { 2597 struct ext4_sb_info *sbi = EXT4_SB(in 2595 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2598 int err = 0, correct_index = 0; 2596 int err = 0, correct_index = 0; 2599 int depth = ext_depth(inode), credits 2597 int depth = ext_depth(inode), credits, revoke_credits; 2600 struct ext4_extent_header *eh; 2598 struct ext4_extent_header *eh; 2601 ext4_lblk_t a, b; 2599 ext4_lblk_t a, b; 2602 unsigned num; 2600 unsigned num; 2603 ext4_lblk_t ex_ee_block; 2601 ext4_lblk_t ex_ee_block; 2604 unsigned short ex_ee_len; 2602 unsigned short ex_ee_len; 2605 unsigned unwritten = 0; 2603 unsigned unwritten = 0; 2606 struct ext4_extent *ex; 2604 struct ext4_extent *ex; 2607 ext4_fsblk_t pblk; 2605 ext4_fsblk_t pblk; 2608 2606 2609 /* the header must be checked already 2607 /* the header must be checked already in ext4_ext_remove_space() */ 2610 ext_debug(inode, "truncate since %u i 2608 ext_debug(inode, "truncate since %u in leaf to %u\n", start, end); 2611 if (!path[depth].p_hdr) 2609 if (!path[depth].p_hdr) 2612 path[depth].p_hdr = ext_block 2610 path[depth].p_hdr = ext_block_hdr(path[depth].p_bh); 2613 eh = path[depth].p_hdr; 2611 eh = path[depth].p_hdr; 2614 if (unlikely(path[depth].p_hdr == NUL 2612 if (unlikely(path[depth].p_hdr == NULL)) { 2615 EXT4_ERROR_INODE(inode, "path 2613 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); 2616 return -EFSCORRUPTED; 2614 return -EFSCORRUPTED; 2617 } 2615 } 2618 /* find where to start removing */ 2616 /* find where to start removing */ 2619 ex = path[depth].p_ext; 2617 ex = path[depth].p_ext; 2620 if (!ex) 2618 if (!ex) 2621 ex = EXT_LAST_EXTENT(eh); 2619 ex = EXT_LAST_EXTENT(eh); 2622 2620 2623 ex_ee_block = le32_to_cpu(ex->ee_bloc 2621 ex_ee_block = le32_to_cpu(ex->ee_block); 2624 ex_ee_len = ext4_ext_get_actual_len(e 2622 ex_ee_len = ext4_ext_get_actual_len(ex); 2625 2623 2626 trace_ext4_ext_rm_leaf(inode, start, 2624 trace_ext4_ext_rm_leaf(inode, start, ex, partial); 2627 2625 2628 while (ex >= EXT_FIRST_EXTENT(eh) && 2626 while (ex >= EXT_FIRST_EXTENT(eh) && 2629 ex_ee_block + ex_ee_l 2627 ex_ee_block + ex_ee_len > start) { 2630 2628 2631 if (ext4_ext_is_unwritten(ex) 2629 if (ext4_ext_is_unwritten(ex)) 2632 unwritten = 1; 2630 unwritten = 1; 2633 else 2631 else 2634 unwritten = 0; 2632 unwritten = 0; 2635 2633 2636 ext_debug(inode, "remove ext 2634 ext_debug(inode, "remove ext %u:[%d]%d\n", ex_ee_block, 2637 unwritten, ex_ee_le 2635 unwritten, ex_ee_len); 2638 path[depth].p_ext = ex; 2636 path[depth].p_ext = ex; 2639 2637 2640 a = max(ex_ee_block, start); 2638 a = max(ex_ee_block, start); 2641 b = min(ex_ee_block + ex_ee_l 2639 b = min(ex_ee_block + ex_ee_len - 1, end); 2642 2640 2643 ext_debug(inode, " border %u 2641 ext_debug(inode, " border %u:%u\n", a, b); 2644 2642 2645 /* If this extent is beyond t 2643 /* If this extent is beyond the end of the hole, skip it */ 2646 if (end < ex_ee_block) { 2644 if (end < ex_ee_block) { 2647 /* 2645 /* 2648 * We're going to ski 2646 * We're going to skip this extent 
and move to another, 2649 * so note that its f 2647 * so note that its first cluster is in use to avoid 2650 * freeing it when re 2648 * freeing it when removing blocks. Eventually, the 2651 * right edge of the 2649 * right edge of the truncated/punched region will 2652 * be just to the lef 2650 * be just to the left. 2653 */ 2651 */ 2654 if (sbi->s_cluster_ra 2652 if (sbi->s_cluster_ratio > 1) { 2655 pblk = ext4_e 2653 pblk = ext4_ext_pblock(ex); 2656 partial->pclu 2654 partial->pclu = EXT4_B2C(sbi, pblk); 2657 partial->stat 2655 partial->state = nofree; 2658 } 2656 } 2659 ex--; 2657 ex--; 2660 ex_ee_block = le32_to 2658 ex_ee_block = le32_to_cpu(ex->ee_block); 2661 ex_ee_len = ext4_ext_ 2659 ex_ee_len = ext4_ext_get_actual_len(ex); 2662 continue; 2660 continue; 2663 } else if (b != ex_ee_block + 2661 } else if (b != ex_ee_block + ex_ee_len - 1) { 2664 EXT4_ERROR_INODE(inod 2662 EXT4_ERROR_INODE(inode, 2665 "can 2663 "can not handle truncate %u:%u " 2666 "on 2664 "on extent %u:%u", 2667 star 2665 start, end, ex_ee_block, 2668 ex_e 2666 ex_ee_block + ex_ee_len - 1); 2669 err = -EFSCORRUPTED; 2667 err = -EFSCORRUPTED; 2670 goto out; 2668 goto out; 2671 } else if (a != ex_ee_block) 2669 } else if (a != ex_ee_block) { 2672 /* remove tail of the 2670 /* remove tail of the extent */ 2673 num = a - ex_ee_block 2671 num = a - ex_ee_block; 2674 } else { 2672 } else { 2675 /* remove whole exten 2673 /* remove whole extent: excellent! */ 2676 num = 0; 2674 num = 0; 2677 } 2675 } 2678 /* 2676 /* 2679 * 3 for leaf, sb, and inode 2677 * 3 for leaf, sb, and inode plus 2 (bmap and group 2680 * descriptor) for each block 2678 * descriptor) for each block group; assume two block 2681 * groups plus ex_ee_len/bloc 2679 * groups plus ex_ee_len/blocks_per_block_group for 2682 * the worst case 2680 * the worst case 2683 */ 2681 */ 2684 credits = 7 + 2*(ex_ee_len/EX 2682 credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb)); 2685 if (ex == EXT_FIRST_EXTENT(eh 2683 if (ex == EXT_FIRST_EXTENT(eh)) { 2686 correct_index = 1; 2684 correct_index = 1; 2687 credits += (ext_depth 2685 credits += (ext_depth(inode)) + 1; 2688 } 2686 } 2689 credits += EXT4_MAXQUOTAS_TRA 2687 credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb); 2690 /* 2688 /* 2691 * We may end up freeing some 2689 * We may end up freeing some index blocks and data from the 2692 * punched range. Note that p 2690 * punched range. Note that partial clusters are accounted for 2693 * by ext4_free_data_revoke_c 2691 * by ext4_free_data_revoke_credits(). 
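 *
 * Worked example (illustrative figures): a 100-block extent
 * on a 4KiB-block filesystem (32768 blocks per group) gives
 * credits = 7 + 2 * (100 / 32768) = 7 above, plus depth + 1
 * when the first extent in the leaf forces an index fixup,
 * plus the quota budget; the revoke budget computed next
 * covers the tree depth and the b - a + 1 dropped blocks.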
2694 */ 2692 */ 2695 revoke_credits = 2693 revoke_credits = 2696 ext4_free_metadata_re 2694 ext4_free_metadata_revoke_credits(inode->i_sb, 2697 2695 ext_depth(inode)) + 2698 ext4_free_data_revoke 2696 ext4_free_data_revoke_credits(inode, b - a + 1); 2699 2697 2700 err = ext4_datasem_ensure_cre 2698 err = ext4_datasem_ensure_credits(handle, inode, credits, 2701 2699 credits, revoke_credits); 2702 if (err) { 2700 if (err) { 2703 if (err > 0) 2701 if (err > 0) 2704 err = -EAGAIN 2702 err = -EAGAIN; 2705 goto out; 2703 goto out; 2706 } 2704 } 2707 2705 2708 err = ext4_ext_get_access(han 2706 err = ext4_ext_get_access(handle, inode, path + depth); 2709 if (err) 2707 if (err) 2710 goto out; 2708 goto out; 2711 2709 2712 err = ext4_remove_blocks(hand 2710 err = ext4_remove_blocks(handle, inode, ex, partial, a, b); 2713 if (err) 2711 if (err) 2714 goto out; 2712 goto out; 2715 2713 2716 if (num == 0) 2714 if (num == 0) 2717 /* this extent is rem 2715 /* this extent is removed; mark slot entirely unused */ 2718 ext4_ext_store_pblock 2716 ext4_ext_store_pblock(ex, 0); 2719 2717 2720 ex->ee_len = cpu_to_le16(num) 2718 ex->ee_len = cpu_to_le16(num); 2721 /* 2719 /* 2722 * Do not mark unwritten if a 2720 * Do not mark unwritten if all the blocks in the 2723 * extent have been removed. 2721 * extent have been removed. 2724 */ 2722 */ 2725 if (unwritten && num) 2723 if (unwritten && num) 2726 ext4_ext_mark_unwritt 2724 ext4_ext_mark_unwritten(ex); 2727 /* 2725 /* 2728 * If the extent was complete 2726 * If the extent was completely released, 2729 * we need to remove it from 2727 * we need to remove it from the leaf 2730 */ 2728 */ 2731 if (num == 0) { 2729 if (num == 0) { 2732 if (end != EXT_MAX_BL 2730 if (end != EXT_MAX_BLOCKS - 1) { 2733 /* 2731 /* 2734 * For hole p 2732 * For hole punching, we need to scoot all the 2735 * extents up 2733 * extents up when an extent is removed so that 2736 * we dont ha 2734 * we dont have blank extents in the middle 2737 */ 2735 */ 2738 memmove(ex, e 2736 memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) * 2739 sizeo 2737 sizeof(struct ext4_extent)); 2740 2738 2741 /* Now get ri 2739 /* Now get rid of the one at the end */ 2742 memset(EXT_LA 2740 memset(EXT_LAST_EXTENT(eh), 0, 2743 sizeo 2741 sizeof(struct ext4_extent)); 2744 } 2742 } 2745 le16_add_cpu(&eh->eh_ 2743 le16_add_cpu(&eh->eh_entries, -1); 2746 } 2744 } 2747 2745 2748 err = ext4_ext_dirty(handle, 2746 err = ext4_ext_dirty(handle, inode, path + depth); 2749 if (err) 2747 if (err) 2750 goto out; 2748 goto out; 2751 2749 2752 ext_debug(inode, "new extent: 2750 ext_debug(inode, "new extent: %u:%u:%llu\n", ex_ee_block, num, 2753 ext4_ext_pblo 2751 ext4_ext_pblock(ex)); 2754 ex--; 2752 ex--; 2755 ex_ee_block = le32_to_cpu(ex- 2753 ex_ee_block = le32_to_cpu(ex->ee_block); 2756 ex_ee_len = ext4_ext_get_actu 2754 ex_ee_len = ext4_ext_get_actual_len(ex); 2757 } 2755 } 2758 2756 2759 if (correct_index && eh->eh_entries) 2757 if (correct_index && eh->eh_entries) 2760 err = ext4_ext_correct_indexe 2758 err = ext4_ext_correct_indexes(handle, inode, path); 2761 2759 2762 /* 2760 /* 2763 * If there's a partial cluster and a 2761 * If there's a partial cluster and at least one extent remains in 2764 * the leaf, free the partial cluster 2762 * the leaf, free the partial cluster if it isn't shared with the 2765 * current extent. If it is shared w 2763 * current extent. 
If it is shared with the current extent 2766 * we reset the partial cluster becau 2764 * we reset the partial cluster because we've reached the start of the 2767 * truncated/punched region and we're 2765 * truncated/punched region and we're done removing blocks. 2768 */ 2766 */ 2769 if (partial->state == tofree && ex >= 2767 if (partial->state == tofree && ex >= EXT_FIRST_EXTENT(eh)) { 2770 pblk = ext4_ext_pblock(ex) + 2768 pblk = ext4_ext_pblock(ex) + ex_ee_len - 1; 2771 if (partial->pclu != EXT4_B2C 2769 if (partial->pclu != EXT4_B2C(sbi, pblk)) { 2772 int flags = get_defau 2770 int flags = get_default_free_blocks_flags(inode); 2773 2771 2774 if (ext4_is_pending(i 2772 if (ext4_is_pending(inode, partial->lblk)) 2775 flags |= EXT4 2773 flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER; 2776 ext4_free_blocks(hand 2774 ext4_free_blocks(handle, inode, NULL, 2777 EXT4 2775 EXT4_C2B(sbi, partial->pclu), 2778 sbi- 2776 sbi->s_cluster_ratio, flags); 2779 if (flags & EXT4_FREE 2777 if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER) 2780 ext4_rereserv 2778 ext4_rereserve_cluster(inode, partial->lblk); 2781 } 2779 } 2782 partial->state = initial; 2780 partial->state = initial; 2783 } 2781 } 2784 2782 2785 /* if this leaf is free, then we shou 2783 /* if this leaf is free, then we should 2786 * remove it from index block above * 2784 * remove it from index block above */ 2787 if (err == 0 && eh->eh_entries == 0 & 2785 if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL) 2788 err = ext4_ext_rm_idx(handle, 2786 err = ext4_ext_rm_idx(handle, inode, path, depth); 2789 2787 2790 out: 2788 out: 2791 return err; 2789 return err; 2792 } 2790 } 2793 2791 2794 /* 2792 /* 2795 * ext4_ext_more_to_rm: 2793 * ext4_ext_more_to_rm: 2796 * returns 1 if current index has to be freed 2794 * returns 1 if current index has to be freed (even partial) 2797 */ 2795 */ 2798 static int 2796 static int 2799 ext4_ext_more_to_rm(struct ext4_ext_path *pat 2797 ext4_ext_more_to_rm(struct ext4_ext_path *path) 2800 { 2798 { 2801 BUG_ON(path->p_idx == NULL); 2799 BUG_ON(path->p_idx == NULL); 2802 2800 2803 if (path->p_idx < EXT_FIRST_INDEX(pat 2801 if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr)) 2804 return 0; 2802 return 0; 2805 2803 2806 /* 2804 /* 2807 * if truncate on deeper level happen 2805 * if truncate on deeper level happened, it wasn't partial, 2808 * so we have to consider current ind 2806 * so we have to consider current index for truncation 2809 */ 2807 */ 2810 if (le16_to_cpu(path->p_hdr->eh_entri 2808 if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block) 2811 return 0; 2809 return 0; 2812 return 1; 2810 return 1; 2813 } 2811 } 2814 2812 2815 int ext4_ext_remove_space(struct inode *inode 2813 int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start, 2816 ext4_lblk_t end) 2814 ext4_lblk_t end) 2817 { 2815 { 2818 struct ext4_sb_info *sbi = EXT4_SB(in 2816 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2819 int depth = ext_depth(inode); 2817 int depth = ext_depth(inode); 2820 struct ext4_ext_path *path = NULL; 2818 struct ext4_ext_path *path = NULL; 2821 struct partial_cluster partial; 2819 struct partial_cluster partial; 2822 handle_t *handle; 2820 handle_t *handle; 2823 int i = 0, err = 0; 2821 int i = 0, err = 0; 2824 2822 2825 partial.pclu = 0; 2823 partial.pclu = 0; 2826 partial.lblk = 0; 2824 partial.lblk = 0; 2827 partial.state = initial; 2825 partial.state = initial; 2828 2826 2829 ext_debug(inode, "truncate since %u t 2827 ext_debug(inode, "truncate since %u to %u\n", start, end); 2830 2828 2831 
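/*
 * Annotation (editor's sketch, not part of the source): the body
 * below is an iterative, right-to-left depth-first teardown of
 * the extent tree. In outline:
 *
 *	if (end falls inside some extent)
 *		split that extent at end + 1;
 *	while (i >= 0 && !err)
 *		if (i == depth)		// leaf: remove extents, go up
 *			ext4_ext_rm_leaf(...), i--;
 *		else if (index entries remain at level i)
 *			read the child block, i++;
 *		else			// level done: drop empty index
 *			maybe ext4_ext_rm_idx(...), i--;
 *
 * A failed journal extension in ext4_ext_rm_leaf() surfaces as
 * -EAGAIN, which restarts the whole walk at the "again:" label.
 */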
/* probably first extent we're gonna 2829 /* probably first extent we're gonna free will be last in block */ 2832 handle = ext4_journal_start_with_revo 2830 handle = ext4_journal_start_with_revoke(inode, EXT4_HT_TRUNCATE, 2833 depth + 1, 2831 depth + 1, 2834 ext4_free_metadata_re 2832 ext4_free_metadata_revoke_credits(inode->i_sb, depth)); 2835 if (IS_ERR(handle)) 2833 if (IS_ERR(handle)) 2836 return PTR_ERR(handle); 2834 return PTR_ERR(handle); 2837 2835 2838 again: 2836 again: 2839 trace_ext4_ext_remove_space(inode, st 2837 trace_ext4_ext_remove_space(inode, start, end, depth); 2840 2838 2841 /* 2839 /* 2842 * Check if we are removing extents i 2840 * Check if we are removing extents inside the extent tree. If that 2843 * is the case, we are going to punch 2841 * is the case, we are going to punch a hole inside the extent tree 2844 * so we have to check whether we nee 2842 * so we have to check whether we need to split the extent covering 2845 * the last block to remove so we can 2843 * the last block to remove so we can easily remove the part of it 2846 * in ext4_ext_rm_leaf(). 2844 * in ext4_ext_rm_leaf(). 2847 */ 2845 */ 2848 if (end < EXT_MAX_BLOCKS - 1) { 2846 if (end < EXT_MAX_BLOCKS - 1) { 2849 struct ext4_extent *ex; 2847 struct ext4_extent *ex; 2850 ext4_lblk_t ee_block, ex_end, 2848 ext4_lblk_t ee_block, ex_end, lblk; 2851 ext4_fsblk_t pblk; 2849 ext4_fsblk_t pblk; 2852 2850 2853 /* find extent for or closest 2851 /* find extent for or closest extent to this block */ 2854 path = ext4_find_extent(inode 2852 path = ext4_find_extent(inode, end, NULL, 2855 EXT4_ 2853 EXT4_EX_NOCACHE | EXT4_EX_NOFAIL); 2856 if (IS_ERR(path)) { 2854 if (IS_ERR(path)) { 2857 ext4_journal_stop(han 2855 ext4_journal_stop(handle); 2858 return PTR_ERR(path); 2856 return PTR_ERR(path); 2859 } 2857 } 2860 depth = ext_depth(inode); 2858 depth = ext_depth(inode); 2861 /* Leaf not may not exist onl 2859 /* Leaf not may not exist only if inode has no blocks at all */ 2862 ex = path[depth].p_ext; 2860 ex = path[depth].p_ext; 2863 if (!ex) { 2861 if (!ex) { 2864 if (depth) { 2862 if (depth) { 2865 EXT4_ERROR_IN 2863 EXT4_ERROR_INODE(inode, 2866 2864 "path[%d].p_hdr == NULL", 2867 2865 depth); 2868 err = -EFSCOR 2866 err = -EFSCORRUPTED; 2869 } 2867 } 2870 goto out; 2868 goto out; 2871 } 2869 } 2872 2870 2873 ee_block = le32_to_cpu(ex->ee 2871 ee_block = le32_to_cpu(ex->ee_block); 2874 ex_end = ee_block + ext4_ext_ 2872 ex_end = ee_block + ext4_ext_get_actual_len(ex) - 1; 2875 2873 2876 /* 2874 /* 2877 * See if the last block is i 2875 * See if the last block is inside the extent, if so split 2878 * the extent at 'end' block 2876 * the extent at 'end' block so we can easily remove the 2879 * tail of the first part of 2877 * tail of the first part of the split extent in 2880 * ext4_ext_rm_leaf(). 2878 * ext4_ext_rm_leaf(). 2881 */ 2879 */ 2882 if (end >= ee_block && end < 2880 if (end >= ee_block && end < ex_end) { 2883 2881 2884 /* 2882 /* 2885 * If we're going to 2883 * If we're going to split the extent, note that 2886 * the cluster contai 2884 * the cluster containing the block after 'end' is 2887 * in use to avoid fr 2885 * in use to avoid freeing it when removing blocks. 
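 *
 * Example (hypothetical numbers): punching blocks 0..99 out
 * of an extent covering 0..127 with a 16-block cluster marks
 * the cluster of block 100 nofree, because surviving blocks
 * 100..111 share that cluster with punched blocks 96..99.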
2888 */ 2886 */ 2889 if (sbi->s_cluster_ra 2887 if (sbi->s_cluster_ratio > 1) { 2890 pblk = ext4_e 2888 pblk = ext4_ext_pblock(ex) + end - ee_block + 1; 2891 partial.pclu 2889 partial.pclu = EXT4_B2C(sbi, pblk); 2892 partial.state 2890 partial.state = nofree; 2893 } 2891 } 2894 2892 2895 /* 2893 /* 2896 * Split the extent i 2894 * Split the extent in two so that 'end' is the last 2897 * block in the first 2895 * block in the first new extent. Also we should not 2898 * fail removing spac 2896 * fail removing space due to ENOSPC so try to use 2899 * reserved block if 2897 * reserved block if that happens. 2900 */ 2898 */ 2901 path = ext4_force_spl !! 2899 err = ext4_force_split_extent_at(handle, inode, &path, 2902 !! 2900 end + 1, 1); 2903 if (IS_ERR(path)) { !! 2901 if (err < 0) 2904 err = PTR_ERR << 2905 goto out; 2902 goto out; 2906 } !! 2903 2907 } else if (sbi->s_cluster_rat 2904 } else if (sbi->s_cluster_ratio > 1 && end >= ex_end && 2908 partial.state == i 2905 partial.state == initial) { 2909 /* 2906 /* 2910 * If we're punching, 2907 * If we're punching, there's an extent to the right. 2911 * If the partial clu 2908 * If the partial cluster hasn't been set, set it to 2912 * that extent's firs 2909 * that extent's first cluster and its state to nofree 2913 * so it won't be fre 2910 * so it won't be freed should it contain blocks to be 2914 * removed. If it's a 2911 * removed. If it's already set (tofree/nofree), we're 2915 * retrying and keep 2912 * retrying and keep the original partial cluster info 2916 * so a cluster marke 2913 * so a cluster marked tofree as a result of earlier 2917 * extent removal is 2914 * extent removal is not lost. 2918 */ 2915 */ 2919 lblk = ex_end + 1; 2916 lblk = ex_end + 1; 2920 err = ext4_ext_search 2917 err = ext4_ext_search_right(inode, path, &lblk, &pblk, 2921 2918 NULL); 2922 if (err < 0) 2919 if (err < 0) 2923 goto out; 2920 goto out; 2924 if (pblk) { 2921 if (pblk) { 2925 partial.pclu 2922 partial.pclu = EXT4_B2C(sbi, pblk); 2926 partial.state 2923 partial.state = nofree; 2927 } 2924 } 2928 } 2925 } 2929 } 2926 } 2930 /* 2927 /* 2931 * We start scanning from right side, 2928 * We start scanning from right side, freeing all the blocks 2932 * after i_size and walking into the 2929 * after i_size and walking into the tree depth-wise. 2933 */ 2930 */ 2934 depth = ext_depth(inode); 2931 depth = ext_depth(inode); 2935 if (path) { 2932 if (path) { 2936 int k = i = depth; 2933 int k = i = depth; 2937 while (--k > 0) 2934 while (--k > 0) 2938 path[k].p_block = 2935 path[k].p_block = 2939 le16_to_cpu(p 2936 le16_to_cpu(path[k].p_hdr->eh_entries)+1; 2940 } else { 2937 } else { 2941 path = kcalloc(depth + 1, siz 2938 path = kcalloc(depth + 1, sizeof(struct ext4_ext_path), 2942 GFP_NOFS | __G 2939 GFP_NOFS | __GFP_NOFAIL); 2943 if (path == NULL) { 2940 if (path == NULL) { 2944 ext4_journal_stop(han 2941 ext4_journal_stop(handle); 2945 return -ENOMEM; 2942 return -ENOMEM; 2946 } 2943 } 2947 path[0].p_maxdepth = path[0]. 
2944 path[0].p_maxdepth = path[0].p_depth = depth; 2948 path[0].p_hdr = ext_inode_hdr 2945 path[0].p_hdr = ext_inode_hdr(inode); 2949 i = 0; 2946 i = 0; 2950 2947 2951 if (ext4_ext_check(inode, pat 2948 if (ext4_ext_check(inode, path[0].p_hdr, depth, 0)) { 2952 err = -EFSCORRUPTED; 2949 err = -EFSCORRUPTED; 2953 goto out; 2950 goto out; 2954 } 2951 } 2955 } 2952 } 2956 err = 0; 2953 err = 0; 2957 2954 2958 while (i >= 0 && err == 0) { 2955 while (i >= 0 && err == 0) { 2959 if (i == depth) { 2956 if (i == depth) { 2960 /* this is leaf block 2957 /* this is leaf block */ 2961 err = ext4_ext_rm_lea 2958 err = ext4_ext_rm_leaf(handle, inode, path, 2962 2959 &partial, start, end); 2963 /* root level has p_b 2960 /* root level has p_bh == NULL, brelse() eats this */ 2964 ext4_ext_path_brelse( !! 2961 brelse(path[i].p_bh); >> 2962 path[i].p_bh = NULL; 2965 i--; 2963 i--; 2966 continue; 2964 continue; 2967 } 2965 } 2968 2966 2969 /* this is index block */ 2967 /* this is index block */ 2970 if (!path[i].p_hdr) { 2968 if (!path[i].p_hdr) { 2971 ext_debug(inode, "ini 2969 ext_debug(inode, "initialize header\n"); 2972 path[i].p_hdr = ext_b 2970 path[i].p_hdr = ext_block_hdr(path[i].p_bh); 2973 } 2971 } 2974 2972 2975 if (!path[i].p_idx) { 2973 if (!path[i].p_idx) { 2976 /* this level hasn't 2974 /* this level hasn't been touched yet */ 2977 path[i].p_idx = EXT_L 2975 path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr); 2978 path[i].p_block = le1 2976 path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1; 2979 ext_debug(inode, "ini 2977 ext_debug(inode, "init index ptr: hdr 0x%p, num %d\n", 2980 path[i].p_h 2978 path[i].p_hdr, 2981 le16_to_cpu 2979 le16_to_cpu(path[i].p_hdr->eh_entries)); 2982 } else { 2980 } else { 2983 /* we were already he 2981 /* we were already here, see at next index */ 2984 path[i].p_idx--; 2982 path[i].p_idx--; 2985 } 2983 } 2986 2984 2987 ext_debug(inode, "level %d - 2985 ext_debug(inode, "level %d - index, first 0x%p, cur 0x%p\n", 2988 i, EXT_FIRST_ 2986 i, EXT_FIRST_INDEX(path[i].p_hdr), 2989 path[i].p_idx 2987 path[i].p_idx); 2990 if (ext4_ext_more_to_rm(path 2988 if (ext4_ext_more_to_rm(path + i)) { 2991 struct buffer_head *b 2989 struct buffer_head *bh; 2992 /* go to the next lev 2990 /* go to the next level */ 2993 ext_debug(inode, "mov 2991 ext_debug(inode, "move to level %d (block %llu)\n", 2994 i + 1, ext4 2992 i + 1, ext4_idx_pblock(path[i].p_idx)); 2995 memset(path + i + 1, 2993 memset(path + i + 1, 0, sizeof(*path)); 2996 bh = read_extent_tree 2994 bh = read_extent_tree_block(inode, path[i].p_idx, 2997 2995 depth - i - 1, 2998 2996 EXT4_EX_NOCACHE); 2999 if (IS_ERR(bh)) { 2997 if (IS_ERR(bh)) { 3000 /* should we 2998 /* should we reset i_size? */ 3001 err = PTR_ERR 2999 err = PTR_ERR(bh); 3002 break; 3000 break; 3003 } 3001 } 3004 /* Yield here to deal 3002 /* Yield here to deal with large extent trees. 3005 * Should be a no-op 3003 * Should be a no-op if we did IO above. 
*/ 3006 cond_resched(); 3004 cond_resched(); 3007 if (WARN_ON(i + 1 > d 3005 if (WARN_ON(i + 1 > depth)) { 3008 err = -EFSCOR 3006 err = -EFSCORRUPTED; 3009 break; 3007 break; 3010 } 3008 } 3011 path[i + 1].p_bh = bh 3009 path[i + 1].p_bh = bh; 3012 3010 3013 /* save actual number 3011 /* save actual number of indexes since this 3014 * number is changed 3012 * number is changed at the next iteration */ 3015 path[i].p_block = le1 3013 path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries); 3016 i++; 3014 i++; 3017 } else { 3015 } else { 3018 /* we finished proces 3016 /* we finished processing this index, go up */ 3019 if (path[i].p_hdr->eh 3017 if (path[i].p_hdr->eh_entries == 0 && i > 0) { 3020 /* index is e 3018 /* index is empty, remove it; 3021 * handle mus 3019 * handle must be already prepared by the 3022 * truncatei_ 3020 * truncatei_leaf() */ 3023 err = ext4_ex 3021 err = ext4_ext_rm_idx(handle, inode, path, i); 3024 } 3022 } 3025 /* root level has p_b 3023 /* root level has p_bh == NULL, brelse() eats this */ 3026 ext4_ext_path_brelse( !! 3024 brelse(path[i].p_bh); >> 3025 path[i].p_bh = NULL; 3027 i--; 3026 i--; 3028 ext_debug(inode, "ret 3027 ext_debug(inode, "return to level %d\n", i); 3029 } 3028 } 3030 } 3029 } 3031 3030 3032 trace_ext4_ext_remove_space_done(inod 3031 trace_ext4_ext_remove_space_done(inode, start, end, depth, &partial, 3033 path 3032 path->p_hdr->eh_entries); 3034 3033 3035 /* 3034 /* 3036 * if there's a partial cluster and w 3035 * if there's a partial cluster and we have removed the first extent 3037 * in the file, then we also free the 3036 * in the file, then we also free the partial cluster, if any 3038 */ 3037 */ 3039 if (partial.state == tofree && err == 3038 if (partial.state == tofree && err == 0) { 3040 int flags = get_default_free_ 3039 int flags = get_default_free_blocks_flags(inode); 3041 3040 3042 if (ext4_is_pending(inode, pa 3041 if (ext4_is_pending(inode, partial.lblk)) 3043 flags |= EXT4_FREE_BL 3042 flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER; 3044 ext4_free_blocks(handle, inod 3043 ext4_free_blocks(handle, inode, NULL, 3045 EXT4_C2B(sbi 3044 EXT4_C2B(sbi, partial.pclu), 3046 sbi->s_clust 3045 sbi->s_cluster_ratio, flags); 3047 if (flags & EXT4_FREE_BLOCKS_ 3046 if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER) 3048 ext4_rereserve_cluste 3047 ext4_rereserve_cluster(inode, partial.lblk); 3049 partial.state = initial; 3048 partial.state = initial; 3050 } 3049 } 3051 3050 3052 /* TODO: flexible tree reduction shou 3051 /* TODO: flexible tree reduction should be here */ 3053 if (path->p_hdr->eh_entries == 0) { 3052 if (path->p_hdr->eh_entries == 0) { 3054 /* 3053 /* 3055 * truncate to zero freed all 3054 * truncate to zero freed all the tree, 3056 * so we need to correct eh_d 3055 * so we need to correct eh_depth 3057 */ 3056 */ 3058 err = ext4_ext_get_access(han 3057 err = ext4_ext_get_access(handle, inode, path); 3059 if (err == 0) { 3058 if (err == 0) { 3060 ext_inode_hdr(inode)- 3059 ext_inode_hdr(inode)->eh_depth = 0; 3061 ext_inode_hdr(inode)- 3060 ext_inode_hdr(inode)->eh_max = 3062 cpu_to_le16(e 3061 cpu_to_le16(ext4_ext_space_root(inode, 0)); 3063 err = ext4_ext_dirty( 3062 err = ext4_ext_dirty(handle, inode, path); 3064 } 3063 } 3065 } 3064 } 3066 out: 3065 out: 3067 ext4_free_ext_path(path); 3066 ext4_free_ext_path(path); 3068 path = NULL; 3067 path = NULL; 3069 if (err == -EAGAIN) 3068 if (err == -EAGAIN) 3070 goto again; 3069 goto again; 3071 ext4_journal_stop(handle); 3070 ext4_journal_stop(handle); 3072 3071 3073 return err; 3072 
return err; 3074 } 3073 } 3075 3074 3076 /* 3075 /* 3077 * called at mount time 3076 * called at mount time 3078 */ 3077 */ 3079 void ext4_ext_init(struct super_block *sb) 3078 void ext4_ext_init(struct super_block *sb) 3080 { 3079 { 3081 /* 3080 /* 3082 * possible initialization would be h 3081 * possible initialization would be here 3083 */ 3082 */ 3084 3083 3085 if (ext4_has_feature_extents(sb)) { 3084 if (ext4_has_feature_extents(sb)) { 3086 #if defined(AGGRESSIVE_TEST) || defined(CHECK 3085 #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS) 3087 printk(KERN_INFO "EXT4-fs: fi 3086 printk(KERN_INFO "EXT4-fs: file extents enabled" 3088 #ifdef AGGRESSIVE_TEST 3087 #ifdef AGGRESSIVE_TEST 3089 ", aggressive tests" 3088 ", aggressive tests" 3090 #endif 3089 #endif 3091 #ifdef CHECK_BINSEARCH 3090 #ifdef CHECK_BINSEARCH 3092 ", check binsearch" 3091 ", check binsearch" 3093 #endif 3092 #endif 3094 #ifdef EXTENTS_STATS 3093 #ifdef EXTENTS_STATS 3095 ", stats" 3094 ", stats" 3096 #endif 3095 #endif 3097 "\n"); 3096 "\n"); 3098 #endif 3097 #endif 3099 #ifdef EXTENTS_STATS 3098 #ifdef EXTENTS_STATS 3100 spin_lock_init(&EXT4_SB(sb)-> 3099 spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock); 3101 EXT4_SB(sb)->s_ext_min = 1 << 3100 EXT4_SB(sb)->s_ext_min = 1 << 30; 3102 EXT4_SB(sb)->s_ext_max = 0; 3101 EXT4_SB(sb)->s_ext_max = 0; 3103 #endif 3102 #endif 3104 } 3103 } 3105 } 3104 } 3106 3105 3107 /* 3106 /* 3108 * called at umount time 3107 * called at umount time 3109 */ 3108 */ 3110 void ext4_ext_release(struct super_block *sb) 3109 void ext4_ext_release(struct super_block *sb) 3111 { 3110 { 3112 if (!ext4_has_feature_extents(sb)) 3111 if (!ext4_has_feature_extents(sb)) 3113 return; 3112 return; 3114 3113 3115 #ifdef EXTENTS_STATS 3114 #ifdef EXTENTS_STATS 3116 if (EXT4_SB(sb)->s_ext_blocks && EXT4 3115 if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) { 3117 struct ext4_sb_info *sbi = EX 3116 struct ext4_sb_info *sbi = EXT4_SB(sb); 3118 printk(KERN_ERR "EXT4-fs: %lu 3117 printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n", 3119 sbi->s_ext_blocks, sb 3118 sbi->s_ext_blocks, sbi->s_ext_extents, 3120 sbi->s_ext_blocks / s 3119 sbi->s_ext_blocks / sbi->s_ext_extents); 3121 printk(KERN_ERR "EXT4-fs: ext 3120 printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n", 3122 sbi->s_ext_min, sbi-> 3121 sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max); 3123 } 3122 } 3124 #endif 3123 #endif 3125 } 3124 } 3126 3125 3127 static void ext4_zeroout_es(struct inode *ino 3126 static void ext4_zeroout_es(struct inode *inode, struct ext4_extent *ex) 3128 { 3127 { 3129 ext4_lblk_t ee_block; 3128 ext4_lblk_t ee_block; 3130 ext4_fsblk_t ee_pblock; 3129 ext4_fsblk_t ee_pblock; 3131 unsigned int ee_len; 3130 unsigned int ee_len; 3132 3131 3133 ee_block = le32_to_cpu(ex->ee_block) 3132 ee_block = le32_to_cpu(ex->ee_block); 3134 ee_len = ext4_ext_get_actual_len(e 3133 ee_len = ext4_ext_get_actual_len(ex); 3135 ee_pblock = ext4_ext_pblock(ex); 3134 ee_pblock = ext4_ext_pblock(ex); 3136 3135 3137 if (ee_len == 0) 3136 if (ee_len == 0) 3138 return; 3137 return; 3139 3138 3140 ext4_es_insert_extent(inode, ee_block 3139 ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock, 3141 EXTENT_STATUS_W !! 3140 EXTENT_STATUS_WRITTEN); 3142 } 3141 } 3143 3142 3144 /* FIXME!! we need to try to merge to left or 3143 /* FIXME!! 
we need to try to merge to left or right after zero-out */ 3145 static int ext4_ext_zeroout(struct inode *ino 3144 static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex) 3146 { 3145 { 3147 ext4_fsblk_t ee_pblock; 3146 ext4_fsblk_t ee_pblock; 3148 unsigned int ee_len; 3147 unsigned int ee_len; 3149 3148 3150 ee_len = ext4_ext_get_actual_len(e 3149 ee_len = ext4_ext_get_actual_len(ex); 3151 ee_pblock = ext4_ext_pblock(ex); 3150 ee_pblock = ext4_ext_pblock(ex); 3152 return ext4_issue_zeroout(inode, le32 3151 return ext4_issue_zeroout(inode, le32_to_cpu(ex->ee_block), ee_pblock, 3153 ee_len); 3152 ee_len); 3154 } 3153 } 3155 3154 3156 /* 3155 /* 3157 * ext4_split_extent_at() splits an extent at 3156 * ext4_split_extent_at() splits an extent at given block. 3158 * 3157 * 3159 * @handle: the journal handle 3158 * @handle: the journal handle 3160 * @inode: the file inode 3159 * @inode: the file inode 3161 * @path: the path to the extent 3160 * @path: the path to the extent 3162 * @split: the logical block where the extent 3161 * @split: the logical block where the extent is splitted. 3163 * @split_flags: indicates if the extent coul 3162 * @split_flags: indicates if the extent could be zeroout if split fails, and 3164 * the states(init or unwritten 3163 * the states(init or unwritten) of new extents. 3165 * @flags: flags used to insert new extent to 3164 * @flags: flags used to insert new extent to extent tree. 3166 * 3165 * 3167 * 3166 * 3168 * Splits extent [a, b] into two extents [a, 3167 * Splits extent [a, b] into two extents [a, @split) and [@split, b], states 3169 * of which are determined by split_flag. 3168 * of which are determined by split_flag. 3170 * 3169 * 3171 * There are two cases: 3170 * There are two cases: 3172 * a> the extent are splitted into two exten 3171 * a> the extent are splitted into two extent. 3173 * b> split is not needed, and just mark the 3172 * b> split is not needed, and just mark the extent. 3174 * 3173 * 3175 * Return an extent path pointer on success, !! 3174 * return 0 on success. 3176 */ 3175 */ 3177 static struct ext4_ext_path *ext4_split_exten !! 3176 static int ext4_split_extent_at(handle_t *handle, 3178 !! 3177 struct inode *inode, 3179 !! 3178 struct ext4_ext_path **ppath, 3180 !! 3179 ext4_lblk_t split, 3181 !! 
3180 int split_flag, >> 3181 int flags) 3182 { 3182 { >> 3183 struct ext4_ext_path *path = *ppath; 3183 ext4_fsblk_t newblock; 3184 ext4_fsblk_t newblock; 3184 ext4_lblk_t ee_block; 3185 ext4_lblk_t ee_block; 3185 struct ext4_extent *ex, newex, orig_e 3186 struct ext4_extent *ex, newex, orig_ex, zero_ex; 3186 struct ext4_extent *ex2 = NULL; 3187 struct ext4_extent *ex2 = NULL; 3187 unsigned int ee_len, depth; 3188 unsigned int ee_len, depth; 3188 int err = 0; 3189 int err = 0; 3189 3190 3190 BUG_ON((split_flag & (EXT4_EXT_DATA_V 3191 BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) == 3191 (EXT4_EXT_DATA_VALID1 | EXT4_E 3192 (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)); 3192 3193 3193 ext_debug(inode, "logical block %llu\ 3194 ext_debug(inode, "logical block %llu\n", (unsigned long long)split); 3194 3195 3195 ext4_ext_show_leaf(inode, path); 3196 ext4_ext_show_leaf(inode, path); 3196 3197 3197 depth = ext_depth(inode); 3198 depth = ext_depth(inode); 3198 ex = path[depth].p_ext; 3199 ex = path[depth].p_ext; 3199 ee_block = le32_to_cpu(ex->ee_block); 3200 ee_block = le32_to_cpu(ex->ee_block); 3200 ee_len = ext4_ext_get_actual_len(ex); 3201 ee_len = ext4_ext_get_actual_len(ex); 3201 newblock = split - ee_block + ext4_ex 3202 newblock = split - ee_block + ext4_ext_pblock(ex); 3202 3203 3203 BUG_ON(split < ee_block || split >= ( 3204 BUG_ON(split < ee_block || split >= (ee_block + ee_len)); 3204 BUG_ON(!ext4_ext_is_unwritten(ex) && 3205 BUG_ON(!ext4_ext_is_unwritten(ex) && 3205 split_flag & (EXT4_EXT_MAY_ZER 3206 split_flag & (EXT4_EXT_MAY_ZEROOUT | 3206 EXT4_EXT_MARK_UN 3207 EXT4_EXT_MARK_UNWRIT1 | 3207 EXT4_EXT_MARK_UN 3208 EXT4_EXT_MARK_UNWRIT2)); 3208 3209 3209 err = ext4_ext_get_access(handle, ino 3210 err = ext4_ext_get_access(handle, inode, path + depth); 3210 if (err) 3211 if (err) 3211 goto out; 3212 goto out; 3212 3213 3213 if (split == ee_block) { 3214 if (split == ee_block) { 3214 /* 3215 /* 3215 * case b: block @split is th 3216 * case b: block @split is the block that the extent begins with 3216 * then we just change the st 3217 * then we just change the state of the extent, and splitting 3217 * is not needed. 3218 * is not needed. 
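 *
 * E.g. (illustrative): asking to split extent [100, 150) at
 * @split == 100 only toggles its written/unwritten state,
 * whereas @split == 120 (case a below) yields the two
 * extents [100, 120) and [120, 150).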
3218 */ 3219 */ 3219 if (split_flag & EXT4_EXT_MAR 3220 if (split_flag & EXT4_EXT_MARK_UNWRIT2) 3220 ext4_ext_mark_unwritt 3221 ext4_ext_mark_unwritten(ex); 3221 else 3222 else 3222 ext4_ext_mark_initial 3223 ext4_ext_mark_initialized(ex); 3223 3224 3224 if (!(flags & EXT4_GET_BLOCKS 3225 if (!(flags & EXT4_GET_BLOCKS_PRE_IO)) 3225 ext4_ext_try_to_merge 3226 ext4_ext_try_to_merge(handle, inode, path, ex); 3226 3227 3227 err = ext4_ext_dirty(handle, 3228 err = ext4_ext_dirty(handle, inode, path + path->p_depth); 3228 goto out; 3229 goto out; 3229 } 3230 } 3230 3231 3231 /* case a */ 3232 /* case a */ 3232 memcpy(&orig_ex, ex, sizeof(orig_ex)) 3233 memcpy(&orig_ex, ex, sizeof(orig_ex)); 3233 ex->ee_len = cpu_to_le16(split - ee_b 3234 ex->ee_len = cpu_to_le16(split - ee_block); 3234 if (split_flag & EXT4_EXT_MARK_UNWRIT 3235 if (split_flag & EXT4_EXT_MARK_UNWRIT1) 3235 ext4_ext_mark_unwritten(ex); 3236 ext4_ext_mark_unwritten(ex); 3236 3237 3237 /* 3238 /* 3238 * path may lead to new leaf, not to 3239 * path may lead to new leaf, not to original leaf any more 3239 * after ext4_ext_insert_extent() ret 3240 * after ext4_ext_insert_extent() returns, 3240 */ 3241 */ 3241 err = ext4_ext_dirty(handle, inode, p 3242 err = ext4_ext_dirty(handle, inode, path + depth); 3242 if (err) 3243 if (err) 3243 goto fix_extent_len; 3244 goto fix_extent_len; 3244 3245 3245 ex2 = &newex; 3246 ex2 = &newex; 3246 ex2->ee_block = cpu_to_le32(split); 3247 ex2->ee_block = cpu_to_le32(split); 3247 ex2->ee_len = cpu_to_le16(ee_len - 3248 ex2->ee_len = cpu_to_le16(ee_len - (split - ee_block)); 3248 ext4_ext_store_pblock(ex2, newblock); 3249 ext4_ext_store_pblock(ex2, newblock); 3249 if (split_flag & EXT4_EXT_MARK_UNWRIT 3250 if (split_flag & EXT4_EXT_MARK_UNWRIT2) 3250 ext4_ext_mark_unwritten(ex2); 3251 ext4_ext_mark_unwritten(ex2); 3251 3252 3252 path = ext4_ext_insert_extent(handle, !! 3253 err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags); 3253 if (!IS_ERR(path)) << 3254 goto out; << 3255 << 3256 err = PTR_ERR(path); << 3257 if (err != -ENOSPC && err != -EDQUOT 3254 if (err != -ENOSPC && err != -EDQUOT && err != -ENOMEM) 3258 return path; !! 
3255 goto out; 3259 << 3260 /* << 3261 * Get a new path to try to zeroout o << 3262 * Using EXT4_EX_NOFAIL guarantees th << 3263 * will not return -ENOMEM, otherwise << 3264 * retry in do_writepages(), and a WA << 3265 * in ext4_da_update_reserve_space() << 3266 * ee_len causing the i_reserved_data << 3267 */ << 3268 path = ext4_find_extent(inode, ee_blo << 3269 if (IS_ERR(path)) { << 3270 EXT4_ERROR_INODE(inode, "Fail << 3271 split, PTR_E << 3272 return path; << 3273 } << 3274 depth = ext_depth(inode); << 3275 ex = path[depth].p_ext; << 3276 3256 3277 if (EXT4_EXT_MAY_ZEROOUT & split_flag 3257 if (EXT4_EXT_MAY_ZEROOUT & split_flag) { 3278 if (split_flag & (EXT4_EXT_DA 3258 if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) { 3279 if (split_flag & EXT4 3259 if (split_flag & EXT4_EXT_DATA_VALID1) { 3280 err = ext4_ex 3260 err = ext4_ext_zeroout(inode, ex2); 3281 zero_ex.ee_bl 3261 zero_ex.ee_block = ex2->ee_block; 3282 zero_ex.ee_le 3262 zero_ex.ee_len = cpu_to_le16( 3283 3263 ext4_ext_get_actual_len(ex2)); 3284 ext4_ext_stor 3264 ext4_ext_store_pblock(&zero_ex, 3285 3265 ext4_ext_pblock(ex2)); 3286 } else { 3266 } else { 3287 err = ext4_ex 3267 err = ext4_ext_zeroout(inode, ex); 3288 zero_ex.ee_bl 3268 zero_ex.ee_block = ex->ee_block; 3289 zero_ex.ee_le 3269 zero_ex.ee_len = cpu_to_le16( 3290 3270 ext4_ext_get_actual_len(ex)); 3291 ext4_ext_stor 3271 ext4_ext_store_pblock(&zero_ex, 3292 3272 ext4_ext_pblock(ex)); 3293 } 3273 } 3294 } else { 3274 } else { 3295 err = ext4_ext_zeroou 3275 err = ext4_ext_zeroout(inode, &orig_ex); 3296 zero_ex.ee_block = or 3276 zero_ex.ee_block = orig_ex.ee_block; 3297 zero_ex.ee_len = cpu_ 3277 zero_ex.ee_len = cpu_to_le16( 3298 3278 ext4_ext_get_actual_len(&orig_ex)); 3299 ext4_ext_store_pblock 3279 ext4_ext_store_pblock(&zero_ex, 3300 3280 ext4_ext_pblock(&orig_ex)); 3301 } 3281 } 3302 3282 3303 if (!err) { 3283 if (!err) { 3304 /* update the extent 3284 /* update the extent length and mark as initialized */ 3305 ex->ee_len = cpu_to_l 3285 ex->ee_len = cpu_to_le16(ee_len); 3306 ext4_ext_try_to_merge 3286 ext4_ext_try_to_merge(handle, inode, path, ex); 3307 err = ext4_ext_dirty( 3287 err = ext4_ext_dirty(handle, inode, path + path->p_depth); 3308 if (!err) 3288 if (!err) 3309 /* update ext 3289 /* update extent status tree */ 3310 ext4_zeroout_ 3290 ext4_zeroout_es(inode, &zero_ex); 3311 /* If we failed at th 3291 /* If we failed at this point, we don't know in which 3312 * state the extent t 3292 * state the extent tree exactly is so don't try to fix 3313 * length of the orig 3293 * length of the original extent as it may do even more 3314 * damage. 3294 * damage. 3315 */ 3295 */ 3316 goto out; 3296 goto out; 3317 } 3297 } 3318 } 3298 } 3319 3299 3320 fix_extent_len: 3300 fix_extent_len: 3321 ex->ee_len = orig_ex.ee_len; 3301 ex->ee_len = orig_ex.ee_len; 3322 /* 3302 /* 3323 * Ignore ext4_ext_dirty return value 3303 * Ignore ext4_ext_dirty return value since we are already in error path 3324 * and err is a non-zero error code. 3304 * and err is a non-zero error code. 3325 */ 3305 */ 3326 ext4_ext_dirty(handle, inode, path + 3306 ext4_ext_dirty(handle, inode, path + path->p_depth); >> 3307 return err; 3327 out: 3308 out: 3328 if (err) { << 3329 ext4_free_ext_path(path); << 3330 path = ERR_PTR(err); << 3331 } << 3332 ext4_ext_show_leaf(inode, path); 3309 ext4_ext_show_leaf(inode, path); 3333 return path; !! 3310 return err; 3334 } 3311 } 3335 3312 3336 /* 3313 /* 3337 * ext4_split_extent() splits an extent and m !! 
3314 * ext4_split_extent() splits an extent and marks the extent which is covered 3338 * by @map as split_flags indicates 3315 * by @map as split_flags indicates 3339 * 3316 * 3340 * It may result in splitting the extent into 3317 * It may result in splitting the extent into multiple extents (up to three) 3341 * There are three possibilities: 3318 * There are three possibilities: 3342 * a> There is no split required 3319 * a> There is no split required 3343 * b> Splits in two extents: Split is happe 3320 * b> Splits in two extents: Split is happening at either end of the extent 3344 * c> Splits in three extents: Someone is sp 3321 * c> Splits in three extents: Someone is splitting in the middle of the extent 3345 * 3322 * 3346 */ 3323 */ 3347 static struct ext4_ext_path *ext4_split_exten !! 3324 static int ext4_split_extent(handle_t *handle, 3348 !! 3325 struct inode *inode, 3349 !! 3326 struct ext4_ext_path **ppath, 3350 !! 3327 struct ext4_map_blocks *map, 3351 !! 3328 int split_flag, 3352 !! 3329 int flags) 3353 { 3330 { >> 3331 struct ext4_ext_path *path = *ppath; 3354 ext4_lblk_t ee_block; 3332 ext4_lblk_t ee_block; 3355 struct ext4_extent *ex; 3333 struct ext4_extent *ex; 3356 unsigned int ee_len, depth; 3334 unsigned int ee_len, depth; >> 3335 int err = 0; 3357 int unwritten; 3336 int unwritten; 3358 int split_flag1, flags1; 3337 int split_flag1, flags1; >> 3338 int allocated = map->m_len; 3359 3339 3360 depth = ext_depth(inode); 3340 depth = ext_depth(inode); 3361 ex = path[depth].p_ext; 3341 ex = path[depth].p_ext; 3362 ee_block = le32_to_cpu(ex->ee_block); 3342 ee_block = le32_to_cpu(ex->ee_block); 3363 ee_len = ext4_ext_get_actual_len(ex); 3343 ee_len = ext4_ext_get_actual_len(ex); 3364 unwritten = ext4_ext_is_unwritten(ex) 3344 unwritten = ext4_ext_is_unwritten(ex); 3365 3345 3366 if (map->m_lblk + map->m_len < ee_blo 3346 if (map->m_lblk + map->m_len < ee_block + ee_len) { 3367 split_flag1 = split_flag & EX 3347 split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT; 3368 flags1 = flags | EXT4_GET_BLO 3348 flags1 = flags | EXT4_GET_BLOCKS_PRE_IO; 3369 if (unwritten) 3349 if (unwritten) 3370 split_flag1 |= EXT4_E 3350 split_flag1 |= EXT4_EXT_MARK_UNWRIT1 | 3371 EXT4_E 3351 EXT4_EXT_MARK_UNWRIT2; 3372 if (split_flag & EXT4_EXT_DAT 3352 if (split_flag & EXT4_EXT_DATA_VALID2) 3373 split_flag1 |= EXT4_E 3353 split_flag1 |= EXT4_EXT_DATA_VALID1; 3374 path = ext4_split_extent_at(h !! 3354 err = ext4_split_extent_at(handle, inode, ppath, 3375 map->m_lblk + 3355 map->m_lblk + map->m_len, split_flag1, flags1); 3376 if (IS_ERR(path)) !! 3356 if (err) 3377 return path; !! 3357 goto out; 3378 /* !! 3358 } else { 3379 * Update path is required be !! 3359 allocated = ee_len - (map->m_lblk - ee_block); 3380 * may result in split of ori << 3381 path = ext4_find_extent(inode << 3382 if (IS_ERR(path)) << 3383 return path; << 3384 depth = ext_depth(inode); << 3385 ex = path[depth].p_ext; << 3386 if (!ex) { << 3387 EXT4_ERROR_INODE(inod << 3388 (unsi << 3389 ext4_free_ext_path(pa << 3390 return ERR_PTR(-EFSCO << 3391 } << 3392 unwritten = ext4_ext_is_unwri << 3393 } 3360 } >> 3361 /* >> 3362 * Update path is required because previous ext4_split_extent_at() may >> 3363 * result in split of original leaf or extent zeroout.
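>> * (For instance, if inserting the second half failed with ENOSPC and
>> * the extent was zeroed out instead, the two halves may have been
>> * merged back together, so the leaf the cached path points into may
>> * no longer describe the split extent.)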
>> 3364 */ >> 3365 path = ext4_find_extent(inode, map->m_lblk, ppath, flags); >> 3366 if (IS_ERR(path)) >> 3367 return PTR_ERR(path); >> 3368 depth = ext_depth(inode); >> 3369 ex = path[depth].p_ext; >> 3370 if (!ex) { >> 3371 EXT4_ERROR_INODE(inode, "unexpected hole at %lu", >> 3372 (unsigned long) map->m_lblk); >> 3373 return -EFSCORRUPTED; >> 3374 } >> 3375 unwritten = ext4_ext_is_unwritten(ex); 3395 3376 3396 if (map->m_lblk >= ee_block) { 3377 if (map->m_lblk >= ee_block) { 3397 split_flag1 = split_flag & EX 3378 split_flag1 = split_flag & EXT4_EXT_DATA_VALID2; 3398 if (unwritten) { 3379 if (unwritten) { 3399 split_flag1 |= EXT4_E 3380 split_flag1 |= EXT4_EXT_MARK_UNWRIT1; 3400 split_flag1 |= split_ 3381 split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT | 3401 3382 EXT4_EXT_MARK_UNWRIT2); 3402 } 3383 } 3403 path = ext4_split_extent_at(h !! 3384 err = ext4_split_extent_at(handle, inode, ppath, 3404 map->m_lblk, 3385 map->m_lblk, split_flag1, flags); 3405 if (IS_ERR(path)) !! 3386 if (err) 3406 return path; !! 3387 goto out; 3407 } 3388 } 3408 3389 3409 if (allocated) { << 3410 if (map->m_lblk + map->m_len << 3411 *allocated = ee_len - << 3412 else << 3413 *allocated = map->m_l << 3414 } << 3415 ext4_ext_show_leaf(inode, path); 3390 ext4_ext_show_leaf(inode, path); 3416 return path; !! 3391 out: >> 3392 return err ? err : allocated; 3417 } 3393 } 3418 3394 3419 /* 3395 /* 3420 * This function is called by ext4_ext_map_bl 3396 * This function is called by ext4_ext_map_blocks() if someone tries to write 3421 * to an unwritten extent. It may result in s 3397 * to an unwritten extent. It may result in splitting the unwritten 3422 * extent into multiple extents (up to three 3398 * extent into multiple extents (up to three - one initialized and two 3423 * unwritten). 3399 * unwritten). 3424 * There are three possibilities: 3400 * There are three possibilities: 3425 * a> There is no split required: Entire ex 3401 * a> There is no split required: Entire extent should be initialized 3426 * b> Splits in two extents: Write is happe 3402 * b> Splits in two extents: Write is happening at either end of the extent 3427 * c> Splits in three extents: Someone is wr 3403 * c> Splits in three extents: Someone is writing in the middle of the extent 3428 * 3404 * 3429 * Pre-conditions: 3405 * Pre-conditions: 3430 * - The extent pointed to by 'path' is unwr 3406 * - The extent pointed to by 'path' is unwritten. 3431 * - The extent pointed to by 'path' contain 3407 * - The extent pointed to by 'path' contains a superset 3432 * of the logical span [map->m_lblk, map-> 3408 * of the logical span [map->m_lblk, map->m_lblk + map->m_len). 3433 * 3409 * 3434 * Post-conditions on success: 3410 * Post-conditions on success: 3435 * - the returned value is the number of blo 3411 * - the returned value is the number of blocks beyond map->m_lblk 3436 * that are allocated and initialized. 3412 * that are allocated and initialized. 3437 * It is guaranteed to be >= map->m_len. 3413 * It is guaranteed to be >= map->m_len. 3438 */ 3414 */ 3439 static struct ext4_ext_path * !! 3415 static int ext4_ext_convert_to_initialized(handle_t *handle, 3440 ext4_ext_convert_to_initialized(handle_t *han !! 3416 struct inode *inode, 3441 struct ext4_map_block !! 3417 struct ext4_map_blocks *map, 3442 int flags, unsigned i !!
3418 struct ext4_ext_path **ppath, >> 3419 int flags) 3443 { 3420 { >> 3421 struct ext4_ext_path *path = *ppath; 3444 struct ext4_sb_info *sbi; 3422 struct ext4_sb_info *sbi; 3445 struct ext4_extent_header *eh; 3423 struct ext4_extent_header *eh; 3446 struct ext4_map_blocks split_map; 3424 struct ext4_map_blocks split_map; 3447 struct ext4_extent zero_ex1, zero_ex2 3425 struct ext4_extent zero_ex1, zero_ex2; 3448 struct ext4_extent *ex, *abut_ex; 3426 struct ext4_extent *ex, *abut_ex; 3449 ext4_lblk_t ee_block, eof_block; 3427 ext4_lblk_t ee_block, eof_block; 3450 unsigned int ee_len, depth, map_len = 3428 unsigned int ee_len, depth, map_len = map->m_len; >> 3429 int allocated = 0, max_zeroout = 0; 3451 int err = 0; 3430 int err = 0; 3452 int split_flag = EXT4_EXT_DATA_VALID2 3431 int split_flag = EXT4_EXT_DATA_VALID2; 3453 unsigned int max_zeroout = 0; << 3454 3432 3455 ext_debug(inode, "logical block %llu, 3433 ext_debug(inode, "logical block %llu, max_blocks %u\n", 3456 (unsigned long long)map->m_ 3434 (unsigned long long)map->m_lblk, map_len); 3457 3435 3458 sbi = EXT4_SB(inode->i_sb); 3436 sbi = EXT4_SB(inode->i_sb); 3459 eof_block = (EXT4_I(inode)->i_disksiz 3437 eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1) 3460 >> inode->i_sb->s_blo 3438 >> inode->i_sb->s_blocksize_bits; 3461 if (eof_block < map->m_lblk + map_len 3439 if (eof_block < map->m_lblk + map_len) 3462 eof_block = map->m_lblk + map 3440 eof_block = map->m_lblk + map_len; 3463 3441 3464 depth = ext_depth(inode); 3442 depth = ext_depth(inode); 3465 eh = path[depth].p_hdr; 3443 eh = path[depth].p_hdr; 3466 ex = path[depth].p_ext; 3444 ex = path[depth].p_ext; 3467 ee_block = le32_to_cpu(ex->ee_block); 3445 ee_block = le32_to_cpu(ex->ee_block); 3468 ee_len = ext4_ext_get_actual_len(ex); 3446 ee_len = ext4_ext_get_actual_len(ex); 3469 zero_ex1.ee_len = 0; 3447 zero_ex1.ee_len = 0; 3470 zero_ex2.ee_len = 0; 3448 zero_ex2.ee_len = 0; 3471 3449 3472 trace_ext4_ext_convert_to_initialized 3450 trace_ext4_ext_convert_to_initialized_enter(inode, map, ex); 3473 3451 3474 /* Pre-conditions */ 3452 /* Pre-conditions */ 3475 BUG_ON(!ext4_ext_is_unwritten(ex)); 3453 BUG_ON(!ext4_ext_is_unwritten(ex)); 3476 BUG_ON(!in_range(map->m_lblk, ee_bloc 3454 BUG_ON(!in_range(map->m_lblk, ee_block, ee_len)); 3477 3455 3478 /* 3456 /* 3479 * Attempt to transfer newly initiali 3457 * Attempt to transfer newly initialized blocks from the currently 3480 * unwritten extent to its neighbor. 3458 * unwritten extent to its neighbor. This is much cheaper 3481 * than an insertion followed by a me 3459 * than an insertion followed by a merge as those involve costly 3482 * memmove() calls. Transferring to t 3460 * memmove() calls. Transferring to the left is the common case in 3483 * steady state for workloads doing f 3461 * steady state for workloads doing fallocate(FALLOC_FL_KEEP_SIZE) 3484 * followed by append writes. 3462 * followed by append writes. 3485 * 3463 * 3486 * Limitations of the current logic: 3464 * Limitations of the current logic: 3487 * - L1: we do not deal with writes 3465 * - L1: we do not deal with writes covering the whole extent. 3488 * This would require removing the 3466 * This would require removing the extent if the transfer 3489 * is possible. 3467 * is possible. 3490 * - L2: we only attempt to merge wi 3468 * - L2: we only attempt to merge with an extent stored in the 3491 * same extent tree node. 3469 * same extent tree node. 
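 * For example (an illustrative layout): with an initialized extent
 * covering blocks 96..99 directly followed, logically and physically,
 * by an unwritten extent covering blocks 100..107, a 4-block write at
 * block 100 can be satisfied by shrinking ex to 104..107 and growing
 * abut_ex to 96..103 - no extent is inserted and no memmove() of the
 * leaf is needed.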
3492 */ 3470 */ 3493 *allocated = 0; << 3494 if ((map->m_lblk == ee_block) && 3471 if ((map->m_lblk == ee_block) && 3495 /* See if we can merge left * 3472 /* See if we can merge left */ 3496 (map_len < ee_len) && 3473 (map_len < ee_len) && /*L1*/ 3497 (ex > EXT_FIRST_EXTENT(eh))) 3474 (ex > EXT_FIRST_EXTENT(eh))) { /*L2*/ 3498 ext4_lblk_t prev_lblk; 3475 ext4_lblk_t prev_lblk; 3499 ext4_fsblk_t prev_pblk, ee_pb 3476 ext4_fsblk_t prev_pblk, ee_pblk; 3500 unsigned int prev_len; 3477 unsigned int prev_len; 3501 3478 3502 abut_ex = ex - 1; 3479 abut_ex = ex - 1; 3503 prev_lblk = le32_to_cpu(abut_ 3480 prev_lblk = le32_to_cpu(abut_ex->ee_block); 3504 prev_len = ext4_ext_get_actua 3481 prev_len = ext4_ext_get_actual_len(abut_ex); 3505 prev_pblk = ext4_ext_pblock(a 3482 prev_pblk = ext4_ext_pblock(abut_ex); 3506 ee_pblk = ext4_ext_pblock(ex) 3483 ee_pblk = ext4_ext_pblock(ex); 3507 3484 3508 /* 3485 /* 3509 * A transfer of blocks from 3486 * A transfer of blocks from 'ex' to 'abut_ex' is allowed 3510 * upon those conditions: 3487 * upon those conditions: 3511 * - C1: abut_ex is initializ 3488 * - C1: abut_ex is initialized, 3512 * - C2: abut_ex is logically 3489 * - C2: abut_ex is logically abutting ex, 3513 * - C3: abut_ex is physicall 3490 * - C3: abut_ex is physically abutting ex, 3514 * - C4: abut_ex can receive 3491 * - C4: abut_ex can receive the additional blocks without 3515 * overflowing the (initial 3492 * overflowing the (initialized) length limit. 3516 */ 3493 */ 3517 if ((!ext4_ext_is_unwritten(a 3494 if ((!ext4_ext_is_unwritten(abut_ex)) && /*C1*/ 3518 ((prev_lblk + prev_le 3495 ((prev_lblk + prev_len) == ee_block) && /*C2*/ 3519 ((prev_pblk + prev_le 3496 ((prev_pblk + prev_len) == ee_pblk) && /*C3*/ 3520 (prev_len < (EXT_INIT 3497 (prev_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/ 3521 err = ext4_ext_get_ac 3498 err = ext4_ext_get_access(handle, inode, path + depth); 3522 if (err) 3499 if (err) 3523 goto errout; !! 3500 goto out; 3524 3501 3525 trace_ext4_ext_conver 3502 trace_ext4_ext_convert_to_initialized_fastpath(inode, 3526 map, ex, abut 3503 map, ex, abut_ex); 3527 3504 3528 /* Shift the start of 3505 /* Shift the start of ex by 'map_len' blocks */ 3529 ex->ee_block = cpu_to 3506 ex->ee_block = cpu_to_le32(ee_block + map_len); 3530 ext4_ext_store_pblock 3507 ext4_ext_store_pblock(ex, ee_pblk + map_len); 3531 ex->ee_len = cpu_to_l 3508 ex->ee_len = cpu_to_le16(ee_len - map_len); 3532 ext4_ext_mark_unwritt 3509 ext4_ext_mark_unwritten(ex); /* Restore the flag */ 3533 3510 3534 /* Extend abut_ex by 3511 /* Extend abut_ex by 'map_len' blocks */ 3535 abut_ex->ee_len = cpu 3512 abut_ex->ee_len = cpu_to_le16(prev_len + map_len); 3536 3513 3537 /* Result: number of 3514 /* Result: number of initialized blocks past m_lblk */ 3538 *allocated = map_len; !! 
3515 allocated = map_len; 3539 } 3516 } 3540 } else if (((map->m_lblk + map_len) = 3517 } else if (((map->m_lblk + map_len) == (ee_block + ee_len)) && 3541 (map_len < ee_len) && 3518 (map_len < ee_len) && /*L1*/ 3542 ex < EXT_LAST_EXTENT(eh)) 3519 ex < EXT_LAST_EXTENT(eh)) { /*L2*/ 3543 /* See if we can merge right 3520 /* See if we can merge right */ 3544 ext4_lblk_t next_lblk; 3521 ext4_lblk_t next_lblk; 3545 ext4_fsblk_t next_pblk, ee_pb 3522 ext4_fsblk_t next_pblk, ee_pblk; 3546 unsigned int next_len; 3523 unsigned int next_len; 3547 3524 3548 abut_ex = ex + 1; 3525 abut_ex = ex + 1; 3549 next_lblk = le32_to_cpu(abut_ 3526 next_lblk = le32_to_cpu(abut_ex->ee_block); 3550 next_len = ext4_ext_get_actua 3527 next_len = ext4_ext_get_actual_len(abut_ex); 3551 next_pblk = ext4_ext_pblock(a 3528 next_pblk = ext4_ext_pblock(abut_ex); 3552 ee_pblk = ext4_ext_pblock(ex) 3529 ee_pblk = ext4_ext_pblock(ex); 3553 3530 3554 /* 3531 /* 3555 * A transfer of blocks from 3532 * A transfer of blocks from 'ex' to 'abut_ex' is allowed 3556 * upon those conditions: 3533 * upon those conditions: 3557 * - C1: abut_ex is initializ 3534 * - C1: abut_ex is initialized, 3558 * - C2: abut_ex is logically 3535 * - C2: abut_ex is logically abutting ex, 3559 * - C3: abut_ex is physicall 3536 * - C3: abut_ex is physically abutting ex, 3560 * - C4: abut_ex can receive 3537 * - C4: abut_ex can receive the additional blocks without 3561 * overflowing the (initial 3538 * overflowing the (initialized) length limit. 3562 */ 3539 */ 3563 if ((!ext4_ext_is_unwritten(a 3540 if ((!ext4_ext_is_unwritten(abut_ex)) && /*C1*/ 3564 ((map->m_lblk + map_len) 3541 ((map->m_lblk + map_len) == next_lblk) && /*C2*/ 3565 ((ee_pblk + ee_len) == ne 3542 ((ee_pblk + ee_len) == next_pblk) && /*C3*/ 3566 (next_len < (EXT_INIT_MAX 3543 (next_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/ 3567 err = ext4_ext_get_ac 3544 err = ext4_ext_get_access(handle, inode, path + depth); 3568 if (err) 3545 if (err) 3569 goto errout; !! 3546 goto out; 3570 3547 3571 trace_ext4_ext_conver 3548 trace_ext4_ext_convert_to_initialized_fastpath(inode, 3572 map, ex, abut 3549 map, ex, abut_ex); 3573 3550 3574 /* Shift the start of 3551 /* Shift the start of abut_ex by 'map_len' blocks */ 3575 abut_ex->ee_block = c 3552 abut_ex->ee_block = cpu_to_le32(next_lblk - map_len); 3576 ext4_ext_store_pblock 3553 ext4_ext_store_pblock(abut_ex, next_pblk - map_len); 3577 ex->ee_len = cpu_to_l 3554 ex->ee_len = cpu_to_le16(ee_len - map_len); 3578 ext4_ext_mark_unwritt 3555 ext4_ext_mark_unwritten(ex); /* Restore the flag */ 3579 3556 3580 /* Extend abut_ex by 3557 /* Extend abut_ex by 'map_len' blocks */ 3581 abut_ex->ee_len = cpu 3558 abut_ex->ee_len = cpu_to_le16(next_len + map_len); 3582 3559 3583 /* Result: number of 3560 /* Result: number of initialized blocks past m_lblk */ 3584 *allocated = map_len; !! 3561 allocated = map_len; 3585 } 3562 } 3586 } 3563 } 3587 if (*allocated) { !! 3564 if (allocated) { 3588 /* Mark the block containing 3565 /* Mark the block containing both extents as dirty */ 3589 err = ext4_ext_dirty(handle, 3566 err = ext4_ext_dirty(handle, inode, path + depth); 3590 3567 3591 /* Update path to point to th 3568 /* Update path to point to the right extent */ 3592 path[depth].p_ext = abut_ex; 3569 path[depth].p_ext = abut_ex; 3593 if (err) << 3594 goto errout; << 3595 goto out; 3570 goto out; 3596 } else 3571 } else 3597 *allocated = ee_len - (map->m !! 
3572 allocated = ee_len - (map->m_lblk - ee_block); 3598 3573 3599 WARN_ON(map->m_lblk < ee_block); 3574 WARN_ON(map->m_lblk < ee_block); 3600 /* 3575 /* 3601 * It is safe to convert extent to in 3576 * It is safe to convert extent to initialized via explicit 3602 * zeroout only if extent is fully in 3577 * zeroout only if extent is fully inside i_size or new_size. 3603 */ 3578 */ 3604 split_flag |= ee_block + ee_len <= eo 3579 split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0; 3605 3580 3606 if (EXT4_EXT_MAY_ZEROOUT & split_flag 3581 if (EXT4_EXT_MAY_ZEROOUT & split_flag) 3607 max_zeroout = sbi->s_extent_m 3582 max_zeroout = sbi->s_extent_max_zeroout_kb >> 3608 (inode->i_sb->s_block 3583 (inode->i_sb->s_blocksize_bits - 10); 3609 3584 3610 /* 3585 /* 3611 * five cases: 3586 * five cases: 3612 * 1. split the extent into three ext 3587 * 1. split the extent into three extents. 3613 * 2. split the extent into two exten 3588 * 2. split the extent into two extents, zeroout the head of the first 3614 * extent. 3589 * extent. 3615 * 3. split the extent into two exten 3590 * 3. split the extent into two extents, zeroout the tail of the second 3616 * extent. 3591 * extent. 3617 * 4. split the extent into two exten 3592 * 4. split the extent into two extents without zeroout. 3618 * 5. no splitting needed, just possi 3593 * 5. no splitting needed, just possibly zeroout the head and / or the 3619 * tail of the extent. 3594 * tail of the extent. 3620 */ 3595 */ 3621 split_map.m_lblk = map->m_lblk; 3596 split_map.m_lblk = map->m_lblk; 3622 split_map.m_len = map->m_len; 3597 split_map.m_len = map->m_len; 3623 3598 3624 if (max_zeroout && (*allocated > spli !! 3599 if (max_zeroout && (allocated > split_map.m_len)) { 3625 if (*allocated <= max_zeroout !! 3600 if (allocated <= max_zeroout) { 3626 /* case 3 or 5 */ 3601 /* case 3 or 5 */ 3627 zero_ex1.ee_block = 3602 zero_ex1.ee_block = 3628 cpu_to_le32( 3603 cpu_to_le32(split_map.m_lblk + 3629 3604 split_map.m_len); 3630 zero_ex1.ee_len = 3605 zero_ex1.ee_len = 3631 cpu_to_le16(* !! 3606 cpu_to_le16(allocated - split_map.m_len); 3632 ext4_ext_store_pblock 3607 ext4_ext_store_pblock(&zero_ex1, 3633 ext4_ext_pblo 3608 ext4_ext_pblock(ex) + split_map.m_lblk + 3634 split_map.m_l 3609 split_map.m_len - ee_block); 3635 err = ext4_ext_zeroou 3610 err = ext4_ext_zeroout(inode, &zero_ex1); 3636 if (err) 3611 if (err) 3637 goto fallback 3612 goto fallback; 3638 split_map.m_len = *al !! 3613 split_map.m_len = allocated; 3639 } 3614 } 3640 if (split_map.m_lblk - ee_blo 3615 if (split_map.m_lblk - ee_block + split_map.m_len < 3641 3616 max_zeroout) { 3642 /* case 2 or 5 */ 3617 /* case 2 or 5 */ 3643 if (split_map.m_lblk 3618 if (split_map.m_lblk != ee_block) { 3644 zero_ex2.ee_b 3619 zero_ex2.ee_block = ex->ee_block; 3645 zero_ex2.ee_l 3620 zero_ex2.ee_len = cpu_to_le16(split_map.m_lblk - 3646 3621 ee_block); 3647 ext4_ext_stor 3622 ext4_ext_store_pblock(&zero_ex2, 3648 3623 ext4_ext_pblock(ex)); 3649 err = ext4_ex 3624 err = ext4_ext_zeroout(inode, &zero_ex2); 3650 if (err) 3625 if (err) 3651 goto 3626 goto fallback; 3652 } 3627 } 3653 3628 3654 split_map.m_len += sp 3629 split_map.m_len += split_map.m_lblk - ee_block; 3655 split_map.m_lblk = ee 3630 split_map.m_lblk = ee_block; 3656 *allocated = map->m_l !! 3631 allocated = map->m_len; 3657 } 3632 } 3658 } 3633 } 3659 3634 3660 fallback: 3635 fallback: 3661 path = ext4_split_extent(handle, inod !! 3636 err = ext4_split_extent(handle, inode, ppath, &split_map, split_flag, 3662 flags, NULL) !!
3637 flags); 3663 if (IS_ERR(path)) !! 3638 if (err > 0) 3664 return path; !! 3639 err = 0; 3665 out: 3640 out: 3666 /* If we got a failure, don't 3641 /* If we got a failure, don't zero out the status tree */ 3667 ext4_zeroout_es(inode, &zero_ex1); !! 3642 if (!err) { 3668 ext4_zeroout_es(inode, &zero_ex2); !! 3643 ext4_zeroout_es(inode, &zero_ex1); 3669 return path; !! 3644 ext4_zeroout_es(inode, &zero_ex2); 3670 !! 3645 } 3671 errout: !! 3646 return err ? err : allocated; 3672 ext4_free_ext_path(path); << 3673 return ERR_PTR(err); << 3674 } 3647 } 3675 3648 3676 /* 3649 /* 3677 * This function is called by ext4_ext_map_bl 3650 * This function is called by ext4_ext_map_blocks() from 3678 * ext4_get_blocks_dio_write() when DIO to wr 3651 * ext4_get_blocks_dio_write() when DIO to write 3679 * to an unwritten extent. 3652 * to an unwritten extent. 3680 * 3653 * 3681 * Writing to an unwritten extent may result 3654 * Writing to an unwritten extent may result in splitting the unwritten 3682 * extent into multiple initialized/unwritten 3655 * extent into multiple initialized/unwritten extents (up to three) 3683 * There are three possibilities: 3656 * There are three possibilities: 3684 * a> There is no split required: Entire ex 3657 * a> There is no split required: Entire extent should be unwritten 3685 * b> Splits in two extents: Write is happe 3658 * b> Splits in two extents: Write is happening at either end of the extent 3686 * c> Splits in three extents: Someone is wr 3659 * c> Splits in three extents: Someone is writing in the middle of the extent 3687 * 3660 * 3688 * This works the same way in the case of ini 3661 * This works the same way in the case of initialized -> unwritten conversion. 3689 * 3662 * 3690 * One or more index blocks may be needed if t 3663 * One or more index blocks may be needed if the extent tree grows after 3691 * the unwritten extent split. To prevent ENO 3664 * the unwritten extent split. To prevent ENOSPC from occurring when the IO 3692 * completes, we need to split the unwritten e 3665 * completes, we need to split the unwritten extent before DIO submits 3693 * the IO. The unwritten extent passed in at thi 3666 * the IO. The unwritten extent passed in at this time will be split 3694 * into three unwritten extents (at most). Afte 3667 * into three unwritten extents (at most). After the IO completes, the part 3695 * being filled will be converted to initialize 3668 * being filled will be converted to initialized by the end_io callback function 3696 * via ext4_convert_unwritten_extents(). 3669 * via ext4_convert_unwritten_extents(). 3697 * 3670 * 3698 * The size of unwritten extent to be written !! 3671 * Returns the size of the unwritten extent to be written on success. 3699 * allocated pointer. Return an extent path p << 3700 * pointer on failure. << 3701 */ 3672 */ 3702 static struct ext4_ext_path *ext4_split_conve !! 3673 static int ext4_split_convert_extents(handle_t *handle, 3703 struc 3674 struct inode *inode, 3704 struc 3675 struct ext4_map_blocks *map, 3705 struc !! 3676 struct ext4_ext_path **ppath, 3706 int f !!
3677 int flags) 3707 { 3678 { >> 3679 struct ext4_ext_path *path = *ppath; 3708 ext4_lblk_t eof_block; 3680 ext4_lblk_t eof_block; 3709 ext4_lblk_t ee_block; 3681 ext4_lblk_t ee_block; 3710 struct ext4_extent *ex; 3682 struct ext4_extent *ex; 3711 unsigned int ee_len; 3683 unsigned int ee_len; 3712 int split_flag = 0, depth; 3684 int split_flag = 0, depth; 3713 3685 3714 ext_debug(inode, "logical block %llu, 3686 ext_debug(inode, "logical block %llu, max_blocks %u\n", 3715 (unsigned long long)map->m_ 3687 (unsigned long long)map->m_lblk, map->m_len); 3716 3688 3717 eof_block = (EXT4_I(inode)->i_disksiz 3689 eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1) 3718 >> inode->i_sb->s_blo 3690 >> inode->i_sb->s_blocksize_bits; 3719 if (eof_block < map->m_lblk + map->m_ 3691 if (eof_block < map->m_lblk + map->m_len) 3720 eof_block = map->m_lblk + map 3692 eof_block = map->m_lblk + map->m_len; 3721 /* 3693 /* 3722 * It is safe to convert extent to in 3694 * It is safe to convert extent to initialized via explicit 3723 * zeroout only if extent is fully in 3695 * zeroout only if extent is fully inside i_size or new_size. 3724 */ 3696 */ 3725 depth = ext_depth(inode); 3697 depth = ext_depth(inode); 3726 ex = path[depth].p_ext; 3698 ex = path[depth].p_ext; 3727 ee_block = le32_to_cpu(ex->ee_block); 3699 ee_block = le32_to_cpu(ex->ee_block); 3728 ee_len = ext4_ext_get_actual_len(ex); 3700 ee_len = ext4_ext_get_actual_len(ex); 3729 3701 3730 /* Convert to unwritten */ 3702 /* Convert to unwritten */ 3731 if (flags & EXT4_GET_BLOCKS_CONVERT_U 3703 if (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN) { 3732 split_flag |= EXT4_EXT_DATA_V 3704 split_flag |= EXT4_EXT_DATA_VALID1; 3733 /* Convert to initialized */ 3705 /* Convert to initialized */ 3734 } else if (flags & EXT4_GET_BLOCKS_CO 3706 } else if (flags & EXT4_GET_BLOCKS_CONVERT) { 3735 split_flag |= ee_block + ee_l 3707 split_flag |= ee_block + ee_len <= eof_block ? 3736 EXT4_EXT_MAY_ZE 3708 EXT4_EXT_MAY_ZEROOUT : 0; 3737 split_flag |= (EXT4_EXT_MARK_ 3709 split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2); 3738 } 3710 } 3739 flags |= EXT4_GET_BLOCKS_PRE_IO; 3711 flags |= EXT4_GET_BLOCKS_PRE_IO; 3740 return ext4_split_extent(handle, inod !! 3712 return ext4_split_extent(handle, inode, ppath, map, split_flag, flags); 3741 allocated); << 3742 } 3713 } 3743 3714 3744 static struct ext4_ext_path * !! 3715 static int ext4_convert_unwritten_extents_endio(handle_t *handle, 3745 ext4_convert_unwritten_extents_endio(handle_t !! 3716 struct inode *inode, 3746 struct e !! 3717 struct ext4_map_blocks *map, 3747 struct e !! 
3718 struct ext4_ext_path **ppath) 3748 { 3719 { >> 3720 struct ext4_ext_path *path = *ppath; 3749 struct ext4_extent *ex; 3721 struct ext4_extent *ex; 3750 ext4_lblk_t ee_block; 3722 ext4_lblk_t ee_block; 3751 unsigned int ee_len; 3723 unsigned int ee_len; 3752 int depth; 3724 int depth; 3753 int err = 0; 3725 int err = 0; 3754 3726 3755 depth = ext_depth(inode); 3727 depth = ext_depth(inode); 3756 ex = path[depth].p_ext; 3728 ex = path[depth].p_ext; 3757 ee_block = le32_to_cpu(ex->ee_block); 3729 ee_block = le32_to_cpu(ex->ee_block); 3758 ee_len = ext4_ext_get_actual_len(ex); 3730 ee_len = ext4_ext_get_actual_len(ex); 3759 3731 3760 ext_debug(inode, "logical block %llu, 3732 ext_debug(inode, "logical block %llu, max_blocks %u\n", 3761 (unsigned long long)ee_bloc 3733 (unsigned long long)ee_block, ee_len); 3762 3734 3763 /* If the extent is larger than requested, 3735 /* If the extent is larger than requested, it is a clear sign that we still 3764 * have some extent state machine iss 3736 * have some extent state machine issues left. So extent_split is still 3765 * required. 3737 * required. 3766 * TODO: Once all related issues are 3738 * TODO: Once all related issues are fixed, this situation should be 3767 * illegal. 3739 * illegal. 3768 */ 3740 */ 3769 if (ee_block != map->m_lblk || ee_len 3741 if (ee_block != map->m_lblk || ee_len > map->m_len) { 3770 #ifdef CONFIG_EXT4_DEBUG 3742 #ifdef CONFIG_EXT4_DEBUG 3771 ext4_warning(inode->i_sb, "In 3743 ext4_warning(inode->i_sb, "Inode (%ld) finished: extent logical block %llu," 3772 " len %u; IO log 3744 " len %u; IO logical block %llu, len %u", 3773 inode->i_ino, (u 3745 inode->i_ino, (unsigned long long)ee_block, ee_len, 3774 (unsigned long l 3746 (unsigned long long)map->m_lblk, map->m_len); 3775 #endif 3747 #endif 3776 path = ext4_split_convert_ext !! 3748 err = ext4_split_convert_extents(handle, inode, map, ppath, 3777 !! 3749 EXT4_GET_BLOCKS_CONVERT); 3778 if (IS_ERR(path)) !! 3750 if (err < 0) 3779 return path; !! 3751 return err; 3780 !! 3752 path = ext4_find_extent(inode, map->m_lblk, ppath, 0); 3781 path = ext4_find_extent(inode << 3782 if (IS_ERR(path)) 3753 if (IS_ERR(path)) 3783 return path; !! 3754 return PTR_ERR(path); 3784 depth = ext_depth(inode); 3755 depth = ext_depth(inode); 3785 ex = path[depth].p_ext; 3756 ex = path[depth].p_ext; 3786 } 3757 } 3787 3758 3788 err = ext4_ext_get_access(handle, ino 3759 err = ext4_ext_get_access(handle, inode, path + depth); 3789 if (err) 3760 if (err) 3790 goto errout; !! 3761 goto out; 3791 /* first mark the extent as initializ 3762 /* first mark the extent as initialized */ 3792 ext4_ext_mark_initialized(ex); 3763 ext4_ext_mark_initialized(ex); 3793 3764 3794 /* note: ext4_ext_correct_indexes() i 3765 /* note: ext4_ext_correct_indexes() isn't needed here because 3795 * borders are not changed 3766 * borders are not changed 3796 */ 3767 */ 3797 ext4_ext_try_to_merge(handle, inode, 3768 ext4_ext_try_to_merge(handle, inode, path, ex); 3798 3769 3799 /* Mark modified extent as dirty */ 3770 /* Mark modified extent as dirty */ 3800 err = ext4_ext_dirty(handle, inode, p 3771 err = ext4_ext_dirty(handle, inode, path + path->p_depth); 3801 if (err) !! 3772 out: 3802 goto errout; << 3803 << 3804 ext4_ext_show_leaf(inode, path); 3773 ext4_ext_show_leaf(inode, path); 3805 return path; !! 3774 return err; 3806 << 3807 errout: << 3808 ext4_free_ext_path(path); << 3809 return ERR_PTR(err); << 3810 } 3775 } 3811 3776 3812 static struct ext4_ext_path * !!
3777 static int 3813 convert_initialized_extent(handle_t *handle, 3778 convert_initialized_extent(handle_t *handle, struct inode *inode, 3814 struct ext4_map_bl 3779 struct ext4_map_blocks *map, 3815 struct ext4_ext_pa !! 3780 struct ext4_ext_path **ppath, 3816 unsigned int *allo 3781 unsigned int *allocated) 3817 { 3782 { >> 3783 struct ext4_ext_path *path = *ppath; 3818 struct ext4_extent *ex; 3784 struct ext4_extent *ex; 3819 ext4_lblk_t ee_block; 3785 ext4_lblk_t ee_block; 3820 unsigned int ee_len; 3786 unsigned int ee_len; 3821 int depth; 3787 int depth; 3822 int err = 0; 3788 int err = 0; 3823 3789 3824 /* 3790 /* 3825 * Make sure that the extent is no bi 3791 * Make sure that the extent is no bigger than we support with 3826 * unwritten extent 3792 * unwritten extent 3827 */ 3793 */ 3828 if (map->m_len > EXT_UNWRITTEN_MAX_LE 3794 if (map->m_len > EXT_UNWRITTEN_MAX_LEN) 3829 map->m_len = EXT_UNWRITTEN_MA 3795 map->m_len = EXT_UNWRITTEN_MAX_LEN / 2; 3830 3796 3831 depth = ext_depth(inode); 3797 depth = ext_depth(inode); 3832 ex = path[depth].p_ext; 3798 ex = path[depth].p_ext; 3833 ee_block = le32_to_cpu(ex->ee_block); 3799 ee_block = le32_to_cpu(ex->ee_block); 3834 ee_len = ext4_ext_get_actual_len(ex); 3800 ee_len = ext4_ext_get_actual_len(ex); 3835 3801 3836 ext_debug(inode, "logical block %llu, 3802 ext_debug(inode, "logical block %llu, max_blocks %u\n", 3837 (unsigned long long)ee_bloc 3803 (unsigned long long)ee_block, ee_len); 3838 3804 3839 if (ee_block != map->m_lblk || ee_len 3805 if (ee_block != map->m_lblk || ee_len > map->m_len) { 3840 path = ext4_split_convert_ext !! 3806 err = ext4_split_convert_extents(handle, inode, map, ppath, 3841 EXT4_GET_BLOC !! 3807 EXT4_GET_BLOCKS_CONVERT_UNWRITTEN); 3842 if (IS_ERR(path)) !! 3808 if (err < 0) 3843 return path; !! 3809 return err; 3844 !! 3810 path = ext4_find_extent(inode, map->m_lblk, ppath, 0); 3845 path = ext4_find_extent(inode << 3846 if (IS_ERR(path)) 3811 if (IS_ERR(path)) 3847 return path; !! 3812 return PTR_ERR(path); 3848 depth = ext_depth(inode); 3813 depth = ext_depth(inode); 3849 ex = path[depth].p_ext; 3814 ex = path[depth].p_ext; 3850 if (!ex) { 3815 if (!ex) { 3851 EXT4_ERROR_INODE(inod 3816 EXT4_ERROR_INODE(inode, "unexpected hole at %lu", 3852 (uns 3817 (unsigned long) map->m_lblk); 3853 err = -EFSCORRUPTED; !! 3818 return -EFSCORRUPTED; 3854 goto errout; << 3855 } 3819 } 3856 } 3820 } 3857 3821 3858 err = ext4_ext_get_access(handle, ino 3822 err = ext4_ext_get_access(handle, inode, path + depth); 3859 if (err) 3823 if (err) 3860 goto errout; !! 3824 return err; 3861 /* first mark the extent as unwritten 3825 /* first mark the extent as unwritten */ 3862 ext4_ext_mark_unwritten(ex); 3826 ext4_ext_mark_unwritten(ex); 3863 3827 3864 /* note: ext4_ext_correct_indexes() i 3828 /* note: ext4_ext_correct_indexes() isn't needed here because 3865 * borders are not changed 3829 * borders are not changed 3866 */ 3830 */ 3867 ext4_ext_try_to_merge(handle, inode, 3831 ext4_ext_try_to_merge(handle, inode, path, ex); 3868 3832 3869 /* Mark modified extent as dirty */ 3833 /* Mark modified extent as dirty */ 3870 err = ext4_ext_dirty(handle, inode, p 3834 err = ext4_ext_dirty(handle, inode, path + path->p_depth); 3871 if (err) 3835 if (err) 3872 goto errout; !! 
3836 return err; 3873 ext4_ext_show_leaf(inode, path); 3837 ext4_ext_show_leaf(inode, path); 3874 3838 3875 ext4_update_inode_fsync_trans(handle, 3839 ext4_update_inode_fsync_trans(handle, inode, 1); 3876 3840 3877 map->m_flags |= EXT4_MAP_UNWRITTEN; 3841 map->m_flags |= EXT4_MAP_UNWRITTEN; 3878 if (*allocated > map->m_len) 3842 if (*allocated > map->m_len) 3879 *allocated = map->m_len; 3843 *allocated = map->m_len; 3880 map->m_len = *allocated; 3844 map->m_len = *allocated; 3881 return path; !! 3845 return 0; 3882 << 3883 errout: << 3884 ext4_free_ext_path(path); << 3885 return ERR_PTR(err); << 3886 } 3846 } 3887 3847 3888 static struct ext4_ext_path * !! 3848 static int 3889 ext4_ext_handle_unwritten_extents(handle_t *h 3849 ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode, 3890 struct ext4_map_block 3850 struct ext4_map_blocks *map, 3891 struct ext4_ext_path !! 3851 struct ext4_ext_path **ppath, int flags, 3892 unsigned int *allocat !! 3852 unsigned int allocated, ext4_fsblk_t newblock) 3893 { 3853 { >> 3854 struct ext4_ext_path __maybe_unused *path = *ppath; >> 3855 int ret = 0; 3894 int err = 0; 3856 int err = 0; 3895 3857 3896 ext_debug(inode, "logical block %llu, 3858 ext_debug(inode, "logical block %llu, max_blocks %u, flags 0x%x, allocated %u\n", 3897 (unsigned long long)map->m_ 3859 (unsigned long long)map->m_lblk, map->m_len, flags, 3898 *allocated); !! 3860 allocated); 3899 ext4_ext_show_leaf(inode, path); 3861 ext4_ext_show_leaf(inode, path); 3900 3862 3901 /* 3863 /* 3902 * When writing into unwritten space, 3864 * When writing into unwritten space, we should not fail to 3903 * allocate metadata blocks for the n 3865 * allocate metadata blocks for the new extent block if needed. 3904 */ 3866 */ 3905 flags |= EXT4_GET_BLOCKS_METADATA_NOF 3867 flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL; 3906 3868 3907 trace_ext4_ext_handle_unwritten_exten 3869 trace_ext4_ext_handle_unwritten_extents(inode, map, flags, 3908 !! 3870 allocated, newblock); 3909 3871 3910 /* get_block() before submitting IO, 3872 /* get_block() before submitting IO, split the extent */ 3911 if (flags & EXT4_GET_BLOCKS_PRE_IO) { 3873 if (flags & EXT4_GET_BLOCKS_PRE_IO) { 3912 path = ext4_split_convert_ext !! 3874 ret = ext4_split_convert_extents(handle, inode, map, ppath, 3913 flags | EXT4_ !! 3875 flags | EXT4_GET_BLOCKS_CONVERT); 3914 if (IS_ERR(path)) !! 3876 if (ret < 0) { 3915 return path; !! 3877 err = ret; >> 3878 goto out2; >> 3879 } 3916 /* 3880 /* 3917 * shouldn't get a 0 allocate !! 3881 * shouldn't get a 0 return when splitting an extent unless 3918 * m_len is 0 (bug) or extent 3882 * m_len is 0 (bug) or extent has been corrupted 3919 */ 3883 */ 3920 if (unlikely(*allocated == 0) !! 3884 if (unlikely(ret == 0)) { 3921 EXT4_ERROR_INODE(inod 3885 EXT4_ERROR_INODE(inode, 3922 "une !! 3886 "unexpected ret == 0, m_len = %u", 3923 map- 3887 map->m_len); 3924 err = -EFSCORRUPTED; 3888 err = -EFSCORRUPTED; 3925 goto errout; !! 3889 goto out2; 3926 } 3890 } 3927 map->m_flags |= EXT4_MAP_UNWR 3891 map->m_flags |= EXT4_MAP_UNWRITTEN; 3928 goto out; 3892 goto out; 3929 } 3893 } 3930 /* IO end_io complete, convert the fi 3894 /* IO end_io complete, convert the filled extent to written */ 3931 if (flags & EXT4_GET_BLOCKS_CONVERT) 3895 if (flags & EXT4_GET_BLOCKS_CONVERT) { 3932 path = ext4_convert_unwritten !! 3896 err = ext4_convert_unwritten_extents_endio(handle, inode, map, 3933 !! 3897 ppath); 3934 if (IS_ERR(path)) !! 3898 if (err < 0) 3935 return path; !! 
3899 goto out2; 3936 ext4_update_inode_fsync_trans 3900 ext4_update_inode_fsync_trans(handle, inode, 1); 3937 goto map_out; 3901 goto map_out; 3938 } 3902 } 3939 /* buffered IO cases */ 3903 /* buffered IO cases */ 3940 /* 3904 /* 3941 * repeat fallocate creation request 3905 * repeat fallocate creation request 3942 * we already have an unwritten exten 3906 * we already have an unwritten extent 3943 */ 3907 */ 3944 if (flags & EXT4_GET_BLOCKS_UNWRIT_EX 3908 if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) { 3945 map->m_flags |= EXT4_MAP_UNWR 3909 map->m_flags |= EXT4_MAP_UNWRITTEN; 3946 goto map_out; 3910 goto map_out; 3947 } 3911 } 3948 3912 3949 /* buffered READ or buffered write_be 3913 /* buffered READ or buffered write_begin() lookup */ 3950 if ((flags & EXT4_GET_BLOCKS_CREATE) 3914 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { 3951 /* 3915 /* 3952 * We have blocks reserved al 3916 * We have blocks reserved already. We 3953 * return allocated blocks so 3917 * return allocated blocks so that delalloc 3954 * won't do block reservation 3918 * won't do block reservation for us. But 3955 * the buffer head will be un 3919 * the buffer head will be unmapped so that 3956 * a read from the block retu 3920 * a read from the block returns 0s. 3957 */ 3921 */ 3958 map->m_flags |= EXT4_MAP_UNWR 3922 map->m_flags |= EXT4_MAP_UNWRITTEN; 3959 goto out1; 3923 goto out1; 3960 } 3924 } 3961 3925 3962 /* 3926 /* 3963 * Default case when (flags & EXT4_GE 3927 * Default case when (flags & EXT4_GET_BLOCKS_CREATE) == 1. 3964 * For buffered writes, at writepage 3928 * For buffered writes, at writepage time, etc. Convert a 3965 * discovered unwritten extent to wri 3929 * discovered unwritten extent to written. 3966 */ 3930 */ 3967 path = ext4_ext_convert_to_initialize !! 3931 ret = ext4_ext_convert_to_initialized(handle, inode, map, ppath, flags); 3968 !! 3932 if (ret < 0) { 3969 if (IS_ERR(path)) !! 3933 err = ret; 3970 return path; !! 3934 goto out2; >> 3935 } 3971 ext4_update_inode_fsync_trans(handle, 3936 ext4_update_inode_fsync_trans(handle, inode, 1); 3972 /* 3937 /* 3973 * shouldn't get a 0 allocated when c !! 3938 * shouldn't get a 0 return when converting an unwritten extent 3974 * unless m_len is 0 (bug) or extent 3939 * unless m_len is 0 (bug) or extent has been corrupted 3975 */ 3940 */ 3976 if (unlikely(*allocated == 0)) { !! 3941 if (unlikely(ret == 0)) { 3977 EXT4_ERROR_INODE(inode, "unex !! 3942 EXT4_ERROR_INODE(inode, "unexpected ret == 0, m_len = %u", 3978 map->m_len); 3943 map->m_len); 3979 err = -EFSCORRUPTED; 3944 err = -EFSCORRUPTED; 3980 goto errout; !! 3945 goto out2; 3981 } 3946 } 3982 3947 3983 out: 3948 out: >> 3949 allocated = ret; 3984 map->m_flags |= EXT4_MAP_NEW; 3950 map->m_flags |= EXT4_MAP_NEW; 3985 map_out: 3951 map_out: 3986 map->m_flags |= EXT4_MAP_MAPPED; 3952 map->m_flags |= EXT4_MAP_MAPPED; 3987 out1: 3953 out1: 3988 map->m_pblk = newblock; 3954 map->m_pblk = newblock; 3989 if (*allocated > map->m_len) !! 3955 if (allocated > map->m_len) 3990 *allocated = map->m_len; !! 3956 allocated = map->m_len; 3991 map->m_len = *allocated; !! 3957 map->m_len = allocated; 3992 ext4_ext_show_leaf(inode, path); 3958 ext4_ext_show_leaf(inode, path); 3993 return path; !! 3959 out2: 3994 !! 3960 return err ? 
err : allocated; 3995 errout: << 3996 ext4_free_ext_path(path); << 3997 return ERR_PTR(err); << 3998 } 3961 } 3999 3962 4000 /* 3963 /* 4001 * get_implied_cluster_alloc - check to see i 3964 * get_implied_cluster_alloc - check to see if the requested 4002 * allocation (in the map structure) overlaps 3965 * allocation (in the map structure) overlaps with a cluster already 4003 * allocated in an extent. 3966 * allocated in an extent. 4004 * @sb The filesystem superblock str 3967 * @sb The filesystem superblock structure 4005 * @map The requested lblk->pblk mapp 3968 * @map The requested lblk->pblk mapping 4006 * @ex The extent structure which mi 3969 * @ex The extent structure which might contain an implied 4007 * cluster allocation 3970 * cluster allocation 4008 * 3971 * 4009 * This function is called by ext4_ext_map_bl 3972 * This function is called by ext4_ext_map_blocks() after we failed to 4010 * find blocks that were already in the inode 3973 * find blocks that were already in the inode's extent tree. Hence, 4011 * we know that the beginning of the requeste 3974 * we know that the beginning of the requested region cannot overlap 4012 * the extent from the inode's extent tree. 3975 * the extent from the inode's extent tree. There are three cases we 4013 * want to catch. The first is this case: 3976 * want to catch. The first is this case: 4014 * 3977 * 4015 * |--- cluster # N--| 3978 * |--- cluster # N--| 4016 * |--- extent ---| |---- requested regio 3979 * |--- extent ---| |---- requested region ---| 4017 * |==========| 3980 * |==========| 4018 * 3981 * 4019 * The second case that we need to test for i 3982 * The second case that we need to test for is this one: 4020 * 3983 * 4021 * |--------- cluster # N ----------------| 3984 * |--------- cluster # N ----------------| 4022 * |--- requested region --| |----- 3985 * |--- requested region --| |------- extent ----| 4023 * |=======================| 3986 * |=======================| 4024 * 3987 * 4025 * The third case is when the requested regio 3988 * The third case is when the requested region lies between two extents 4026 * within the same cluster: 3989 * within the same cluster: 4027 * |------------- cluster # N------- 3990 * |------------- cluster # N-------------| 4028 * |----- ex -----| |---- ex 3991 * |----- ex -----| |---- ex_right ----| 4029 * |------ requested region 3992 * |------ requested region ------| 4030 * |================| 3993 * |================| 4031 * 3994 * 4032 * In each of the above cases, we need to set 3995 * In each of the above cases, we need to set the map->m_pblk and 4033 * map->m_len so it corresponds to the return 3996 * map->m_len so it corresponds to the return the extent labelled as 4034 * "|====|" from cluster #N, since it is alre 3997 * "|====|" from cluster #N, since it is already in use for data in 4035 * cluster EXT4_B2C(sbi, map->m_lblk). We wi 3998 * cluster EXT4_B2C(sbi, map->m_lblk). We will then return 1 to 4036 * signal to ext4_ext_map_blocks() that map-> 3999 * signal to ext4_ext_map_blocks() that map->m_pblk should be treated 4037 * as a new "allocated" block region. Otherw 4000 * as a new "allocated" block region. Otherwise, we will return 0 and 4038 * ext4_ext_map_blocks() will then allocate o 4001 * ext4_ext_map_blocks() will then allocate one or more new clusters 4039 * by calling ext4_mb_new_blocks(). 4002 * by calling ext4_mb_new_blocks(). 
4040 */ 4003 */ 4041 static int get_implied_cluster_alloc(struct s 4004 static int get_implied_cluster_alloc(struct super_block *sb, 4042 struct e 4005 struct ext4_map_blocks *map, 4043 struct e 4006 struct ext4_extent *ex, 4044 struct e 4007 struct ext4_ext_path *path) 4045 { 4008 { 4046 struct ext4_sb_info *sbi = EXT4_SB(sb 4009 struct ext4_sb_info *sbi = EXT4_SB(sb); 4047 ext4_lblk_t c_offset = EXT4_LBLK_COFF 4010 ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk); 4048 ext4_lblk_t ex_cluster_start, ex_clus 4011 ext4_lblk_t ex_cluster_start, ex_cluster_end; 4049 ext4_lblk_t rr_cluster_start; 4012 ext4_lblk_t rr_cluster_start; 4050 ext4_lblk_t ee_block = le32_to_cpu(ex 4013 ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); 4051 ext4_fsblk_t ee_start = ext4_ext_pblo 4014 ext4_fsblk_t ee_start = ext4_ext_pblock(ex); 4052 unsigned short ee_len = ext4_ext_get_ 4015 unsigned short ee_len = ext4_ext_get_actual_len(ex); 4053 4016 4054 /* The extent passed in that we are t 4017 /* The extent passed in that we are trying to match */ 4055 ex_cluster_start = EXT4_B2C(sbi, ee_b 4018 ex_cluster_start = EXT4_B2C(sbi, ee_block); 4056 ex_cluster_end = EXT4_B2C(sbi, ee_blo 4019 ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1); 4057 4020 4058 /* The requested region passed into e 4021 /* The requested region passed into ext4_map_blocks() */ 4059 rr_cluster_start = EXT4_B2C(sbi, map- 4022 rr_cluster_start = EXT4_B2C(sbi, map->m_lblk); 4060 4023 4061 if ((rr_cluster_start == ex_cluster_e 4024 if ((rr_cluster_start == ex_cluster_end) || 4062 (rr_cluster_start == ex_cluster_s 4025 (rr_cluster_start == ex_cluster_start)) { 4063 if (rr_cluster_start == ex_cl 4026 if (rr_cluster_start == ex_cluster_end) 4064 ee_start += ee_len - 4027 ee_start += ee_len - 1; 4065 map->m_pblk = EXT4_PBLK_CMASK 4028 map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset; 4066 map->m_len = min(map->m_len, 4029 map->m_len = min(map->m_len, 4067 (unsigned) s 4030 (unsigned) sbi->s_cluster_ratio - c_offset); 4068 /* 4031 /* 4069 * Check for and handle this 4032 * Check for and handle this case: 4070 * 4033 * 4071 * |--------- cluster # N-- 4034 * |--------- cluster # N-------------| 4072 * |----- 4035 * |------- extent ----| 4073 * |--- requested reg 4036 * |--- requested region ---| 4074 * |===========| 4037 * |===========| 4075 */ 4038 */ 4076 4039 4077 if (map->m_lblk < ee_block) 4040 if (map->m_lblk < ee_block) 4078 map->m_len = min(map- 4041 map->m_len = min(map->m_len, ee_block - map->m_lblk); 4079 4042 4080 /* 4043 /* 4081 * Check for the case where t 4044 * Check for the case where there is already another allocated 4082 * block to the right of 'ex' 4045 * block to the right of 'ex' but before the end of the cluster. 
4083 * 4046 * 4084 * |------------- cl 4047 * |------------- cluster # N-------------| 4085 * |----- ex -----| 4048 * |----- ex -----| |---- ex_right ----| 4086 * |------ r 4049 * |------ requested region ------| 4087 * |======== 4050 * |================| 4088 */ 4051 */ 4089 if (map->m_lblk > ee_block) { 4052 if (map->m_lblk > ee_block) { 4090 ext4_lblk_t next = ex 4053 ext4_lblk_t next = ext4_ext_next_allocated_block(path); 4091 map->m_len = min(map- 4054 map->m_len = min(map->m_len, next - map->m_lblk); 4092 } 4055 } 4093 4056 4094 trace_ext4_get_implied_cluste 4057 trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1); 4095 return 1; 4058 return 1; 4096 } 4059 } 4097 4060 4098 trace_ext4_get_implied_cluster_alloc_ 4061 trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0); 4099 return 0; 4062 return 0; 4100 } 4063 } 4101 4064 4102 /* << 4103 * Determine hole length around the given log << 4104 * locate and expand the hole from the given << 4105 * if it's partially or completely converted << 4106 * it into the extent cache tree if it's inde << 4107 * the length of the determined extent. << 4108 */ << 4109 static ext4_lblk_t ext4_ext_determine_insert_ << 4110 << 4111 << 4112 { << 4113 ext4_lblk_t hole_start, len; << 4114 struct extent_status es; << 4115 << 4116 hole_start = lblk; << 4117 len = ext4_ext_find_hole(inode, path, << 4118 again: << 4119 ext4_es_find_extent_range(inode, &ext << 4120 hole_start << 4121 if (!es.es_len) << 4122 goto insert_hole; << 4123 << 4124 /* << 4125 * There's a delalloc extent in the h << 4126 * extent is in front of, behind and << 4127 */ << 4128 if (lblk >= es.es_lblk + es.es_len) { << 4129 /* << 4130 * The delalloc extent is in << 4131 * find again from the querie << 4132 */ << 4133 len -= lblk - hole_start; << 4134 hole_start = lblk; << 4135 goto again; << 4136 } else if (in_range(lblk, es.es_lblk, << 4137 /* << 4138 * The delalloc extent contai << 4139 * added after ext4_map_block << 4140 * tree so we are not holding << 4141 * only stabilized by i_data_ << 4142 * soon. Don't modify the ext << 4143 * extent as a hole, just adj << 4144 * extent's after lblk. << 4145 */ << 4146 len = es.es_lblk + es.es_len << 4147 return len; << 4148 } else { << 4149 /* << 4150 * The delalloc extent is par << 4151 * the queried range, update << 4152 * beginning of the delalloc << 4153 */ << 4154 len = min(es.es_lblk - hole_s << 4155 } << 4156 << 4157 insert_hole: << 4158 /* Put just found gap into cache to s << 4159 ext_debug(inode, " -> %u:%u\n", hole_ << 4160 ext4_es_insert_extent(inode, hole_sta << 4161 EXTENT_STATUS_H << 4162 << 4163 /* Update hole_len to reflect hole si << 4164 if (hole_start != lblk) << 4165 len -= lblk - hole_start; << 4166 << 4167 return len; << 4168 } << 4169 4065 4170 /* 4066 /* 4171 * Block allocation/map/preallocation routine 4067 * Block allocation/map/preallocation routine for extents based files 4172 * 4068 * 4173 * 4069 * 4174 * Need to be called with 4070 * Need to be called with 4175 * down_read(&EXT4_I(inode)->i_data_sem) if n 4071 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block 4176 * (ie, flags is zero). Otherwise down_write( !! 4072 * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem) 4177 * 4073 * 4178 * return > 0, number of blocks already mappe 4074 * return > 0, number of blocks already mapped/allocated 4179 * if flags doesn't contain EXT4_GET !! 
4075 * if create == 0 and these are pre-allocated blocks 4180 * buffer head is unmapped 4076 * buffer head is unmapped 4181 * otherwise blocks are mapped 4077 * otherwise blocks are mapped 4182 * 4078 * 4183 * return = 0, if plain look up failed (block 4079 * return = 0, if plain look up failed (blocks have not been allocated) 4184 * buffer head is unmapped 4080 * buffer head is unmapped 4185 * 4081 * 4186 * return < 0, error case. 4082 * return < 0, error case. 4187 */ 4083 */ 4188 int ext4_ext_map_blocks(handle_t *handle, str 4084 int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, 4189 struct ext4_map_block 4085 struct ext4_map_blocks *map, int flags) 4190 { 4086 { 4191 struct ext4_ext_path *path = NULL; 4087 struct ext4_ext_path *path = NULL; 4192 struct ext4_extent newex, *ex, ex2; 4088 struct ext4_extent newex, *ex, ex2; 4193 struct ext4_sb_info *sbi = EXT4_SB(in 4089 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 4194 ext4_fsblk_t newblock = 0, pblk; 4090 ext4_fsblk_t newblock = 0, pblk; 4195 int err = 0, depth; !! 4091 int err = 0, depth, ret; 4196 unsigned int allocated = 0, offset = 4092 unsigned int allocated = 0, offset = 0; 4197 unsigned int allocated_clusters = 0; 4093 unsigned int allocated_clusters = 0; 4198 struct ext4_allocation_request ar; 4094 struct ext4_allocation_request ar; 4199 ext4_lblk_t cluster_offset; 4095 ext4_lblk_t cluster_offset; 4200 4096 4201 ext_debug(inode, "blocks %u/%u reques 4097 ext_debug(inode, "blocks %u/%u requested\n", map->m_lblk, map->m_len); 4202 trace_ext4_ext_map_blocks_enter(inode 4098 trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags); 4203 4099 4204 /* find extent for this block */ 4100 /* find extent for this block */ 4205 path = ext4_find_extent(inode, map->m 4101 path = ext4_find_extent(inode, map->m_lblk, NULL, 0); 4206 if (IS_ERR(path)) { 4102 if (IS_ERR(path)) { 4207 err = PTR_ERR(path); 4103 err = PTR_ERR(path); >> 4104 path = NULL; 4208 goto out; 4105 goto out; 4209 } 4106 } 4210 4107 4211 depth = ext_depth(inode); 4108 depth = ext_depth(inode); 4212 4109 4213 /* 4110 /* 4214 * consistent leaf must not be empty; 4111 * consistent leaf must not be empty; 4215 * this situation is possible, though 4112 * this situation is possible, though, _during_ tree modification; 4216 * this is why assert can't be put in 4113 * this is why assert can't be put in ext4_find_extent() 4217 */ 4114 */ 4218 if (unlikely(path[depth].p_ext == NUL 4115 if (unlikely(path[depth].p_ext == NULL && depth != 0)) { 4219 EXT4_ERROR_INODE(inode, "bad 4116 EXT4_ERROR_INODE(inode, "bad extent address " 4220 "lblock: %lu 4117 "lblock: %lu, depth: %d pblock %lld", 4221 (unsigned lo 4118 (unsigned long) map->m_lblk, depth, 4222 path[depth]. 4119 path[depth].p_block); 4223 err = -EFSCORRUPTED; 4120 err = -EFSCORRUPTED; 4224 goto out; 4121 goto out; 4225 } 4122 } 4226 4123 4227 ex = path[depth].p_ext; 4124 ex = path[depth].p_ext; 4228 if (ex) { 4125 if (ex) { 4229 ext4_lblk_t ee_block = le32_t 4126 ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); 4230 ext4_fsblk_t ee_start = ext4_ 4127 ext4_fsblk_t ee_start = ext4_ext_pblock(ex); 4231 unsigned short ee_len; 4128 unsigned short ee_len; 4232 4129 4233 4130 4234 /* 4131 /* 4235 * unwritten extents are trea 4132 * unwritten extents are treated as holes, except that 4236 * we split out initialized p 4133 * we split out initialized portions during a write. 
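 * (So a lookup without EXT4_GET_BLOCKS_CREATE over an unwritten
 * extent reports the range with EXT4_MAP_UNWRITTEN set but leaves the
 * buffer head unmapped; reads of the range return zeroes until a
 * write converts the covered part.)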
4237 */ 4134 */ 4238 ee_len = ext4_ext_get_actual_ 4135 ee_len = ext4_ext_get_actual_len(ex); 4239 4136 4240 trace_ext4_ext_show_extent(in 4137 trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len); 4241 4138 4242 /* if found extent covers blo 4139 /* if found extent covers block, simply return it */ 4243 if (in_range(map->m_lblk, ee_ 4140 if (in_range(map->m_lblk, ee_block, ee_len)) { 4244 newblock = map->m_lbl 4141 newblock = map->m_lblk - ee_block + ee_start; 4245 /* number of remainin 4142 /* number of remaining blocks in the extent */ 4246 allocated = ee_len - 4143 allocated = ee_len - (map->m_lblk - ee_block); 4247 ext_debug(inode, "%u 4144 ext_debug(inode, "%u fit into %u:%d -> %llu\n", 4248 map->m_lblk 4145 map->m_lblk, ee_block, ee_len, newblock); 4249 4146 4250 /* 4147 /* 4251 * If the extent is i 4148 * If the extent is initialized check whether the 4252 * caller wants to co 4149 * caller wants to convert it to unwritten. 4253 */ 4150 */ 4254 if ((!ext4_ext_is_unw 4151 if ((!ext4_ext_is_unwritten(ex)) && 4255 (flags & EXT4_GET 4152 (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) { 4256 path = conver !! 4153 err = convert_initialized_extent(handle, 4257 inode !! 4154 inode, map, &path, &allocated); 4258 if (IS_ERR(pa << 4259 err = << 4260 goto out; 4155 goto out; 4261 } else if (!ext4_ext_ 4156 } else if (!ext4_ext_is_unwritten(ex)) { 4262 map->m_flags 4157 map->m_flags |= EXT4_MAP_MAPPED; 4263 map->m_pblk = 4158 map->m_pblk = newblock; 4264 if (allocated 4159 if (allocated > map->m_len) 4265 alloc 4160 allocated = map->m_len; 4266 map->m_len = 4161 map->m_len = allocated; 4267 ext4_ext_show 4162 ext4_ext_show_leaf(inode, path); 4268 goto out; 4163 goto out; 4269 } 4164 } 4270 4165 4271 path = ext4_ext_handl !! 4166 ret = ext4_ext_handle_unwritten_extents( 4272 handle, inode !! 4167 handle, inode, map, &path, flags, 4273 &allocated, n !! 4168 allocated, newblock); 4274 if (IS_ERR(path)) !! 4169 if (ret < 0) 4275 err = PTR_ERR !! 4170 err = ret; >> 4171 else >> 4172 allocated = ret; 4276 goto out; 4173 goto out; 4277 } 4174 } 4278 } 4175 } 4279 4176 4280 /* 4177 /* 4281 * requested block isn't allocated ye 4178 * requested block isn't allocated yet; 4282 * we couldn't try to create block if !! 4179 * we couldn't try to create block if create flag is zero 4283 */ 4180 */ 4284 if ((flags & EXT4_GET_BLOCKS_CREATE) 4181 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { 4285 ext4_lblk_t len; !! 4182 ext4_lblk_t hole_start, hole_len; 4286 4183 4287 len = ext4_ext_determine_inse !! 4184 hole_start = map->m_lblk; >> 4185 hole_len = ext4_ext_determine_hole(inode, path, &hole_start); >> 4186 /* >> 4187 * put just found gap into cache to speed up >> 4188 * subsequent requests >> 4189 */ >> 4190 ext4_ext_put_gap_in_cache(inode, hole_start, hole_len); 4288 4191 >> 4192 /* Update hole_len to reflect hole size after map->m_lblk */ >> 4193 if (hole_start != map->m_lblk) >> 4194 hole_len -= map->m_lblk - hole_start; 4289 map->m_pblk = 0; 4195 map->m_pblk = 0; 4290 map->m_len = min_t(unsigned i !! 4196 map->m_len = min_t(unsigned int, map->m_len, hole_len); >> 4197 4291 goto out; 4198 goto out; 4292 } 4199 } 4293 4200 4294 /* 4201 /* 4295 * Okay, we need to do block allocati 4202 * Okay, we need to do block allocation. 
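 * (The steps below: pick an allocation goal from the neighbouring
 * extents, clamp the request to the per-extent length limits, check
 * for overlap with the next extent, then ask mballoc for blocks via
 * ext4_mb_new_blocks() and insert the resulting extent into the
 * tree.)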
4296 */ 4203 */ 4297 newex.ee_block = cpu_to_le32(map->m_l 4204 newex.ee_block = cpu_to_le32(map->m_lblk); 4298 cluster_offset = EXT4_LBLK_COFF(sbi, 4205 cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk); 4299 4206 4300 /* 4207 /* 4301 * If we are doing bigalloc, check to 4208 * If we are doing bigalloc, check to see if the extent returned 4302 * by ext4_find_extent() implies a cl 4209 * by ext4_find_extent() implies a cluster we can use. 4303 */ 4210 */ 4304 if (cluster_offset && ex && 4211 if (cluster_offset && ex && 4305 get_implied_cluster_alloc(inode-> 4212 get_implied_cluster_alloc(inode->i_sb, map, ex, path)) { 4306 ar.len = allocated = map->m_l 4213 ar.len = allocated = map->m_len; 4307 newblock = map->m_pblk; 4214 newblock = map->m_pblk; 4308 goto got_allocated_blocks; 4215 goto got_allocated_blocks; 4309 } 4216 } 4310 4217 4311 /* find neighbour allocated blocks */ 4218 /* find neighbour allocated blocks */ 4312 ar.lleft = map->m_lblk; 4219 ar.lleft = map->m_lblk; 4313 err = ext4_ext_search_left(inode, pat 4220 err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft); 4314 if (err) 4221 if (err) 4315 goto out; 4222 goto out; 4316 ar.lright = map->m_lblk; 4223 ar.lright = map->m_lblk; 4317 err = ext4_ext_search_right(inode, pa 4224 err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2); 4318 if (err < 0) 4225 if (err < 0) 4319 goto out; 4226 goto out; 4320 4227 4321 /* Check if the extent after searchin 4228 /* Check if the extent after searching to the right implies a 4322 * cluster we can use. */ 4229 * cluster we can use. */ 4323 if ((sbi->s_cluster_ratio > 1) && err 4230 if ((sbi->s_cluster_ratio > 1) && err && 4324 get_implied_cluster_alloc(inode-> 4231 get_implied_cluster_alloc(inode->i_sb, map, &ex2, path)) { 4325 ar.len = allocated = map->m_l 4232 ar.len = allocated = map->m_len; 4326 newblock = map->m_pblk; 4233 newblock = map->m_pblk; 4327 err = 0; << 4328 goto got_allocated_blocks; 4234 goto got_allocated_blocks; 4329 } 4235 } 4330 4236 4331 /* 4237 /* 4332 * See if request is beyond maximum n 4238 * See if request is beyond maximum number of blocks we can have in 4333 * a single extent. For an initialize 4239 * a single extent. For an initialized extent this limit is 4334 * EXT_INIT_MAX_LEN and for an unwrit 4240 * EXT_INIT_MAX_LEN and for an unwritten extent this limit is 4335 * EXT_UNWRITTEN_MAX_LEN. 4241 * EXT_UNWRITTEN_MAX_LEN. 
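 * (EXT_INIT_MAX_LEN is 32768 blocks while EXT_UNWRITTEN_MAX_LEN is
 * one block less, because the high bit of ee_len is reserved to mark
 * an extent unwritten; an oversized request is simply clamped here
 * and the caller retries for the remainder.)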
4336 */ 4242 */ 4337 if (map->m_len > EXT_INIT_MAX_LEN && 4243 if (map->m_len > EXT_INIT_MAX_LEN && 4338 !(flags & EXT4_GET_BLOCKS_UNWRIT_ 4244 !(flags & EXT4_GET_BLOCKS_UNWRIT_EXT)) 4339 map->m_len = EXT_INIT_MAX_LEN 4245 map->m_len = EXT_INIT_MAX_LEN; 4340 else if (map->m_len > EXT_UNWRITTEN_M 4246 else if (map->m_len > EXT_UNWRITTEN_MAX_LEN && 4341 (flags & EXT4_GET_BLOCKS_UNW 4247 (flags & EXT4_GET_BLOCKS_UNWRIT_EXT)) 4342 map->m_len = EXT_UNWRITTEN_MA 4248 map->m_len = EXT_UNWRITTEN_MAX_LEN; 4343 4249 4344 /* Check if we can really insert (m_l 4250 /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */ 4345 newex.ee_len = cpu_to_le16(map->m_len 4251 newex.ee_len = cpu_to_le16(map->m_len); 4346 err = ext4_ext_check_overlap(sbi, ino 4252 err = ext4_ext_check_overlap(sbi, inode, &newex, path); 4347 if (err) 4253 if (err) 4348 allocated = ext4_ext_get_actu 4254 allocated = ext4_ext_get_actual_len(&newex); 4349 else 4255 else 4350 allocated = map->m_len; 4256 allocated = map->m_len; 4351 4257 4352 /* allocate new block */ 4258 /* allocate new block */ 4353 ar.inode = inode; 4259 ar.inode = inode; 4354 ar.goal = ext4_ext_find_goal(inode, p 4260 ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk); 4355 ar.logical = map->m_lblk; 4261 ar.logical = map->m_lblk; 4356 /* 4262 /* 4357 * We calculate the offset from the b 4263 * We calculate the offset from the beginning of the cluster 4358 * for the logical block number, sinc 4264 * for the logical block number, since when we allocate a 4359 * physical cluster, the physical blo 4265 * physical cluster, the physical block should start at the 4360 * same offset from the beginning of 4266 * same offset from the beginning of the cluster. This is 4361 * needed so that future calls to get 4267 * needed so that future calls to get_implied_cluster_alloc() 4362 * work correctly. 4268 * work correctly. 
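	 * For example, with a cluster ratio of 16, a request for
	 * logical block 35 sits at offset 3 within its cluster, so
	 * ar.goal and ar.logical are both pulled back by 3 blocks
	 * below.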
4363 */ 4269 */ 4364 offset = EXT4_LBLK_COFF(sbi, map->m_l 4270 offset = EXT4_LBLK_COFF(sbi, map->m_lblk); 4365 ar.len = EXT4_NUM_B2C(sbi, offset+all 4271 ar.len = EXT4_NUM_B2C(sbi, offset+allocated); 4366 ar.goal -= offset; 4272 ar.goal -= offset; 4367 ar.logical -= offset; 4273 ar.logical -= offset; 4368 if (S_ISREG(inode->i_mode)) 4274 if (S_ISREG(inode->i_mode)) 4369 ar.flags = EXT4_MB_HINT_DATA; 4275 ar.flags = EXT4_MB_HINT_DATA; 4370 else 4276 else 4371 /* disable in-core preallocat 4277 /* disable in-core preallocation for non-regular files */ 4372 ar.flags = 0; 4278 ar.flags = 0; 4373 if (flags & EXT4_GET_BLOCKS_NO_NORMAL 4279 if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE) 4374 ar.flags |= EXT4_MB_HINT_NOPR 4280 ar.flags |= EXT4_MB_HINT_NOPREALLOC; 4375 if (flags & EXT4_GET_BLOCKS_DELALLOC_ 4281 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) 4376 ar.flags |= EXT4_MB_DELALLOC_ 4282 ar.flags |= EXT4_MB_DELALLOC_RESERVED; 4377 if (flags & EXT4_GET_BLOCKS_METADATA_ 4283 if (flags & EXT4_GET_BLOCKS_METADATA_NOFAIL) 4378 ar.flags |= EXT4_MB_USE_RESER 4284 ar.flags |= EXT4_MB_USE_RESERVED; 4379 newblock = ext4_mb_new_blocks(handle, 4285 newblock = ext4_mb_new_blocks(handle, &ar, &err); 4380 if (!newblock) 4286 if (!newblock) 4381 goto out; 4287 goto out; 4382 allocated_clusters = ar.len; 4288 allocated_clusters = ar.len; 4383 ar.len = EXT4_C2B(sbi, ar.len) - offs 4289 ar.len = EXT4_C2B(sbi, ar.len) - offset; 4384 ext_debug(inode, "allocate new block: 4290 ext_debug(inode, "allocate new block: goal %llu, found %llu/%u, requested %u\n", 4385 ar.goal, newblock, ar.len, 4291 ar.goal, newblock, ar.len, allocated); 4386 if (ar.len > allocated) 4292 if (ar.len > allocated) 4387 ar.len = allocated; 4293 ar.len = allocated; 4388 4294 4389 got_allocated_blocks: 4295 got_allocated_blocks: 4390 /* try to insert new extent into foun 4296 /* try to insert new extent into found leaf and return */ 4391 pblk = newblock + offset; 4297 pblk = newblock + offset; 4392 ext4_ext_store_pblock(&newex, pblk); 4298 ext4_ext_store_pblock(&newex, pblk); 4393 newex.ee_len = cpu_to_le16(ar.len); 4299 newex.ee_len = cpu_to_le16(ar.len); 4394 /* Mark unwritten */ 4300 /* Mark unwritten */ 4395 if (flags & EXT4_GET_BLOCKS_UNWRIT_EX 4301 if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) { 4396 ext4_ext_mark_unwritten(&newe 4302 ext4_ext_mark_unwritten(&newex); 4397 map->m_flags |= EXT4_MAP_UNWR 4303 map->m_flags |= EXT4_MAP_UNWRITTEN; 4398 } 4304 } 4399 4305 4400 path = ext4_ext_insert_extent(handle, !! 4306 err = ext4_ext_insert_extent(handle, inode, &path, &newex, flags); 4401 if (IS_ERR(path)) { !! 4307 if (err) { 4402 err = PTR_ERR(path); << 4403 if (allocated_clusters) { 4308 if (allocated_clusters) { 4404 int fb_flags = 0; 4309 int fb_flags = 0; 4405 4310 4406 /* 4311 /* 4407 * free data blocks w 4312 * free data blocks we just allocated. 4408 * not a good idea to 4313 * not a good idea to call discard here directly, 4409 * but otherwise we'd 4314 * but otherwise we'd need to call it every free(). 4410 */ 4315 */ 4411 ext4_discard_prealloc !! 
4316 ext4_discard_preallocations(inode, 0); 4412 if (flags & EXT4_GET_ 4317 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) 4413 fb_flags = EX 4318 fb_flags = EXT4_FREE_BLOCKS_NO_QUOT_UPDATE; 4414 ext4_free_blocks(hand 4319 ext4_free_blocks(handle, inode, NULL, newblock, 4415 EXT4 4320 EXT4_C2B(sbi, allocated_clusters), 4416 fb_f 4321 fb_flags); 4417 } 4322 } 4418 goto out; 4323 goto out; 4419 } 4324 } 4420 4325 4421 /* 4326 /* >> 4327 * Reduce the reserved cluster count to reflect successful deferred >> 4328 * allocation of delayed allocated clusters or direct allocation of >> 4329 * clusters discovered to be delayed allocated. Once allocated, a >> 4330 * cluster is not included in the reserved count. >> 4331 */ >> 4332 if (test_opt(inode->i_sb, DELALLOC) && allocated_clusters) { >> 4333 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) { >> 4334 /* >> 4335 * When allocating delayed allocated clusters, simply >> 4336 * reduce the reserved cluster count and claim quota >> 4337 */ >> 4338 ext4_da_update_reserve_space(inode, allocated_clusters, >> 4339 1); >> 4340 } else { >> 4341 ext4_lblk_t lblk, len; >> 4342 unsigned int n; >> 4343 >> 4344 /* >> 4345 * When allocating non-delayed allocated clusters >> 4346 * (from fallocate, filemap, DIO, or clusters >> 4347 * allocated when delalloc has been disabled by >> 4348 * ext4_nonda_switch), reduce the reserved cluster >> 4349 * count by the number of allocated clusters that >> 4350 * have previously been delayed allocated. Quota >> 4351 * has been claimed by ext4_mb_new_blocks() above, >> 4352 * so release the quota reservations made for any >> 4353 * previously delayed allocated clusters. >> 4354 */ >> 4355 lblk = EXT4_LBLK_CMASK(sbi, map->m_lblk); >> 4356 len = allocated_clusters << sbi->s_cluster_bits; >> 4357 n = ext4_es_delayed_clu(inode, lblk, len); >> 4358 if (n > 0) >> 4359 ext4_da_update_reserve_space(inode, (int) n, 0); >> 4360 } >> 4361 } >> 4362 >> 4363 /* 4422 * Cache the extent and update transa 4364 * Cache the extent and update transaction to commit on fdatasync only 4423 * when it is _not_ an unwritten exte 4365 * when it is _not_ an unwritten extent. 4424 */ 4366 */ 4425 if ((flags & EXT4_GET_BLOCKS_UNWRIT_E 4367 if ((flags & EXT4_GET_BLOCKS_UNWRIT_EXT) == 0) 4426 ext4_update_inode_fsync_trans 4368 ext4_update_inode_fsync_trans(handle, inode, 1); 4427 else 4369 else 4428 ext4_update_inode_fsync_trans 4370 ext4_update_inode_fsync_trans(handle, inode, 0); 4429 4371 4430 map->m_flags |= (EXT4_MAP_NEW | EXT4_ 4372 map->m_flags |= (EXT4_MAP_NEW | EXT4_MAP_MAPPED); 4431 map->m_pblk = pblk; 4373 map->m_pblk = pblk; 4432 map->m_len = ar.len; 4374 map->m_len = ar.len; 4433 allocated = map->m_len; 4375 allocated = map->m_len; 4434 ext4_ext_show_leaf(inode, path); 4376 ext4_ext_show_leaf(inode, path); 4435 out: 4377 out: 4436 ext4_free_ext_path(path); 4378 ext4_free_ext_path(path); 4437 4379 4438 trace_ext4_ext_map_blocks_exit(inode, 4380 trace_ext4_ext_map_blocks_exit(inode, flags, map, 4439 err ? 4381 err ? err : allocated); 4440 return err ? err : allocated; 4382 return err ? err : allocated; 4441 } 4383 } 4442 4384 4443 int ext4_ext_truncate(handle_t *handle, struc 4385 int ext4_ext_truncate(handle_t *handle, struct inode *inode) 4444 { 4386 { 4445 struct super_block *sb = inode->i_sb; 4387 struct super_block *sb = inode->i_sb; 4446 ext4_lblk_t last_block; 4388 ext4_lblk_t last_block; 4447 int err = 0; 4389 int err = 0; 4448 4390 4449 /* 4391 /* 4450 * TODO: optimization is possible her 4392 * TODO: optimization is possible here. 
4451 * Probably we need not scan at all, 4393 * Probably we need not scan at all, 4452 * because page truncation is enough. 4394 * because page truncation is enough. 4453 */ 4395 */ 4454 4396 4455 /* we have to know where to truncate 4397 /* we have to know where to truncate from in crash case */ 4456 EXT4_I(inode)->i_disksize = inode->i_ 4398 EXT4_I(inode)->i_disksize = inode->i_size; 4457 err = ext4_mark_inode_dirty(handle, i 4399 err = ext4_mark_inode_dirty(handle, inode); 4458 if (err) 4400 if (err) 4459 return err; 4401 return err; 4460 4402 4461 last_block = (inode->i_size + sb->s_b 4403 last_block = (inode->i_size + sb->s_blocksize - 1) 4462 >> EXT4_BLOCK_SIZE_BI 4404 >> EXT4_BLOCK_SIZE_BITS(sb); 4463 ext4_es_remove_extent(inode, last_blo 4405 ext4_es_remove_extent(inode, last_block, EXT_MAX_BLOCKS - last_block); 4464 4406 4465 retry_remove_space: 4407 retry_remove_space: 4466 err = ext4_ext_remove_space(inode, la 4408 err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1); 4467 if (err == -ENOMEM) { 4409 if (err == -ENOMEM) { 4468 memalloc_retry_wait(GFP_ATOMI 4410 memalloc_retry_wait(GFP_ATOMIC); 4469 goto retry_remove_space; 4411 goto retry_remove_space; 4470 } 4412 } 4471 return err; 4413 return err; 4472 } 4414 } 4473 4415 4474 static int ext4_alloc_file_blocks(struct file 4416 static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset, 4475 ext4_lblk_t 4417 ext4_lblk_t len, loff_t new_size, 4476 int flags) 4418 int flags) 4477 { 4419 { 4478 struct inode *inode = file_inode(file 4420 struct inode *inode = file_inode(file); 4479 handle_t *handle; 4421 handle_t *handle; 4480 int ret = 0, ret2 = 0, ret3 = 0; 4422 int ret = 0, ret2 = 0, ret3 = 0; 4481 int retries = 0; 4423 int retries = 0; 4482 int depth = 0; 4424 int depth = 0; 4483 struct ext4_map_blocks map; 4425 struct ext4_map_blocks map; 4484 unsigned int credits; 4426 unsigned int credits; 4485 loff_t epos; 4427 loff_t epos; 4486 4428 4487 BUG_ON(!ext4_test_inode_flag(inode, E 4429 BUG_ON(!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)); 4488 map.m_lblk = offset; 4430 map.m_lblk = offset; 4489 map.m_len = len; 4431 map.m_len = len; 4490 /* 4432 /* 4491 * Don't normalize the request if it 4433 * Don't normalize the request if it can fit in one extent so 4492 * that it doesn't get unnecessarily 4434 * that it doesn't get unnecessarily split into multiple 4493 * extents. 4435 * extents. 4494 */ 4436 */ 4495 if (len <= EXT_UNWRITTEN_MAX_LEN) 4437 if (len <= EXT_UNWRITTEN_MAX_LEN) 4496 flags |= EXT4_GET_BLOCKS_NO_N 4438 flags |= EXT4_GET_BLOCKS_NO_NORMALIZE; 4497 4439 4498 /* 4440 /* 4499 * credits to insert 1 extent into ex 4441 * credits to insert 1 extent into extent tree 4500 */ 4442 */ 4501 credits = ext4_chunk_trans_blocks(ino 4443 credits = ext4_chunk_trans_blocks(inode, len); 4502 depth = ext_depth(inode); 4444 depth = ext_depth(inode); 4503 4445 4504 retry: 4446 retry: 4505 while (len) { 4447 while (len) { 4506 /* 4448 /* 4507 * Recalculate credits when e 4449 * Recalculate credits when extent tree depth changes. 
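		 * (Roughly, a split can dirty one tree block per level
		 * plus the inode and superblock, so a deeper tree needs
		 * a larger transaction reservation.)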
4508 */ 4450 */ 4509 if (depth != ext_depth(inode) 4451 if (depth != ext_depth(inode)) { 4510 credits = ext4_chunk_ 4452 credits = ext4_chunk_trans_blocks(inode, len); 4511 depth = ext_depth(ino 4453 depth = ext_depth(inode); 4512 } 4454 } 4513 4455 4514 handle = ext4_journal_start(i 4456 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, 4515 c 4457 credits); 4516 if (IS_ERR(handle)) { 4458 if (IS_ERR(handle)) { 4517 ret = PTR_ERR(handle) 4459 ret = PTR_ERR(handle); 4518 break; 4460 break; 4519 } 4461 } 4520 ret = ext4_map_blocks(handle, 4462 ret = ext4_map_blocks(handle, inode, &map, flags); 4521 if (ret <= 0) { 4463 if (ret <= 0) { 4522 ext4_debug("inode #%l 4464 ext4_debug("inode #%lu: block %u: len %u: " 4523 "ext4_ext_ 4465 "ext4_ext_map_blocks returned %d", 4524 inode->i_i 4466 inode->i_ino, map.m_lblk, 4525 map.m_len, 4467 map.m_len, ret); 4526 ext4_mark_inode_dirty 4468 ext4_mark_inode_dirty(handle, inode); 4527 ext4_journal_stop(han 4469 ext4_journal_stop(handle); 4528 break; 4470 break; 4529 } 4471 } 4530 /* 4472 /* 4531 * allow a full retry cycle f 4473 * allow a full retry cycle for any remaining allocations 4532 */ 4474 */ 4533 retries = 0; 4475 retries = 0; 4534 map.m_lblk += ret; 4476 map.m_lblk += ret; 4535 map.m_len = len = len - ret; 4477 map.m_len = len = len - ret; 4536 epos = (loff_t)map.m_lblk << 4478 epos = (loff_t)map.m_lblk << inode->i_blkbits; 4537 inode_set_ctime_current(inode !! 4479 inode->i_ctime = current_time(inode); 4538 if (new_size) { 4480 if (new_size) { 4539 if (epos > new_size) 4481 if (epos > new_size) 4540 epos = new_si 4482 epos = new_size; 4541 if (ext4_update_inode 4483 if (ext4_update_inode_size(inode, epos) & 0x1) 4542 inode_set_mti !! 4484 inode->i_mtime = inode->i_ctime; 4543 << 4544 } 4485 } 4545 ret2 = ext4_mark_inode_dirty( 4486 ret2 = ext4_mark_inode_dirty(handle, inode); 4546 ext4_update_inode_fsync_trans 4487 ext4_update_inode_fsync_trans(handle, inode, 1); 4547 ret3 = ext4_journal_stop(hand 4488 ret3 = ext4_journal_stop(handle); 4548 ret2 = ret3 ? ret3 : ret2; 4489 ret2 = ret3 ? ret3 : ret2; 4549 if (unlikely(ret2)) 4490 if (unlikely(ret2)) 4550 break; 4491 break; 4551 } 4492 } 4552 if (ret == -ENOSPC && ext4_should_ret 4493 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) 4553 goto retry; 4494 goto retry; 4554 4495 4555 return ret > 0 ? ret2 : ret; 4496 return ret > 0 ? 
ret2 : ret; 4556 } 4497 } 4557 4498 4558 static int ext4_collapse_range(struct file *f 4499 static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len); 4559 4500 4560 static int ext4_insert_range(struct file *fil 4501 static int ext4_insert_range(struct file *file, loff_t offset, loff_t len); 4561 4502 4562 static long ext4_zero_range(struct file *file 4503 static long ext4_zero_range(struct file *file, loff_t offset, 4563 loff_t len, int m 4504 loff_t len, int mode) 4564 { 4505 { 4565 struct inode *inode = file_inode(file 4506 struct inode *inode = file_inode(file); 4566 struct address_space *mapping = file- 4507 struct address_space *mapping = file->f_mapping; 4567 handle_t *handle = NULL; 4508 handle_t *handle = NULL; 4568 unsigned int max_blocks; 4509 unsigned int max_blocks; 4569 loff_t new_size = 0; 4510 loff_t new_size = 0; 4570 int ret = 0; 4511 int ret = 0; 4571 int flags; 4512 int flags; 4572 int credits; 4513 int credits; 4573 int partial_begin, partial_end; 4514 int partial_begin, partial_end; 4574 loff_t start, end; 4515 loff_t start, end; 4575 ext4_lblk_t lblk; 4516 ext4_lblk_t lblk; 4576 unsigned int blkbits = inode->i_blkbi 4517 unsigned int blkbits = inode->i_blkbits; 4577 4518 4578 trace_ext4_zero_range(inode, offset, 4519 trace_ext4_zero_range(inode, offset, len, mode); 4579 4520 4580 /* 4521 /* 4581 * Round up offset. This is not fallo 4522 * Round up offset. This is not fallocate, we need to zero out 4582 * blocks, so convert interior block 4523 * blocks, so convert interior block aligned part of the range to 4583 * unwritten and possibly manually ze 4524 * unwritten and possibly manually zero out unaligned parts of the 4584 * range. Here, start and partial_beg !! 4525 * range. 4585 * partial_end are exclusive. 
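	 * E.g. with 4k blocks, offset=1000 and len=8000 give
	 * start=4096 and end=8192: block 1 goes through the
	 * unwritten-extent path, while bytes 1000-4095 and 8192-8999
	 * are the partial edges left to ext4_zero_partial_blocks().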
<< 4586 */ 4526 */ 4587 start = round_up(offset, 1 << blkbits 4527 start = round_up(offset, 1 << blkbits); 4588 end = round_down((offset + len), 1 << 4528 end = round_down((offset + len), 1 << blkbits); 4589 4529 4590 if (start < offset || end > offset + 4530 if (start < offset || end > offset + len) 4591 return -EINVAL; 4531 return -EINVAL; 4592 partial_begin = offset & ((1 << blkbi 4532 partial_begin = offset & ((1 << blkbits) - 1); 4593 partial_end = (offset + len) & ((1 << 4533 partial_end = (offset + len) & ((1 << blkbits) - 1); 4594 4534 4595 lblk = start >> blkbits; 4535 lblk = start >> blkbits; 4596 max_blocks = (end >> blkbits); 4536 max_blocks = (end >> blkbits); 4597 if (max_blocks < lblk) 4537 if (max_blocks < lblk) 4598 max_blocks = 0; 4538 max_blocks = 0; 4599 else 4539 else 4600 max_blocks -= lblk; 4540 max_blocks -= lblk; 4601 4541 4602 inode_lock(inode); 4542 inode_lock(inode); 4603 4543 4604 /* 4544 /* 4605 * Indirect files do not support unwr 4545 * Indirect files do not support unwritten extents 4606 */ 4546 */ 4607 if (!(ext4_test_inode_flag(inode, EXT 4547 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { 4608 ret = -EOPNOTSUPP; 4548 ret = -EOPNOTSUPP; 4609 goto out_mutex; 4549 goto out_mutex; 4610 } 4550 } 4611 4551 4612 if (!(mode & FALLOC_FL_KEEP_SIZE) && 4552 if (!(mode & FALLOC_FL_KEEP_SIZE) && 4613 (offset + len > inode->i_size || 4553 (offset + len > inode->i_size || 4614 offset + len > EXT4_I(inode)->i_ 4554 offset + len > EXT4_I(inode)->i_disksize)) { 4615 new_size = offset + len; 4555 new_size = offset + len; 4616 ret = inode_newsize_ok(inode, 4556 ret = inode_newsize_ok(inode, new_size); 4617 if (ret) 4557 if (ret) 4618 goto out_mutex; 4558 goto out_mutex; 4619 } 4559 } 4620 4560 4621 flags = EXT4_GET_BLOCKS_CREATE_UNWRIT 4561 flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT; 4622 4562 4623 /* Wait all existing dio workers, new 4563 /* Wait all existing dio workers, newcomers will block on i_rwsem */ 4624 inode_dio_wait(inode); 4564 inode_dio_wait(inode); 4625 4565 4626 ret = file_modified(file); 4566 ret = file_modified(file); 4627 if (ret) 4567 if (ret) 4628 goto out_mutex; 4568 goto out_mutex; 4629 4569 4630 /* Preallocate the range including th 4570 /* Preallocate the range including the unaligned edges */ 4631 if (partial_begin || partial_end) { 4571 if (partial_begin || partial_end) { 4632 ret = ext4_alloc_file_blocks( 4572 ret = ext4_alloc_file_blocks(file, 4633 round_down(of 4573 round_down(offset, 1 << blkbits) >> blkbits, 4634 (round_up((of 4574 (round_up((offset + len), 1 << blkbits) - 4635 round_down(o 4575 round_down(offset, 1 << blkbits)) >> blkbits, 4636 new_size, fla 4576 new_size, flags); 4637 if (ret) 4577 if (ret) 4638 goto out_mutex; 4578 goto out_mutex; 4639 4579 4640 } 4580 } 4641 4581 4642 /* Zero range excluding the unaligned 4582 /* Zero range excluding the unaligned edges */ 4643 if (max_blocks > 0) { 4583 if (max_blocks > 0) { 4644 flags |= (EXT4_GET_BLOCKS_CON 4584 flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN | 4645 EXT4_EX_NOCACHE); 4585 EXT4_EX_NOCACHE); 4646 4586 4647 /* 4587 /* 4648 * Prevent page faults from r 4588 * Prevent page faults from reinstantiating pages we have 4649 * released from page cache. 4589 * released from page cache. 
4650 */ 4590 */ 4651 filemap_invalidate_lock(mappi 4591 filemap_invalidate_lock(mapping); 4652 4592 4653 ret = ext4_break_layouts(inod 4593 ret = ext4_break_layouts(inode); 4654 if (ret) { 4594 if (ret) { 4655 filemap_invalidate_un 4595 filemap_invalidate_unlock(mapping); 4656 goto out_mutex; 4596 goto out_mutex; 4657 } 4597 } 4658 4598 4659 ret = ext4_update_disksize_be 4599 ret = ext4_update_disksize_before_punch(inode, offset, len); 4660 if (ret) { 4600 if (ret) { 4661 filemap_invalidate_un 4601 filemap_invalidate_unlock(mapping); 4662 goto out_mutex; 4602 goto out_mutex; 4663 } 4603 } 4664 4604 4665 /* 4605 /* 4666 * For journalled data we nee 4606 * For journalled data we need to write (and checkpoint) pages 4667 * before discarding page cac 4607 * before discarding page cache to avoid inconsitent data on 4668 * disk in case of crash befo 4608 * disk in case of crash before zeroing trans is committed. 4669 */ 4609 */ 4670 if (ext4_should_journal_data( 4610 if (ext4_should_journal_data(inode)) { 4671 ret = filemap_write_a !! 4611 ret = filemap_write_and_wait_range(mapping, start, end); 4672 << 4673 if (ret) { 4612 if (ret) { 4674 filemap_inval 4613 filemap_invalidate_unlock(mapping); 4675 goto out_mute 4614 goto out_mutex; 4676 } 4615 } 4677 } 4616 } 4678 4617 4679 /* Now release the pages and 4618 /* Now release the pages and zero block aligned part of pages */ 4680 truncate_pagecache_range(inod 4619 truncate_pagecache_range(inode, start, end - 1); 4681 inode_set_mtime_to_ts(inode, !! 4620 inode->i_mtime = inode->i_ctime = current_time(inode); 4682 4621 4683 ret = ext4_alloc_file_blocks( 4622 ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, 4684 4623 flags); 4685 filemap_invalidate_unlock(map 4624 filemap_invalidate_unlock(mapping); 4686 if (ret) 4625 if (ret) 4687 goto out_mutex; 4626 goto out_mutex; 4688 } 4627 } 4689 if (!partial_begin && !partial_end) 4628 if (!partial_begin && !partial_end) 4690 goto out_mutex; 4629 goto out_mutex; 4691 4630 4692 /* 4631 /* 4693 * In worst case we have to writeout 4632 * In worst case we have to writeout two nonadjacent unwritten 4694 * blocks and update the inode 4633 * blocks and update the inode 4695 */ 4634 */ 4696 credits = (2 * ext4_ext_index_trans_b 4635 credits = (2 * ext4_ext_index_trans_blocks(inode, 2)) + 1; 4697 if (ext4_should_journal_data(inode)) 4636 if (ext4_should_journal_data(inode)) 4698 credits += 2; 4637 credits += 2; 4699 handle = ext4_journal_start(inode, EX 4638 handle = ext4_journal_start(inode, EXT4_HT_MISC, credits); 4700 if (IS_ERR(handle)) { 4639 if (IS_ERR(handle)) { 4701 ret = PTR_ERR(handle); 4640 ret = PTR_ERR(handle); 4702 ext4_std_error(inode->i_sb, r 4641 ext4_std_error(inode->i_sb, ret); 4703 goto out_mutex; 4642 goto out_mutex; 4704 } 4643 } 4705 4644 4706 inode_set_mtime_to_ts(inode, inode_se !! 
4645 inode->i_mtime = inode->i_ctime = current_time(inode); 4707 if (new_size) 4646 if (new_size) 4708 ext4_update_inode_size(inode, 4647 ext4_update_inode_size(inode, new_size); 4709 ret = ext4_mark_inode_dirty(handle, i 4648 ret = ext4_mark_inode_dirty(handle, inode); 4710 if (unlikely(ret)) 4649 if (unlikely(ret)) 4711 goto out_handle; 4650 goto out_handle; 4712 /* Zero out partial block at the edge 4651 /* Zero out partial block at the edges of the range */ 4713 ret = ext4_zero_partial_blocks(handle 4652 ret = ext4_zero_partial_blocks(handle, inode, offset, len); 4714 if (ret >= 0) 4653 if (ret >= 0) 4715 ext4_update_inode_fsync_trans 4654 ext4_update_inode_fsync_trans(handle, inode, 1); 4716 4655 4717 if (file->f_flags & O_SYNC) 4656 if (file->f_flags & O_SYNC) 4718 ext4_handle_sync(handle); 4657 ext4_handle_sync(handle); 4719 4658 4720 out_handle: 4659 out_handle: 4721 ext4_journal_stop(handle); 4660 ext4_journal_stop(handle); 4722 out_mutex: 4661 out_mutex: 4723 inode_unlock(inode); 4662 inode_unlock(inode); 4724 return ret; 4663 return ret; 4725 } 4664 } 4726 4665 4727 /* 4666 /* 4728 * preallocate space for a file. This impleme 4667 * preallocate space for a file. This implements ext4's fallocate file 4729 * operation, which gets called from sys_fall 4668 * operation, which gets called from sys_fallocate system call. 4730 * For block-mapped files, posix_fallocate sh 4669 * For block-mapped files, posix_fallocate should fall back to the method 4731 * of writing zeroes to the required new bloc 4670 * of writing zeroes to the required new blocks (the same behavior which is 4732 * expected for file systems which do not sup 4671 * expected for file systems which do not support fallocate() system call). 4733 */ 4672 */ 4734 long ext4_fallocate(struct file *file, int mo 4673 long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) 4735 { 4674 { 4736 struct inode *inode = file_inode(file 4675 struct inode *inode = file_inode(file); 4737 loff_t new_size = 0; 4676 loff_t new_size = 0; 4738 unsigned int max_blocks; 4677 unsigned int max_blocks; 4739 int ret = 0; 4678 int ret = 0; 4740 int flags; 4679 int flags; 4741 ext4_lblk_t lblk; 4680 ext4_lblk_t lblk; 4742 unsigned int blkbits = inode->i_blkbi 4681 unsigned int blkbits = inode->i_blkbits; 4743 4682 4744 /* 4683 /* 4745 * Encrypted inodes can't handle coll 4684 * Encrypted inodes can't handle collapse range or insert 4746 * range since we would need to re-en 4685 * range since we would need to re-encrypt blocks with a 4747 * different IV or XTS tweak (which a 4686 * different IV or XTS tweak (which are based on the logical 4748 * block number). 4687 * block number). 
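	 * (Shifting a block to a new logical offset would leave its
	 * existing ciphertext undecryptable at the new location.)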
4749 */ 4688 */ 4750 if (IS_ENCRYPTED(inode) && 4689 if (IS_ENCRYPTED(inode) && 4751 (mode & (FALLOC_FL_COLLAPSE_RANGE 4690 (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE))) 4752 return -EOPNOTSUPP; 4691 return -EOPNOTSUPP; 4753 4692 4754 /* Return error if mode is not suppor 4693 /* Return error if mode is not supported */ 4755 if (mode & ~(FALLOC_FL_KEEP_SIZE | FA 4694 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | 4756 FALLOC_FL_COLLAPSE_RANGE 4695 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE | 4757 FALLOC_FL_INSERT_RANGE)) 4696 FALLOC_FL_INSERT_RANGE)) 4758 return -EOPNOTSUPP; 4697 return -EOPNOTSUPP; 4759 4698 4760 inode_lock(inode); 4699 inode_lock(inode); 4761 ret = ext4_convert_inline_data(inode) 4700 ret = ext4_convert_inline_data(inode); 4762 inode_unlock(inode); 4701 inode_unlock(inode); 4763 if (ret) 4702 if (ret) 4764 goto exit; 4703 goto exit; 4765 4704 4766 if (mode & FALLOC_FL_PUNCH_HOLE) { 4705 if (mode & FALLOC_FL_PUNCH_HOLE) { 4767 ret = ext4_punch_hole(file, o 4706 ret = ext4_punch_hole(file, offset, len); 4768 goto exit; 4707 goto exit; 4769 } 4708 } 4770 4709 4771 if (mode & FALLOC_FL_COLLAPSE_RANGE) 4710 if (mode & FALLOC_FL_COLLAPSE_RANGE) { 4772 ret = ext4_collapse_range(fil 4711 ret = ext4_collapse_range(file, offset, len); 4773 goto exit; 4712 goto exit; 4774 } 4713 } 4775 4714 4776 if (mode & FALLOC_FL_INSERT_RANGE) { 4715 if (mode & FALLOC_FL_INSERT_RANGE) { 4777 ret = ext4_insert_range(file, 4716 ret = ext4_insert_range(file, offset, len); 4778 goto exit; 4717 goto exit; 4779 } 4718 } 4780 4719 4781 if (mode & FALLOC_FL_ZERO_RANGE) { 4720 if (mode & FALLOC_FL_ZERO_RANGE) { 4782 ret = ext4_zero_range(file, o 4721 ret = ext4_zero_range(file, offset, len, mode); 4783 goto exit; 4722 goto exit; 4784 } 4723 } 4785 trace_ext4_fallocate_enter(inode, off 4724 trace_ext4_fallocate_enter(inode, offset, len, mode); 4786 lblk = offset >> blkbits; 4725 lblk = offset >> blkbits; 4787 4726 4788 max_blocks = EXT4_MAX_BLOCKS(len, off 4727 max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits); 4789 flags = EXT4_GET_BLOCKS_CREATE_UNWRIT 4728 flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT; 4790 4729 4791 inode_lock(inode); 4730 inode_lock(inode); 4792 4731 4793 /* 4732 /* 4794 * We only support preallocation for 4733 * We only support preallocation for extent-based files only 4795 */ 4734 */ 4796 if (!(ext4_test_inode_flag(inode, EXT 4735 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { 4797 ret = -EOPNOTSUPP; 4736 ret = -EOPNOTSUPP; 4798 goto out; 4737 goto out; 4799 } 4738 } 4800 4739 4801 if (!(mode & FALLOC_FL_KEEP_SIZE) && 4740 if (!(mode & FALLOC_FL_KEEP_SIZE) && 4802 (offset + len > inode->i_size || 4741 (offset + len > inode->i_size || 4803 offset + len > EXT4_I(inode)->i_ 4742 offset + len > EXT4_I(inode)->i_disksize)) { 4804 new_size = offset + len; 4743 new_size = offset + len; 4805 ret = inode_newsize_ok(inode, 4744 ret = inode_newsize_ok(inode, new_size); 4806 if (ret) 4745 if (ret) 4807 goto out; 4746 goto out; 4808 } 4747 } 4809 4748 4810 /* Wait all existing dio workers, new 4749 /* Wait all existing dio workers, newcomers will block on i_rwsem */ 4811 inode_dio_wait(inode); 4750 inode_dio_wait(inode); 4812 4751 4813 ret = file_modified(file); 4752 ret = file_modified(file); 4814 if (ret) 4753 if (ret) 4815 goto out; 4754 goto out; 4816 4755 4817 ret = ext4_alloc_file_blocks(file, lb 4756 ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, flags); 4818 if (ret) 4757 if (ret) 4819 goto out; 4758 goto out; 4820 4759 4821 if 
(file->f_flags & O_SYNC && EXT4_SB 4760 if (file->f_flags & O_SYNC && EXT4_SB(inode->i_sb)->s_journal) { 4822 ret = ext4_fc_commit(EXT4_SB( 4761 ret = ext4_fc_commit(EXT4_SB(inode->i_sb)->s_journal, 4823 EXT4_ 4762 EXT4_I(inode)->i_sync_tid); 4824 } 4763 } 4825 out: 4764 out: 4826 inode_unlock(inode); 4765 inode_unlock(inode); 4827 trace_ext4_fallocate_exit(inode, offs 4766 trace_ext4_fallocate_exit(inode, offset, max_blocks, ret); 4828 exit: 4767 exit: 4829 return ret; 4768 return ret; 4830 } 4769 } 4831 4770 4832 /* 4771 /* 4833 * This function convert a range of blocks to 4772 * This function convert a range of blocks to written extents 4834 * The caller of this function will pass the 4773 * The caller of this function will pass the start offset and the size. 4835 * all unwritten extents within this range wi 4774 * all unwritten extents within this range will be converted to 4836 * written extents. 4775 * written extents. 4837 * 4776 * 4838 * This function is called from the direct IO 4777 * This function is called from the direct IO end io call back 4839 * function, to convert the fallocated extent 4778 * function, to convert the fallocated extents after IO is completed. 4840 * Returns 0 on success. 4779 * Returns 0 on success. 4841 */ 4780 */ 4842 int ext4_convert_unwritten_extents(handle_t * 4781 int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode, 4843 loff_t off 4782 loff_t offset, ssize_t len) 4844 { 4783 { 4845 unsigned int max_blocks; 4784 unsigned int max_blocks; 4846 int ret = 0, ret2 = 0, ret3 = 0; 4785 int ret = 0, ret2 = 0, ret3 = 0; 4847 struct ext4_map_blocks map; 4786 struct ext4_map_blocks map; 4848 unsigned int blkbits = inode->i_blkbi 4787 unsigned int blkbits = inode->i_blkbits; 4849 unsigned int credits = 0; 4788 unsigned int credits = 0; 4850 4789 4851 map.m_lblk = offset >> blkbits; 4790 map.m_lblk = offset >> blkbits; 4852 max_blocks = EXT4_MAX_BLOCKS(len, off 4791 max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits); 4853 4792 4854 if (!handle) { 4793 if (!handle) { 4855 /* 4794 /* 4856 * credits to insert 1 extent 4795 * credits to insert 1 extent into extent tree 4857 */ 4796 */ 4858 credits = ext4_chunk_trans_bl 4797 credits = ext4_chunk_trans_blocks(inode, max_blocks); 4859 } 4798 } 4860 while (ret >= 0 && ret < max_blocks) 4799 while (ret >= 0 && ret < max_blocks) { 4861 map.m_lblk += ret; 4800 map.m_lblk += ret; 4862 map.m_len = (max_blocks -= re 4801 map.m_len = (max_blocks -= ret); 4863 if (credits) { 4802 if (credits) { 4864 handle = ext4_journal 4803 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, 4865 4804 credits); 4866 if (IS_ERR(handle)) { 4805 if (IS_ERR(handle)) { 4867 ret = PTR_ERR 4806 ret = PTR_ERR(handle); 4868 break; 4807 break; 4869 } 4808 } 4870 } 4809 } 4871 ret = ext4_map_blocks(handle, 4810 ret = ext4_map_blocks(handle, inode, &map, 4872 EXT4_GE 4811 EXT4_GET_BLOCKS_IO_CONVERT_EXT); 4873 if (ret <= 0) 4812 if (ret <= 0) 4874 ext4_warning(inode->i 4813 ext4_warning(inode->i_sb, 4875 "inode # 4814 "inode #%lu: block %u: len %u: " 4876 "ext4_ex 4815 "ext4_ext_map_blocks returned %d", 4877 inode->i 4816 inode->i_ino, map.m_lblk, 4878 map.m_le 4817 map.m_len, ret); 4879 ret2 = ext4_mark_inode_dirty( 4818 ret2 = ext4_mark_inode_dirty(handle, inode); 4880 if (credits) { 4819 if (credits) { 4881 ret3 = ext4_journal_s 4820 ret3 = ext4_journal_stop(handle); 4882 if (unlikely(ret3)) 4821 if (unlikely(ret3)) 4883 ret2 = ret3; 4822 ret2 = ret3; 4884 } 4823 } 4885 4824 4886 if (ret <= 0 || ret2) 4825 if (ret <= 0 || ret2) 
4887 break; 4826 break; 4888 } 4827 } 4889 return ret > 0 ? ret2 : ret; 4828 return ret > 0 ? ret2 : ret; 4890 } 4829 } 4891 4830 4892 int ext4_convert_unwritten_io_end_vec(handle_ 4831 int ext4_convert_unwritten_io_end_vec(handle_t *handle, ext4_io_end_t *io_end) 4893 { 4832 { 4894 int ret = 0, err = 0; 4833 int ret = 0, err = 0; 4895 struct ext4_io_end_vec *io_end_vec; 4834 struct ext4_io_end_vec *io_end_vec; 4896 4835 4897 /* 4836 /* 4898 * This is somewhat ugly but the idea 4837 * This is somewhat ugly but the idea is clear: When transaction is 4899 * reserved, everything goes into it. 4838 * reserved, everything goes into it. Otherwise we rather start several 4900 * smaller transactions for conversio 4839 * smaller transactions for conversion of each extent separately. 4901 */ 4840 */ 4902 if (handle) { 4841 if (handle) { 4903 handle = ext4_journal_start_r 4842 handle = ext4_journal_start_reserved(handle, 4904 4843 EXT4_HT_EXT_CONVERT); 4905 if (IS_ERR(handle)) 4844 if (IS_ERR(handle)) 4906 return PTR_ERR(handle 4845 return PTR_ERR(handle); 4907 } 4846 } 4908 4847 4909 list_for_each_entry(io_end_vec, &io_e 4848 list_for_each_entry(io_end_vec, &io_end->list_vec, list) { 4910 ret = ext4_convert_unwritten_ 4849 ret = ext4_convert_unwritten_extents(handle, io_end->inode, 4911 4850 io_end_vec->offset, 4912 4851 io_end_vec->size); 4913 if (ret) 4852 if (ret) 4914 break; 4853 break; 4915 } 4854 } 4916 4855 4917 if (handle) 4856 if (handle) 4918 err = ext4_journal_stop(handl 4857 err = ext4_journal_stop(handle); 4919 4858 4920 return ret < 0 ? ret : err; 4859 return ret < 0 ? ret : err; 4921 } 4860 } 4922 4861 4923 static int ext4_iomap_xattr_fiemap(struct ino 4862 static int ext4_iomap_xattr_fiemap(struct inode *inode, struct iomap *iomap) 4924 { 4863 { 4925 __u64 physical = 0; 4864 __u64 physical = 0; 4926 __u64 length = 0; 4865 __u64 length = 0; 4927 int blockbits = inode->i_sb->s_blocks 4866 int blockbits = inode->i_sb->s_blocksize_bits; 4928 int error = 0; 4867 int error = 0; 4929 u16 iomap_type; 4868 u16 iomap_type; 4930 4869 4931 /* in-inode? */ 4870 /* in-inode? 
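	 * i.e. is the xattr value stored in the inode body, after
	 * the fixed fields and i_extra_isize, rather than in a
	 * separate xattr block?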
*/ 4932 if (ext4_test_inode_state(inode, EXT4 4871 if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) { 4933 struct ext4_iloc iloc; 4872 struct ext4_iloc iloc; 4934 int offset; /* offset of 4873 int offset; /* offset of xattr in inode */ 4935 4874 4936 error = ext4_get_inode_loc(in 4875 error = ext4_get_inode_loc(inode, &iloc); 4937 if (error) 4876 if (error) 4938 return error; 4877 return error; 4939 physical = (__u64)iloc.bh->b_ 4878 physical = (__u64)iloc.bh->b_blocknr << blockbits; 4940 offset = EXT4_GOOD_OLD_INODE_ 4879 offset = EXT4_GOOD_OLD_INODE_SIZE + 4941 EXT4_I(inode) 4880 EXT4_I(inode)->i_extra_isize; 4942 physical += offset; 4881 physical += offset; 4943 length = EXT4_SB(inode->i_sb) 4882 length = EXT4_SB(inode->i_sb)->s_inode_size - offset; 4944 brelse(iloc.bh); 4883 brelse(iloc.bh); 4945 iomap_type = IOMAP_INLINE; 4884 iomap_type = IOMAP_INLINE; 4946 } else if (EXT4_I(inode)->i_file_acl) 4885 } else if (EXT4_I(inode)->i_file_acl) { /* external block */ 4947 physical = (__u64)EXT4_I(inod 4886 physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits; 4948 length = inode->i_sb->s_block 4887 length = inode->i_sb->s_blocksize; 4949 iomap_type = IOMAP_MAPPED; 4888 iomap_type = IOMAP_MAPPED; 4950 } else { 4889 } else { 4951 /* no in-inode or external bl 4890 /* no in-inode or external block for xattr, so return -ENOENT */ 4952 error = -ENOENT; 4891 error = -ENOENT; 4953 goto out; 4892 goto out; 4954 } 4893 } 4955 4894 4956 iomap->addr = physical; 4895 iomap->addr = physical; 4957 iomap->offset = 0; 4896 iomap->offset = 0; 4958 iomap->length = length; 4897 iomap->length = length; 4959 iomap->type = iomap_type; 4898 iomap->type = iomap_type; 4960 iomap->flags = 0; 4899 iomap->flags = 0; 4961 out: 4900 out: 4962 return error; 4901 return error; 4963 } 4902 } 4964 4903 4965 static int ext4_iomap_xattr_begin(struct inod 4904 static int ext4_iomap_xattr_begin(struct inode *inode, loff_t offset, 4966 loff_t leng 4905 loff_t length, unsigned flags, 4967 struct ioma 4906 struct iomap *iomap, struct iomap *srcmap) 4968 { 4907 { 4969 int error; 4908 int error; 4970 4909 4971 error = ext4_iomap_xattr_fiemap(inode 4910 error = ext4_iomap_xattr_fiemap(inode, iomap); 4972 if (error == 0 && (offset >= iomap->l 4911 if (error == 0 && (offset >= iomap->length)) 4973 error = -ENOENT; 4912 error = -ENOENT; 4974 return error; 4913 return error; 4975 } 4914 } 4976 4915 4977 static const struct iomap_ops ext4_iomap_xatt 4916 static const struct iomap_ops ext4_iomap_xattr_ops = { 4978 .iomap_begin = ext4_iomap_ 4917 .iomap_begin = ext4_iomap_xattr_begin, 4979 }; 4918 }; 4980 4919 4981 static int ext4_fiemap_check_ranges(struct in 4920 static int ext4_fiemap_check_ranges(struct inode *inode, u64 start, u64 *len) 4982 { 4921 { 4983 u64 maxbytes; 4922 u64 maxbytes; 4984 4923 4985 if (ext4_test_inode_flag(inode, EXT4_ 4924 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 4986 maxbytes = inode->i_sb->s_max 4925 maxbytes = inode->i_sb->s_maxbytes; 4987 else 4926 else 4988 maxbytes = EXT4_SB(inode->i_s 4927 maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes; 4989 4928 4990 if (*len == 0) 4929 if (*len == 0) 4991 return -EINVAL; 4930 return -EINVAL; 4992 if (start > maxbytes) 4931 if (start > maxbytes) 4993 return -EFBIG; 4932 return -EFBIG; 4994 4933 4995 /* 4934 /* 4996 * Shrink request scope to what the f 4935 * Shrink request scope to what the fs can actually handle. 
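	 * For instance, a request of start=0 and *len=~0ULL trips the
	 * "*len > maxbytes" test and is clamped to maxbytes - start,
	 * so the block arithmetic further down cannot overflow.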
4997 */ 4936 */ 4998 if (*len > maxbytes || (maxbytes - *l 4937 if (*len > maxbytes || (maxbytes - *len) < start) 4999 *len = maxbytes - start; 4938 *len = maxbytes - start; 5000 return 0; 4939 return 0; 5001 } 4940 } 5002 4941 5003 int ext4_fiemap(struct inode *inode, struct f 4942 int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 5004 u64 start, u64 len) 4943 u64 start, u64 len) 5005 { 4944 { 5006 int error = 0; 4945 int error = 0; 5007 4946 5008 if (fieinfo->fi_flags & FIEMAP_FLAG_C 4947 if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) { 5009 error = ext4_ext_precache(ino 4948 error = ext4_ext_precache(inode); 5010 if (error) 4949 if (error) 5011 return error; 4950 return error; 5012 fieinfo->fi_flags &= ~FIEMAP_ 4951 fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE; 5013 } 4952 } 5014 4953 5015 /* 4954 /* 5016 * For bitmap files the maximum size 4955 * For bitmap files the maximum size limit could be smaller than 5017 * s_maxbytes, so check len here manu 4956 * s_maxbytes, so check len here manually instead of just relying on the 5018 * generic check. 4957 * generic check. 5019 */ 4958 */ 5020 error = ext4_fiemap_check_ranges(inod 4959 error = ext4_fiemap_check_ranges(inode, start, &len); 5021 if (error) 4960 if (error) 5022 return error; 4961 return error; 5023 4962 5024 if (fieinfo->fi_flags & FIEMAP_FLAG_X 4963 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) { 5025 fieinfo->fi_flags &= ~FIEMAP_ 4964 fieinfo->fi_flags &= ~FIEMAP_FLAG_XATTR; 5026 return iomap_fiemap(inode, fi 4965 return iomap_fiemap(inode, fieinfo, start, len, 5027 &ext4_iom 4966 &ext4_iomap_xattr_ops); 5028 } 4967 } 5029 4968 5030 return iomap_fiemap(inode, fieinfo, s 4969 return iomap_fiemap(inode, fieinfo, start, len, &ext4_iomap_report_ops); 5031 } 4970 } 5032 4971 5033 int ext4_get_es_cache(struct inode *inode, st 4972 int ext4_get_es_cache(struct inode *inode, struct fiemap_extent_info *fieinfo, 5034 __u64 start, __u64 len) 4973 __u64 start, __u64 len) 5035 { 4974 { 5036 ext4_lblk_t start_blk, len_blks; 4975 ext4_lblk_t start_blk, len_blks; 5037 __u64 last_blk; 4976 __u64 last_blk; 5038 int error = 0; 4977 int error = 0; 5039 4978 5040 if (ext4_has_inline_data(inode)) { 4979 if (ext4_has_inline_data(inode)) { 5041 int has_inline; 4980 int has_inline; 5042 4981 5043 down_read(&EXT4_I(inode)->xat 4982 down_read(&EXT4_I(inode)->xattr_sem); 5044 has_inline = ext4_has_inline_ 4983 has_inline = ext4_has_inline_data(inode); 5045 up_read(&EXT4_I(inode)->xattr 4984 up_read(&EXT4_I(inode)->xattr_sem); 5046 if (has_inline) 4985 if (has_inline) 5047 return 0; 4986 return 0; 5048 } 4987 } 5049 4988 5050 if (fieinfo->fi_flags & FIEMAP_FLAG_C 4989 if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) { 5051 error = ext4_ext_precache(ino 4990 error = ext4_ext_precache(inode); 5052 if (error) 4991 if (error) 5053 return error; 4992 return error; 5054 fieinfo->fi_flags &= ~FIEMAP_ 4993 fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE; 5055 } 4994 } 5056 4995 5057 error = fiemap_prep(inode, fieinfo, s 4996 error = fiemap_prep(inode, fieinfo, start, &len, 0); 5058 if (error) 4997 if (error) 5059 return error; 4998 return error; 5060 4999 5061 error = ext4_fiemap_check_ranges(inod 5000 error = ext4_fiemap_check_ranges(inode, start, &len); 5062 if (error) 5001 if (error) 5063 return error; 5002 return error; 5064 5003 5065 start_blk = start >> inode->i_sb->s_b 5004 start_blk = start >> inode->i_sb->s_blocksize_bits; 5066 last_blk = (start + len - 1) >> inode 5005 last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits; 5067 if (last_blk >= 
EXT_MAX_BLOCKS) 5006 if (last_blk >= EXT_MAX_BLOCKS) 5068 last_blk = EXT_MAX_BLOCKS-1; 5007 last_blk = EXT_MAX_BLOCKS-1; 5069 len_blks = ((ext4_lblk_t) last_blk) - 5008 len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1; 5070 5009 5071 /* 5010 /* 5072 * Walk the extent tree gathering ext 5011 * Walk the extent tree gathering extent information 5073 * and pushing extents back to the us 5012 * and pushing extents back to the user. 5074 */ 5013 */ 5075 return ext4_fill_es_cache_info(inode, 5014 return ext4_fill_es_cache_info(inode, start_blk, len_blks, fieinfo); 5076 } 5015 } 5077 5016 5078 /* 5017 /* 5079 * ext4_ext_shift_path_extents: 5018 * ext4_ext_shift_path_extents: 5080 * Shift the extents of a path structure lyin 5019 * Shift the extents of a path structure lying between path[depth].p_ext 5081 * and EXT_LAST_EXTENT(path[depth].p_hdr), by 5020 * and EXT_LAST_EXTENT(path[depth].p_hdr), by @shift blocks. @SHIFT tells 5082 * if it is right shift or left shift operati 5021 * if it is right shift or left shift operation. 5083 */ 5022 */ 5084 static int 5023 static int 5085 ext4_ext_shift_path_extents(struct ext4_ext_p 5024 ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift, 5086 struct inode *ino 5025 struct inode *inode, handle_t *handle, 5087 enum SHIFT_DIRECT 5026 enum SHIFT_DIRECTION SHIFT) 5088 { 5027 { 5089 int depth, err = 0; 5028 int depth, err = 0; 5090 struct ext4_extent *ex_start, *ex_las 5029 struct ext4_extent *ex_start, *ex_last; 5091 bool update = false; 5030 bool update = false; 5092 int credits, restart_credits; 5031 int credits, restart_credits; 5093 depth = path->p_depth; 5032 depth = path->p_depth; 5094 5033 5095 while (depth >= 0) { 5034 while (depth >= 0) { 5096 if (depth == path->p_depth) { 5035 if (depth == path->p_depth) { 5097 ex_start = path[depth 5036 ex_start = path[depth].p_ext; 5098 if (!ex_start) 5037 if (!ex_start) 5099 return -EFSCO 5038 return -EFSCORRUPTED; 5100 5039 5101 ex_last = EXT_LAST_EX 5040 ex_last = EXT_LAST_EXTENT(path[depth].p_hdr); 5102 /* leaf + sb + inode 5041 /* leaf + sb + inode */ 5103 credits = 3; 5042 credits = 3; 5104 if (ex_start == EXT_F 5043 if (ex_start == EXT_FIRST_EXTENT(path[depth].p_hdr)) { 5105 update = true 5044 update = true; 5106 /* extent tre 5045 /* extent tree + sb + inode */ 5107 credits = dep 5046 credits = depth + 2; 5108 } 5047 } 5109 5048 5110 restart_credits = ext 5049 restart_credits = ext4_writepage_trans_blocks(inode); 5111 err = ext4_datasem_en 5050 err = ext4_datasem_ensure_credits(handle, inode, credits, 5112 resta 5051 restart_credits, 0); 5113 if (err) { 5052 if (err) { 5114 if (err > 0) 5053 if (err > 0) 5115 err = 5054 err = -EAGAIN; 5116 goto out; 5055 goto out; 5117 } 5056 } 5118 5057 5119 err = ext4_ext_get_ac 5058 err = ext4_ext_get_access(handle, inode, path + depth); 5120 if (err) 5059 if (err) 5121 goto out; 5060 goto out; 5122 5061 5123 while (ex_start <= ex 5062 while (ex_start <= ex_last) { 5124 if (SHIFT == 5063 if (SHIFT == SHIFT_LEFT) { 5125 le32_ 5064 le32_add_cpu(&ex_start->ee_block, 5126 5065 -shift); 5127 /* Tr 5066 /* Try to merge to the left. 
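				 * after the shift: if the moved extent
				 * now abuts its predecessor the two are
				 * coalesced and ex_last steps back,
				 * otherwise move on to the next extent.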
*/ 5128 if (( 5067 if ((ex_start > 5129 E 5068 EXT_FIRST_EXTENT(path[depth].p_hdr)) 5130 & 5069 && 5131 e 5070 ext4_ext_try_to_merge_right(inode, 5132 p 5071 path, ex_start - 1)) 5133 5072 ex_last--; 5134 else 5073 else 5135 5074 ex_start++; 5136 } else { 5075 } else { 5137 le32_ 5076 le32_add_cpu(&ex_last->ee_block, shift); 5138 ext4_ 5077 ext4_ext_try_to_merge_right(inode, path, 5139 5078 ex_last); 5140 ex_la 5079 ex_last--; 5141 } 5080 } 5142 } 5081 } 5143 err = ext4_ext_dirty( 5082 err = ext4_ext_dirty(handle, inode, path + depth); 5144 if (err) 5083 if (err) 5145 goto out; 5084 goto out; 5146 5085 5147 if (--depth < 0 || !u 5086 if (--depth < 0 || !update) 5148 break; 5087 break; 5149 } 5088 } 5150 5089 5151 /* Update index too */ 5090 /* Update index too */ 5152 err = ext4_ext_get_access(han 5091 err = ext4_ext_get_access(handle, inode, path + depth); 5153 if (err) 5092 if (err) 5154 goto out; 5093 goto out; 5155 5094 5156 if (SHIFT == SHIFT_LEFT) 5095 if (SHIFT == SHIFT_LEFT) 5157 le32_add_cpu(&path[de 5096 le32_add_cpu(&path[depth].p_idx->ei_block, -shift); 5158 else 5097 else 5159 le32_add_cpu(&path[de 5098 le32_add_cpu(&path[depth].p_idx->ei_block, shift); 5160 err = ext4_ext_dirty(handle, 5099 err = ext4_ext_dirty(handle, inode, path + depth); 5161 if (err) 5100 if (err) 5162 goto out; 5101 goto out; 5163 5102 5164 /* we are done if current ind 5103 /* we are done if current index is not a starting index */ 5165 if (path[depth].p_idx != EXT_ 5104 if (path[depth].p_idx != EXT_FIRST_INDEX(path[depth].p_hdr)) 5166 break; 5105 break; 5167 5106 5168 depth--; 5107 depth--; 5169 } 5108 } 5170 5109 5171 out: 5110 out: 5172 return err; 5111 return err; 5173 } 5112 } 5174 5113 5175 /* 5114 /* 5176 * ext4_ext_shift_extents: 5115 * ext4_ext_shift_extents: 5177 * All the extents which lies in the range fr 5116 * All the extents which lies in the range from @start to the last allocated 5178 * block for the @inode are shifted either to 5117 * block for the @inode are shifted either towards left or right (depending 5179 * upon @SHIFT) by @shift blocks. 5118 * upon @SHIFT) by @shift blocks. 5180 * On success, 0 is returned, error otherwise 5119 * On success, 0 is returned, error otherwise. 
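 * (SHIFT_LEFT is used by the collapse range path and SHIFT_RIGHT by
 * the insert range path; see ext4_collapse_range() and
 * ext4_insert_range() below.)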
5181 */ 5120 */ 5182 static int 5121 static int 5183 ext4_ext_shift_extents(struct inode *inode, h 5122 ext4_ext_shift_extents(struct inode *inode, handle_t *handle, 5184 ext4_lblk_t start, ext 5123 ext4_lblk_t start, ext4_lblk_t shift, 5185 enum SHIFT_DIRECTION S 5124 enum SHIFT_DIRECTION SHIFT) 5186 { 5125 { 5187 struct ext4_ext_path *path; 5126 struct ext4_ext_path *path; 5188 int ret = 0, depth; 5127 int ret = 0, depth; 5189 struct ext4_extent *extent; 5128 struct ext4_extent *extent; 5190 ext4_lblk_t stop, *iterator, ex_start 5129 ext4_lblk_t stop, *iterator, ex_start, ex_end; 5191 ext4_lblk_t tmp = EXT_MAX_BLOCKS; 5130 ext4_lblk_t tmp = EXT_MAX_BLOCKS; 5192 5131 5193 /* Let path point to the last extent 5132 /* Let path point to the last extent */ 5194 path = ext4_find_extent(inode, EXT_MA 5133 path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL, 5195 EXT4_EX_NOCAC 5134 EXT4_EX_NOCACHE); 5196 if (IS_ERR(path)) 5135 if (IS_ERR(path)) 5197 return PTR_ERR(path); 5136 return PTR_ERR(path); 5198 5137 5199 depth = path->p_depth; 5138 depth = path->p_depth; 5200 extent = path[depth].p_ext; 5139 extent = path[depth].p_ext; 5201 if (!extent) 5140 if (!extent) 5202 goto out; 5141 goto out; 5203 5142 5204 stop = le32_to_cpu(extent->ee_block); 5143 stop = le32_to_cpu(extent->ee_block); 5205 5144 5206 /* 5145 /* 5207 * For left shifts, make sure the hole 5146 * For left shifts, make sure the hole on the left is big enough to 5208 * accommodate the shift. For right s 5147 * accommodate the shift. For right shifts, make sure the last extent 5209 * won't be shifted beyond EXT_MAX_BLO 5148 * won't be shifted beyond EXT_MAX_BLOCKS. 5210 */ 5149 */ 5211 if (SHIFT == SHIFT_LEFT) { 5150 if (SHIFT == SHIFT_LEFT) { 5212 path = ext4_find_extent(inode !! 5151 path = ext4_find_extent(inode, start - 1, &path, 5213 EXT4_ 5152 EXT4_EX_NOCACHE); 5214 if (IS_ERR(path)) 5153 if (IS_ERR(path)) 5215 return PTR_ERR(path); 5154 return PTR_ERR(path); 5216 depth = path->p_depth; 5155 depth = path->p_depth; 5217 extent = path[depth].p_ext; 5156 extent = path[depth].p_ext; 5218 if (extent) { 5157 if (extent) { 5219 ex_start = le32_to_cp 5158 ex_start = le32_to_cpu(extent->ee_block); 5220 ex_end = le32_to_cpu( 5159 ex_end = le32_to_cpu(extent->ee_block) + 5221 ext4_ext_get_ 5160 ext4_ext_get_actual_len(extent); 5222 } else { 5161 } else { 5223 ex_start = 0; 5162 ex_start = 0; 5224 ex_end = 0; 5163 ex_end = 0; 5225 } 5164 } 5226 5165 5227 if ((start == ex_start && shi 5166 if ((start == ex_start && shift > ex_start) || 5228 (shift > start - ex_end)) 5167 (shift > start - ex_end)) { 5229 ret = -EINVAL; 5168 ret = -EINVAL; 5230 goto out; 5169 goto out; 5231 } 5170 } 5232 } else { 5171 } else { 5233 if (shift > EXT_MAX_BLOCKS - 5172 if (shift > EXT_MAX_BLOCKS - 5234 (stop + ext4_ext_get_actu 5173 (stop + ext4_ext_get_actual_len(extent))) { 5235 ret = -EINVAL; 5174 ret = -EINVAL; 5236 goto out; 5175 goto out; 5237 } 5176 } 5238 } 5177 } 5239 5178 5240 /* 5179 /* 5241 * In case of left shift, iterator po 5180 * In case of left shift, iterator points to start and it is increased 5242 * till we reach stop. In case of rig 5181 * till we reach stop. In case of right shift, iterator points to stop 5243 * and it is decreased till we reach 5182 * and it is decreased till we reach start. 
5244 */ 5183 */ 5245 again: 5184 again: 5246 ret = 0; 5185 ret = 0; 5247 if (SHIFT == SHIFT_LEFT) 5186 if (SHIFT == SHIFT_LEFT) 5248 iterator = &start; 5187 iterator = &start; 5249 else 5188 else 5250 iterator = &stop; 5189 iterator = &stop; 5251 5190 5252 if (tmp != EXT_MAX_BLOCKS) 5191 if (tmp != EXT_MAX_BLOCKS) 5253 *iterator = tmp; 5192 *iterator = tmp; 5254 5193 5255 /* 5194 /* 5256 * Its safe to start updating extents 5195 * Its safe to start updating extents. Start and stop are unsigned, so 5257 * in case of right shift if extent w 5196 * in case of right shift if extent with 0 block is reached, iterator 5258 * becomes NULL to indicate the end o 5197 * becomes NULL to indicate the end of the loop. 5259 */ 5198 */ 5260 while (iterator && start <= stop) { 5199 while (iterator && start <= stop) { 5261 path = ext4_find_extent(inode !! 5200 path = ext4_find_extent(inode, *iterator, &path, 5262 EXT4_ 5201 EXT4_EX_NOCACHE); 5263 if (IS_ERR(path)) 5202 if (IS_ERR(path)) 5264 return PTR_ERR(path); 5203 return PTR_ERR(path); 5265 depth = path->p_depth; 5204 depth = path->p_depth; 5266 extent = path[depth].p_ext; 5205 extent = path[depth].p_ext; 5267 if (!extent) { 5206 if (!extent) { 5268 EXT4_ERROR_INODE(inod 5207 EXT4_ERROR_INODE(inode, "unexpected hole at %lu", 5269 (uns 5208 (unsigned long) *iterator); 5270 return -EFSCORRUPTED; 5209 return -EFSCORRUPTED; 5271 } 5210 } 5272 if (SHIFT == SHIFT_LEFT && *i 5211 if (SHIFT == SHIFT_LEFT && *iterator > 5273 le32_to_cpu(extent->ee_bl 5212 le32_to_cpu(extent->ee_block)) { 5274 /* Hole, move to the 5213 /* Hole, move to the next extent */ 5275 if (extent < EXT_LAST 5214 if (extent < EXT_LAST_EXTENT(path[depth].p_hdr)) { 5276 path[depth].p 5215 path[depth].p_ext++; 5277 } else { 5216 } else { 5278 *iterator = e 5217 *iterator = ext4_ext_next_allocated_block(path); 5279 continue; 5218 continue; 5280 } 5219 } 5281 } 5220 } 5282 5221 5283 tmp = *iterator; 5222 tmp = *iterator; 5284 if (SHIFT == SHIFT_LEFT) { 5223 if (SHIFT == SHIFT_LEFT) { 5285 extent = EXT_LAST_EXT 5224 extent = EXT_LAST_EXTENT(path[depth].p_hdr); 5286 *iterator = le32_to_c 5225 *iterator = le32_to_cpu(extent->ee_block) + 5287 ext4_ 5226 ext4_ext_get_actual_len(extent); 5288 } else { 5227 } else { 5289 extent = EXT_FIRST_EX 5228 extent = EXT_FIRST_EXTENT(path[depth].p_hdr); 5290 if (le32_to_cpu(exten 5229 if (le32_to_cpu(extent->ee_block) > start) 5291 *iterator = l 5230 *iterator = le32_to_cpu(extent->ee_block) - 1; 5292 else if (le32_to_cpu( 5231 else if (le32_to_cpu(extent->ee_block) == start) 5293 iterator = NU 5232 iterator = NULL; 5294 else { 5233 else { 5295 extent = EXT_ 5234 extent = EXT_LAST_EXTENT(path[depth].p_hdr); 5296 while (le32_t 5235 while (le32_to_cpu(extent->ee_block) >= start) 5297 exten 5236 extent--; 5298 5237 5299 if (extent == 5238 if (extent == EXT_LAST_EXTENT(path[depth].p_hdr)) 5300 break 5239 break; 5301 5240 5302 extent++; 5241 extent++; 5303 iterator = NU 5242 iterator = NULL; 5304 } 5243 } 5305 path[depth].p_ext = e 5244 path[depth].p_ext = extent; 5306 } 5245 } 5307 ret = ext4_ext_shift_path_ext 5246 ret = ext4_ext_shift_path_extents(path, shift, inode, 5308 handle, SHIFT 5247 handle, SHIFT); 5309 /* iterator can be NULL which 5248 /* iterator can be NULL which means we should break */ 5310 if (ret == -EAGAIN) 5249 if (ret == -EAGAIN) 5311 goto again; 5250 goto again; 5312 if (ret) 5251 if (ret) 5313 break; 5252 break; 5314 } 5253 } 5315 out: 5254 out: 5316 ext4_free_ext_path(path); 5255 ext4_free_ext_path(path); 5317 return ret; 5256 return ret; 5318 
} 5257 } 5319 5258 5320 /* 5259 /* 5321 * ext4_collapse_range: 5260 * ext4_collapse_range: 5322 * This implements the fallocate's collapse r 5261 * This implements the fallocate's collapse range functionality for ext4 5323 * Returns: 0 and non-zero on error. 5262 * Returns: 0 and non-zero on error. 5324 */ 5263 */ 5325 static int ext4_collapse_range(struct file *f 5264 static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len) 5326 { 5265 { 5327 struct inode *inode = file_inode(file 5266 struct inode *inode = file_inode(file); 5328 struct super_block *sb = inode->i_sb; 5267 struct super_block *sb = inode->i_sb; 5329 struct address_space *mapping = inode 5268 struct address_space *mapping = inode->i_mapping; 5330 ext4_lblk_t punch_start, punch_stop; 5269 ext4_lblk_t punch_start, punch_stop; 5331 handle_t *handle; 5270 handle_t *handle; 5332 unsigned int credits; 5271 unsigned int credits; 5333 loff_t new_size, ioffset; 5272 loff_t new_size, ioffset; 5334 int ret; 5273 int ret; 5335 5274 5336 /* 5275 /* 5337 * We need to test this early because 5276 * We need to test this early because xfstests assumes that a 5338 * collapse range of (0, 1) will retu 5277 * collapse range of (0, 1) will return EOPNOTSUPP if the file 5339 * system does not support collapse r 5278 * system does not support collapse range. 5340 */ 5279 */ 5341 if (!ext4_test_inode_flag(inode, EXT4 5280 if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 5342 return -EOPNOTSUPP; 5281 return -EOPNOTSUPP; 5343 5282 5344 /* Collapse range works only on fs cl 5283 /* Collapse range works only on fs cluster size aligned regions. */ 5345 if (!IS_ALIGNED(offset | len, EXT4_CL 5284 if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb))) 5346 return -EINVAL; 5285 return -EINVAL; 5347 5286 5348 trace_ext4_collapse_range(inode, offs 5287 trace_ext4_collapse_range(inode, offset, len); 5349 5288 5350 punch_start = offset >> EXT4_BLOCK_SI 5289 punch_start = offset >> EXT4_BLOCK_SIZE_BITS(sb); 5351 punch_stop = (offset + len) >> EXT4_B 5290 punch_stop = (offset + len) >> EXT4_BLOCK_SIZE_BITS(sb); 5352 5291 5353 inode_lock(inode); 5292 inode_lock(inode); 5354 /* 5293 /* 5355 * There is no need to overlap collap 5294 * There is no need to overlap collapse range with EOF, in which case 5356 * it is effectively a truncate opera 5295 * it is effectively a truncate operation 5357 */ 5296 */ 5358 if (offset + len >= inode->i_size) { 5297 if (offset + len >= inode->i_size) { 5359 ret = -EINVAL; 5298 ret = -EINVAL; 5360 goto out_mutex; 5299 goto out_mutex; 5361 } 5300 } 5362 5301 5363 /* Currently just for extent based fi 5302 /* Currently just for extent based files */ 5364 if (!ext4_test_inode_flag(inode, EXT4 5303 if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { 5365 ret = -EOPNOTSUPP; 5304 ret = -EOPNOTSUPP; 5366 goto out_mutex; 5305 goto out_mutex; 5367 } 5306 } 5368 5307 5369 /* Wait for existing dio to complete 5308 /* Wait for existing dio to complete */ 5370 inode_dio_wait(inode); 5309 inode_dio_wait(inode); 5371 5310 5372 ret = file_modified(file); 5311 ret = file_modified(file); 5373 if (ret) 5312 if (ret) 5374 goto out_mutex; 5313 goto out_mutex; 5375 5314 5376 /* 5315 /* 5377 * Prevent page faults from reinstant 5316 * Prevent page faults from reinstantiating pages we have released from 5378 * page cache. 5317 * page cache. 
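	 * (Unlike a plain truncate, the data beyond the collapsed
	 * range must survive, so it is written back below before the
	 * page cache is truncated.)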
5379 */ 5318 */ 5380 filemap_invalidate_lock(mapping); 5319 filemap_invalidate_lock(mapping); 5381 5320 5382 ret = ext4_break_layouts(inode); 5321 ret = ext4_break_layouts(inode); 5383 if (ret) 5322 if (ret) 5384 goto out_mmap; 5323 goto out_mmap; 5385 5324 5386 /* 5325 /* 5387 * Need to round down offset to be al 5326 * Need to round down offset to be aligned with page size boundary 5388 * for page size > block size. 5327 * for page size > block size. 5389 */ 5328 */ 5390 ioffset = round_down(offset, PAGE_SIZ 5329 ioffset = round_down(offset, PAGE_SIZE); 5391 /* 5330 /* 5392 * Write tail of the last page before 5331 * Write tail of the last page before removed range since it will get 5393 * removed from the page cache below. 5332 * removed from the page cache below. 5394 */ 5333 */ 5395 ret = filemap_write_and_wait_range(ma 5334 ret = filemap_write_and_wait_range(mapping, ioffset, offset); 5396 if (ret) 5335 if (ret) 5397 goto out_mmap; 5336 goto out_mmap; 5398 /* 5337 /* 5399 * Write data that will be shifted to 5338 * Write data that will be shifted to preserve them when discarding 5400 * page cache below. We are also prot 5339 * page cache below. We are also protected from pages becoming dirty 5401 * by i_rwsem and invalidate_lock. 5340 * by i_rwsem and invalidate_lock. 5402 */ 5341 */ 5403 ret = filemap_write_and_wait_range(ma 5342 ret = filemap_write_and_wait_range(mapping, offset + len, 5404 LL 5343 LLONG_MAX); 5405 if (ret) 5344 if (ret) 5406 goto out_mmap; 5345 goto out_mmap; 5407 truncate_pagecache(inode, ioffset); 5346 truncate_pagecache(inode, ioffset); 5408 5347 5409 credits = ext4_writepage_trans_blocks 5348 credits = ext4_writepage_trans_blocks(inode); 5410 handle = ext4_journal_start(inode, EX 5349 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); 5411 if (IS_ERR(handle)) { 5350 if (IS_ERR(handle)) { 5412 ret = PTR_ERR(handle); 5351 ret = PTR_ERR(handle); 5413 goto out_mmap; 5352 goto out_mmap; 5414 } 5353 } 5415 ext4_fc_mark_ineligible(sb, EXT4_FC_R 5354 ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle); 5416 5355 5417 down_write(&EXT4_I(inode)->i_data_sem 5356 down_write(&EXT4_I(inode)->i_data_sem); 5418 ext4_discard_preallocations(inode); !! 5357 ext4_discard_preallocations(inode, 0); 5419 ext4_es_remove_extent(inode, punch_st 5358 ext4_es_remove_extent(inode, punch_start, EXT_MAX_BLOCKS - punch_start); 5420 5359 5421 ret = ext4_ext_remove_space(inode, pu 5360 ret = ext4_ext_remove_space(inode, punch_start, punch_stop - 1); 5422 if (ret) { 5361 if (ret) { 5423 up_write(&EXT4_I(inode)->i_da 5362 up_write(&EXT4_I(inode)->i_data_sem); 5424 goto out_stop; 5363 goto out_stop; 5425 } 5364 } 5426 ext4_discard_preallocations(inode); !! 
	ext4_discard_preallocations(inode, 0);

	ret = ext4_ext_shift_extents(inode, handle, punch_stop,
				     punch_stop - punch_start, SHIFT_LEFT);
	if (ret) {
		up_write(&EXT4_I(inode)->i_data_sem);
		goto out_stop;
	}

	new_size = inode->i_size - len;
	i_size_write(inode, new_size);
	EXT4_I(inode)->i_disksize = new_size;

	up_write(&EXT4_I(inode)->i_data_sem);
	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
	inode->i_mtime = inode->i_ctime = current_time(inode);
	ret = ext4_mark_inode_dirty(handle, inode);
	ext4_update_inode_fsync_trans(handle, inode, 1);

out_stop:
	ext4_journal_stop(handle);
out_mmap:
	filemap_invalidate_unlock(mapping);
out_mutex:
	inode_unlock(inode);
	return ret;
}
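/*
 * Illustrative userspace sketch, not part of the original file: how the
 * collapse-range path above is typically driven via fallocate(2). The
 * file name is hypothetical; offset and length must be cluster aligned
 * and must end before EOF, per the checks above.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <unistd.h>

/* Remove bytes [16 MiB, 32 MiB) of "data.img", shifting the tail left. */
int collapse_example(void)
{
	int fd = open("data.img", O_RDWR);

	if (fd < 0)
		return -1;
	if (fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, 16 << 20, 16 << 20) < 0)
		perror("fallocate");
	close(fd);
	return 0;
}
#endif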

/*
 * ext4_insert_range:
 * This function implements the FALLOC_FL_INSERT_RANGE flag of fallocate.
 * The data blocks starting from @offset to the EOF are shifted by @len
 * to the right to create a hole in the @inode. Inode size is increased
 * by len bytes.
 * Returns 0 on success, error otherwise.
 */
static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct super_block *sb = inode->i_sb;
	struct address_space *mapping = inode->i_mapping;
	handle_t *handle;
	struct ext4_ext_path *path;
	struct ext4_extent *extent;
	ext4_lblk_t offset_lblk, len_lblk, ee_start_lblk = 0;
	unsigned int credits, ee_len;
	int ret = 0, depth, split_flag = 0;
	loff_t ioffset;

	/*
	 * We need to test this early because xfstests assumes that an
	 * insert range of (0, 1) will return EOPNOTSUPP if the file
	 * system does not support insert range.
	 */
	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return -EOPNOTSUPP;

	/* Insert range works only on fs cluster size aligned regions. */
	if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb)))
		return -EINVAL;

	trace_ext4_insert_range(inode, offset, len);

	offset_lblk = offset >> EXT4_BLOCK_SIZE_BITS(sb);
	len_lblk = len >> EXT4_BLOCK_SIZE_BITS(sb);

	inode_lock(inode);
	/* Currently just for extent based files */
	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		ret = -EOPNOTSUPP;
		goto out_mutex;
	}

	/* Check whether the maximum file size would be exceeded */
	if (len > inode->i_sb->s_maxbytes - inode->i_size) {
		ret = -EFBIG;
		goto out_mutex;
	}

	/* Offset must be less than i_size */
	if (offset >= inode->i_size) {
		ret = -EINVAL;
		goto out_mutex;
	}

	/* Wait for existing dio to complete */
	inode_dio_wait(inode);

	ret = file_modified(file);
	if (ret)
		goto out_mutex;

	/*
	 * Prevent page faults from reinstantiating pages we have released
	 * from the page cache.
	 */
	filemap_invalidate_lock(mapping);

	ret = ext4_break_layouts(inode);
	if (ret)
		goto out_mmap;

	/*
	 * Need to round down to align start offset to page size boundary
	 * for page size > block size.
	 */
	ioffset = round_down(offset, PAGE_SIZE);
	/* Write out all dirty pages */
	ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
			LLONG_MAX);
	if (ret)
		goto out_mmap;
	truncate_pagecache(inode, ioffset);

	credits = ext4_writepage_trans_blocks(inode);
	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out_mmap;
	}
	ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle);

	/* Expand file to avoid data loss if there is error while shifting */
	inode->i_size += len;
	EXT4_I(inode)->i_disksize += len;
	inode->i_mtime = inode->i_ctime = current_time(inode);
	ret = ext4_mark_inode_dirty(handle, inode);
	if (ret)
		goto out_stop;

	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_discard_preallocations(inode, 0);

	path = ext4_find_extent(inode, offset_lblk, NULL, 0);
	if (IS_ERR(path)) {
		up_write(&EXT4_I(inode)->i_data_sem);
		ret = PTR_ERR(path);
		goto out_stop;
	}

	depth = ext_depth(inode);
	extent = path[depth].p_ext;
	if (extent) {
		ee_start_lblk = le32_to_cpu(extent->ee_block);
		ee_len = ext4_ext_get_actual_len(extent);

		/*
		 * If offset_lblk is not the starting block of the extent,
		 * split the extent at offset_lblk
		 */
		if ((offset_lblk > ee_start_lblk) &&
				(offset_lblk < (ee_start_lblk + ee_len))) {
			if (ext4_ext_is_unwritten(extent))
				split_flag = EXT4_EXT_MARK_UNWRIT1 |
					EXT4_EXT_MARK_UNWRIT2;
			ret = ext4_split_extent_at(handle, inode, &path,
					offset_lblk, split_flag,
					EXT4_EX_NOCACHE |
					EXT4_GET_BLOCKS_PRE_IO |
					EXT4_GET_BLOCKS_METADATA_NOFAIL);
		}
		ext4_free_ext_path(path);
		if (ret < 0) {
			up_write(&EXT4_I(inode)->i_data_sem);
			goto out_stop;
		}
	} else {
		ext4_free_ext_path(path);
	}

	ext4_es_remove_extent(inode, offset_lblk, EXT_MAX_BLOCKS - offset_lblk);

	/*
	 * If offset_lblk lies in a hole at the start of the file, use
	 * ee_start_lblk to shift extents
	 */
	ret = ext4_ext_shift_extents(inode, handle,
		max(ee_start_lblk, offset_lblk), len_lblk, SHIFT_RIGHT);

	up_write(&EXT4_I(inode)->i_data_sem);
	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
	if (ret >= 0)
		ext4_update_inode_fsync_trans(handle, inode, 1);

out_stop:
	ext4_journal_stop(handle);
out_mmap:
	filemap_invalidate_unlock(mapping);
out_mutex:
	inode_unlock(inode);
	return ret;
}
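/*
 * Illustrative counterpart for the insert-range path above, again a
 * hedged userspace sketch with a hypothetical file name: the offset
 * must be cluster aligned and below i_size, and i_size grows by len.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <unistd.h>

/* Open a 16 MiB hole at offset 16 MiB; data at and past 16 MiB shifts right. */
int insert_example(void)
{
	int fd = open("data.img", O_RDWR);

	if (fd < 0)
		return -1;
	if (fallocate(fd, FALLOC_FL_INSERT_RANGE, 16 << 20, 16 << 20) < 0)
		perror("fallocate");
	close(fd);
	return 0;
}
#endif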

/**
 * ext4_swap_extents() - Swap extents between two inodes
 * @handle: handle for this transaction
 * @inode1:	First inode
 * @inode2:	Second inode
 * @lblk1:	Start block for first inode
 * @lblk2:	Start block for second inode
 * @count:	Number of blocks to swap
 * @unwritten: Mark second inode's extents as unwritten after swap
 * @erp:	Pointer to save error value
 *
 * This helper routine does exactly what is promised: "swap extents". All
 * other stuff such as page-cache locking consistency, bh mapping consistency
 * or extent's data copying must be performed by the caller.
 * Locking:
 *		i_rwsem is held for both inodes
 *		i_data_sem is locked for write for both inodes
 * Assumptions:
 *		All pages from requested range are locked for both inodes
 */
int
ext4_swap_extents(handle_t *handle, struct inode *inode1,
		  struct inode *inode2, ext4_lblk_t lblk1, ext4_lblk_t lblk2,
		  ext4_lblk_t count, int unwritten, int *erp)
{
	struct ext4_ext_path *path1 = NULL;
	struct ext4_ext_path *path2 = NULL;
	int replaced_count = 0;

	BUG_ON(!rwsem_is_locked(&EXT4_I(inode1)->i_data_sem));
	BUG_ON(!rwsem_is_locked(&EXT4_I(inode2)->i_data_sem));
	BUG_ON(!inode_is_locked(inode1));
	BUG_ON(!inode_is_locked(inode2));

	ext4_es_remove_extent(inode1, lblk1, count);
	ext4_es_remove_extent(inode2, lblk2, count);

	while (count) {
		struct ext4_extent *ex1, *ex2, tmp_ex;
		ext4_lblk_t e1_blk, e2_blk;
		int e1_len, e2_len, len;
		int split = 0;

		path1 = ext4_find_extent(inode1, lblk1, NULL, EXT4_EX_NOCACHE);
		if (IS_ERR(path1)) {
			*erp = PTR_ERR(path1);
			path1 = NULL;
		finish:
			count = 0;
			goto repeat;
		}
		path2 = ext4_find_extent(inode2, lblk2, NULL, EXT4_EX_NOCACHE);
		if (IS_ERR(path2)) {
			*erp = PTR_ERR(path2);
			path2 = NULL;
			goto finish;
		}
		ex1 = path1[path1->p_depth].p_ext;
		ex2 = path2[path2->p_depth].p_ext;
		/* Do we have something to swap? */
		if (unlikely(!ex2 || !ex1))
			goto finish;

		e1_blk = le32_to_cpu(ex1->ee_block);
		e2_blk = le32_to_cpu(ex2->ee_block);
		e1_len = ext4_ext_get_actual_len(ex1);
		e2_len = ext4_ext_get_actual_len(ex2);

		/* Hole handling */
		if (!in_range(lblk1, e1_blk, e1_len) ||
		    !in_range(lblk2, e2_blk, e2_len)) {
			ext4_lblk_t next1, next2;

			/* if hole after extent, then go to next extent */
			next1 = ext4_ext_next_allocated_block(path1);
			next2 = ext4_ext_next_allocated_block(path2);
			/* If hole before extent, then shift to that extent */
			if (e1_blk > lblk1)
				next1 = e1_blk;
			if (e2_blk > lblk2)
				next2 = e2_blk;
			/* Do we have something to swap */
			if (next1 == EXT_MAX_BLOCKS || next2 == EXT_MAX_BLOCKS)
				goto finish;
			/* Move to the rightmost boundary */
			len = next1 - lblk1;
			if (len < next2 - lblk2)
				len = next2 - lblk2;
			if (len > count)
				len = count;
			lblk1 += len;
			lblk2 += len;
			count -= len;
			goto repeat;
		}

		/* Prepare left boundary */
		if (e1_blk < lblk1) {
			split = 1;
			*erp = ext4_force_split_extent_at(handle, inode1,
						&path1, lblk1, 0);
			if (unlikely(*erp))
				goto finish;
		}
		if (e2_blk < lblk2) {
			split = 1;
			*erp = ext4_force_split_extent_at(handle, inode2,
						&path2, lblk2, 0);
			if (unlikely(*erp))
				goto finish;
		}
		/* ext4_split_extent_at() may result in leaf extent split,
		 * the path must be revalidated. */
		if (split)
			goto repeat;

		/* Prepare right boundary */
		len = count;
		if (len > e1_blk + e1_len - lblk1)
			len = e1_blk + e1_len - lblk1;
		if (len > e2_blk + e2_len - lblk2)
			len = e2_blk + e2_len - lblk2;

		if (len != e1_len) {
			split = 1;
			*erp = ext4_force_split_extent_at(handle, inode1,
						&path1, lblk1 + len, 0);
			if (unlikely(*erp))
				goto finish;
		}
		if (len != e2_len) {
			split = 1;
			*erp = ext4_force_split_extent_at(handle, inode2,
						&path2, lblk2 + len, 0);
			if (*erp)
				goto finish;
		}
		/* ext4_split_extent_at() may result in leaf extent split,
		 * the path must be revalidated. */
		if (split)
			goto repeat;

		BUG_ON(e2_len != e1_len);
		*erp = ext4_ext_get_access(handle, inode1, path1 + path1->p_depth);
		if (unlikely(*erp))
			goto finish;
		*erp = ext4_ext_get_access(handle, inode2, path2 + path2->p_depth);
		if (unlikely(*erp))
			goto finish;

		/* Both extents are fully inside boundaries. Swap it now */
		tmp_ex = *ex1;
		ext4_ext_store_pblock(ex1, ext4_ext_pblock(ex2));
		ext4_ext_store_pblock(ex2, ext4_ext_pblock(&tmp_ex));
		ex1->ee_len = cpu_to_le16(e2_len);
		ex2->ee_len = cpu_to_le16(e1_len);
		if (unwritten)
			ext4_ext_mark_unwritten(ex2);
		if (ext4_ext_is_unwritten(&tmp_ex))
			ext4_ext_mark_unwritten(ex1);

		ext4_ext_try_to_merge(handle, inode2, path2, ex2);
		ext4_ext_try_to_merge(handle, inode1, path1, ex1);
		*erp = ext4_ext_dirty(handle, inode2, path2 +
				      path2->p_depth);
		if (unlikely(*erp))
			goto finish;
		*erp = ext4_ext_dirty(handle, inode1, path1 +
				      path1->p_depth);
		/*
		 * Looks scary, eh? The second inode already points to the new
		 * blocks and was successfully dirtied. Luckily, an error here
		 * can only be a journal error, so the whole transaction will
		 * be aborted anyway.
		 */
		if (unlikely(*erp))
			goto finish;
		lblk1 += len;
		lblk2 += len;
		replaced_count += len;
		count -= len;

	repeat:
		ext4_free_ext_path(path1);
		ext4_free_ext_path(path2);
		path1 = path2 = NULL;
	}
	return replaced_count;
}
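/*
 * Standalone sketch of the "move to the rightmost boundary" step in the
 * hole-handling branch above; swap_skip_len() is a hypothetical helper,
 * not kernel API. Both walks advance by one common length: far enough
 * to clear the larger of the two holes, but never past the remaining
 * swap count.
 */
#if 0
static ext4_lblk_t swap_skip_len(ext4_lblk_t lblk1, ext4_lblk_t next1,
				 ext4_lblk_t lblk2, ext4_lblk_t next2,
				 ext4_lblk_t count)
{
	ext4_lblk_t len = next1 - lblk1;	/* hole length in file 1 */

	if (len < next2 - lblk2)		/* take the larger hole */
		len = next2 - lblk2;
	if (len > count)			/* but stay within the request */
		len = count;
	return len;
}
#endif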

/*
 * ext4_clu_mapped - determine whether any block in a logical cluster has
 *                   been mapped to a physical cluster
 *
 * @inode - file containing the logical cluster
 * @lclu - logical cluster of interest
 *
 * Returns 1 if any block in the logical cluster is mapped, signifying
 * that a physical cluster has been allocated for it.  Otherwise,
 * returns 0.  Can also return negative error codes.  Derived from
 * ext4_ext_map_blocks().
 */
int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_ext_path *path;
	int depth, mapped = 0, err = 0;
	struct ext4_extent *extent;
	ext4_lblk_t first_lblk, first_lclu, last_lclu;

	/*
	 * if data can be stored inline, the logical cluster isn't
	 * mapped - no physical clusters have been allocated, and the
	 * file has no extents
	 */
	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) ||
	    ext4_has_inline_data(inode))
		return 0;

	/* search for the extent closest to the first block in the cluster */
	path = ext4_find_extent(inode, EXT4_C2B(sbi, lclu), NULL, 0);
	if (IS_ERR(path)) {
		err = PTR_ERR(path);
		path = NULL;
		goto out;
	}

	depth = ext_depth(inode);

	/*
	 * A consistent leaf must not be empty.  This situation is possible,
	 * though, _during_ tree modification, and it's why an assert can't
	 * be put in ext4_find_extent().
	 */
	if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
		EXT4_ERROR_INODE(inode,
		    "bad extent address - lblock: %lu, depth: %d, pblock: %lld",
				 (unsigned long) EXT4_C2B(sbi, lclu),
				 depth, path[depth].p_block);
		err = -EFSCORRUPTED;
		goto out;
	}

	extent = path[depth].p_ext;

	/* can't be mapped if the extent tree is empty */
	if (extent == NULL)
		goto out;

	first_lblk = le32_to_cpu(extent->ee_block);
	first_lclu = EXT4_B2C(sbi, first_lblk);

	/*
	 * Three possible outcomes at this point - found extent spanning
	 * the target cluster, to the left of the target cluster, or to the
	 * right of the target cluster.  The first two cases are handled here.
	 * The last case indicates the target cluster is not mapped.
	 */
	if (lclu >= first_lclu) {
		last_lclu = EXT4_B2C(sbi, first_lblk +
				     ext4_ext_get_actual_len(extent) - 1);
		if (lclu <= last_lclu) {
			mapped = 1;
		} else {
			first_lblk = ext4_ext_next_allocated_block(path);
			first_lclu = EXT4_B2C(sbi, first_lblk);
			if (lclu == first_lclu)
				mapped = 1;
		}
	}

out:
	ext4_free_ext_path(path);

	return err ? err : mapped;
}
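/*
 * Sketch of the cluster arithmetic assumed above; b2c()/c2b() are
 * hypothetical stand-ins for the EXT4_B2C()/EXT4_C2B() macros, which
 * reduce to shifts by the superblock's s_cluster_bits. With 4 KiB
 * blocks and a bigalloc cluster of 16 blocks (s_cluster_bits == 4),
 * logical block 35 falls in logical cluster 2, which spans blocks
 * 32..47.
 */
#if 0
static ext4_lblk_t b2c(ext4_lblk_t lblk, unsigned int cluster_bits)
{
	return lblk >> cluster_bits;	/* block -> containing cluster */
}

static ext4_lblk_t c2b(ext4_lblk_t lclu, unsigned int cluster_bits)
{
	return lclu << cluster_bits;	/* cluster -> its first block */
}
#endif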

/*
 * Updates physical block address and unwritten status of extent
 * starting at lblk start and of len. If such an extent doesn't exist,
 * this function splits the extent tree appropriately to create an
 * extent like this.  This function is called in the fast commit
 * replay path.  Returns 0 on success and error on failure.
 */
int ext4_ext_replay_update_ex(struct inode *inode, ext4_lblk_t start,
			      int len, int unwritten, ext4_fsblk_t pblk)
{
	struct ext4_ext_path *path = NULL, *ppath;
	struct ext4_extent *ex;
	int ret;

	path = ext4_find_extent(inode, start, NULL, 0);
	if (IS_ERR(path))
		return PTR_ERR(path);
	ex = path[path->p_depth].p_ext;
	if (!ex) {
		ret = -EFSCORRUPTED;
		goto out;
	}

	if (le32_to_cpu(ex->ee_block) != start ||
		ext4_ext_get_actual_len(ex) != len) {
		/* We need to split this extent to match our extent first */
		ppath = path;
		down_write(&EXT4_I(inode)->i_data_sem);
		ret = ext4_force_split_extent_at(NULL, inode, &ppath, start, 1);
		up_write(&EXT4_I(inode)->i_data_sem);
		if (ret)
			goto out;
		kfree(path);
		path = ext4_find_extent(inode, start, NULL, 0);
		if (IS_ERR(path))
			return PTR_ERR(path);
		ppath = path;
		ex = path[path->p_depth].p_ext;
		WARN_ON(le32_to_cpu(ex->ee_block) != start);
		if (ext4_ext_get_actual_len(ex) != len) {
			down_write(&EXT4_I(inode)->i_data_sem);
			ret = ext4_force_split_extent_at(NULL, inode, &ppath,
							 start + len, 1);
			up_write(&EXT4_I(inode)->i_data_sem);
			if (ret)
				goto out;
			kfree(path);
			path = ext4_find_extent(inode, start, NULL, 0);
			if (IS_ERR(path))
				return PTR_ERR(path);
			ex = path[path->p_depth].p_ext;
		}
	}
	if (unwritten)
		ext4_ext_mark_unwritten(ex);
	else
		ext4_ext_mark_initialized(ex);
	ext4_ext_store_pblock(ex, pblk);
	down_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_ext_dirty(NULL, inode, &path[path->p_depth]);
	up_write(&EXT4_I(inode)->i_data_sem);
out:
	ext4_free_ext_path(path);
	ext4_mark_inode_dirty(NULL, inode);
	return ret;
}

/* Try to shrink the extent tree */
void ext4_ext_replay_shrink_inode(struct inode *inode, ext4_lblk_t end)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent *ex;
	ext4_lblk_t old_cur, cur = 0;

	while (cur < end) {
		path = ext4_find_extent(inode, cur, NULL, 0);
		if (IS_ERR(path))
			return;
		ex = path[path->p_depth].p_ext;
		if (!ex) {
			ext4_free_ext_path(path);
			ext4_mark_inode_dirty(NULL, inode);
			return;
		}
		old_cur = cur;
		cur = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
		if (cur <= old_cur)
			cur = old_cur + 1;
		ext4_ext_try_to_merge(NULL, inode, path, ex);
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_ext_dirty(NULL, inode, &path[path->p_depth]);
		up_write(&EXT4_I(inode)->i_data_sem);
		ext4_mark_inode_dirty(NULL, inode);
		ext4_free_ext_path(path);
	}
}

/* Check if *cur is a hole and if it is, skip it */
static int skip_hole(struct inode *inode, ext4_lblk_t *cur)
{
	int ret;
	struct ext4_map_blocks map;

	map.m_lblk = *cur;
	map.m_len = ((inode->i_size) >> inode->i_sb->s_blocksize_bits) - *cur;

	ret = ext4_map_blocks(NULL, inode, &map, 0);
	if (ret < 0)
		return ret;
	if (ret != 0)
		return 0;
	*cur = *cur + map.m_len;
	return 0;
}
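/*
 * Hedged sketch of the ext4_map_blocks() contract that skip_hole()
 * relies on: with no creation flags the call returns the number of
 * mapped blocks, or 0 for a hole while still setting map.m_len to the
 * hole's length. next_mapped() is a hypothetical helper combining the
 * two outcomes into one scan step.
 */
#if 0
static int next_mapped(struct inode *inode, ext4_lblk_t *cur, ext4_lblk_t end)
{
	struct ext4_map_blocks map;
	int ret;

	while (*cur < end) {
		map.m_lblk = *cur;
		map.m_len = end - *cur;
		ret = ext4_map_blocks(NULL, inode, &map, 0);
		if (ret != 0)
			return ret;	/* error (< 0) or mapped extent (> 0) */
		*cur += map.m_len;	/* hop over the hole in one step */
	}
	return 0;
}
#endif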

/* Count number of blocks used by this inode and update i_blocks */
int ext4_ext_replay_set_iblocks(struct inode *inode)
{
	struct ext4_ext_path *path = NULL, *path2 = NULL;
	struct ext4_extent *ex;
	ext4_lblk_t cur = 0, end;
	int numblks = 0, i, ret = 0;
	ext4_fsblk_t cmp1, cmp2;
	struct ext4_map_blocks map;

	/* Determine the size of the file first */
	path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
					EXT4_EX_NOCACHE);
	if (IS_ERR(path))
		return PTR_ERR(path);
	ex = path[path->p_depth].p_ext;
	if (!ex) {
		ext4_free_ext_path(path);
		goto out;
	}
	end = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
	ext4_free_ext_path(path);

	/* Count the number of data blocks */
	cur = 0;
	while (cur < end) {
		map.m_lblk = cur;
		map.m_len = end - cur;
		ret = ext4_map_blocks(NULL, inode, &map, 0);
		if (ret < 0)
			break;
		if (ret > 0)
			numblks += ret;
		cur = cur + map.m_len;
	}

	/*
	 * Count the number of extent tree blocks. We do it by looking up
	 * two successive extents and determining the difference between
	 * their paths. When the paths of two successive extents differ,
	 * we compare the blocks in the path at each level and increment
	 * iblocks by the total number of differences found.
	 */
	cur = 0;
	ret = skip_hole(inode, &cur);
	if (ret < 0)
		goto out;
	path = ext4_find_extent(inode, cur, NULL, 0);
	if (IS_ERR(path))
		goto out;
	numblks += path->p_depth;
	ext4_free_ext_path(path);
	while (cur < end) {
		path = ext4_find_extent(inode, cur, NULL, 0);
		if (IS_ERR(path))
			break;
		ex = path[path->p_depth].p_ext;
		if (!ex) {
			ext4_free_ext_path(path);
			return 0;
		}
		cur = max(cur + 1, le32_to_cpu(ex->ee_block) +
					ext4_ext_get_actual_len(ex));
		ret = skip_hole(inode, &cur);
		if (ret < 0) {
			ext4_free_ext_path(path);
			break;
		}
		path2 = ext4_find_extent(inode, cur, NULL, 0);
		if (IS_ERR(path2)) {
			ext4_free_ext_path(path);
			break;
		}
		for (i = 0; i <= max(path->p_depth, path2->p_depth); i++) {
			cmp1 = cmp2 = 0;
			if (i <= path->p_depth)
				cmp1 = path[i].p_bh ?
					path[i].p_bh->b_blocknr : 0;
			if (i <= path2->p_depth)
				cmp2 = path2[i].p_bh ?
					path2[i].p_bh->b_blocknr : 0;
			if (cmp1 != cmp2 && cmp2 != 0)
				numblks++;
		}
		ext4_free_ext_path(path);
		ext4_free_ext_path(path2);
	}

out:
	inode->i_blocks = numblks << (inode->i_sb->s_blocksize_bits - 9);
	ext4_mark_inode_dirty(NULL, inode);
	return 0;
}
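/*
 * Sketch of the unit conversion performed on the i_blocks line above:
 * i_blocks is kept in 512-byte sectors, so each filesystem block
 * contributes 2^(s_blocksize_bits - 9) sectors. blocks_to_sectors() is
 * a hypothetical helper; with 4 KiB blocks (s_blocksize_bits == 12),
 * 10 blocks become 80 sectors.
 */
#if 0
static unsigned long long blocks_to_sectors(unsigned long long numblks,
					    unsigned int blocksize_bits)
{
	return numblks << (blocksize_bits - 9);
}
#endif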

int ext4_ext_clear_bb(struct inode *inode)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent *ex;
	ext4_lblk_t cur = 0, end;
	int j, ret = 0;
	struct ext4_map_blocks map;

	if (ext4_test_inode_flag(inode, EXT4_INODE_INLINE_DATA))
		return 0;

	/* Determine the size of the file first */
	path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
					EXT4_EX_NOCACHE);
	if (IS_ERR(path))
		return PTR_ERR(path);
	ex = path[path->p_depth].p_ext;
	if (!ex) {
		ext4_free_ext_path(path);
		return 0;
	}
	end = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
	ext4_free_ext_path(path);

	cur = 0;
	while (cur < end) {
		map.m_lblk = cur;
		map.m_len = end - cur;
		ret = ext4_map_blocks(NULL, inode, &map, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			path = ext4_find_extent(inode, map.m_lblk, NULL, 0);
			if (!IS_ERR_OR_NULL(path)) {
				for (j = 0; j < path->p_depth; j++) {
					ext4_mb_mark_bb(inode->i_sb,
							path[j].p_block, 1, 0);
					ext4_fc_record_regions(inode->i_sb, inode->i_ino,
							0, path[j].p_block, 1, 1);
				}
				ext4_free_ext_path(path);
			}
			ext4_mb_mark_bb(inode->i_sb, map.m_pblk, map.m_len, 0);
			ext4_fc_record_regions(inode->i_sb, inode->i_ino,
					map.m_lblk, map.m_pblk, map.m_len, 1);
		}
		cur = cur + map.m_len;
	}

	return 0;
}