
TOMOYO Linux Cross Reference
Linux/fs/ext4/extents.c


Diff markup

Differences between /fs/ext4/extents.c (Version linux-6.12-rc7) and /fs/ext4/extents.c (Version linux-4.18.20)


  1 // SPDX-License-Identifier: GPL-2.0                 1 // SPDX-License-Identifier: GPL-2.0
  2 /*                                                  2 /*
  3  * Copyright (c) 2003-2006, Cluster File Syste      3  * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
  4  * Written by Alex Tomas <alex@clusterfs.com>       4  * Written by Alex Tomas <alex@clusterfs.com>
  5  *                                                  5  *
  6  * Architecture independence:                       6  * Architecture independence:
  7  *   Copyright (c) 2005, Bull S.A.                  7  *   Copyright (c) 2005, Bull S.A.
  8  *   Written by Pierre Peiffer <pierre.peiffer      8  *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
  9  */                                                 9  */
 10                                                    10 
 11 /*                                                 11 /*
 12  * Extents support for EXT4                        12  * Extents support for EXT4
 13  *                                                 13  *
 14  * TODO:                                           14  * TODO:
 15  *   - ext4*_error() should be used in some si     15  *   - ext4*_error() should be used in some situations
 16  *   - analyze all BUG()/BUG_ON(), use -EIO wh     16  *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 17  *   - smart tree reduction                        17  *   - smart tree reduction
 18  */                                                18  */
 19                                                    19 
 20 #include <linux/fs.h>                              20 #include <linux/fs.h>
 21 #include <linux/time.h>                            21 #include <linux/time.h>
 22 #include <linux/jbd2.h>                            22 #include <linux/jbd2.h>
 23 #include <linux/highuid.h>                         23 #include <linux/highuid.h>
 24 #include <linux/pagemap.h>                         24 #include <linux/pagemap.h>
 25 #include <linux/quotaops.h>                        25 #include <linux/quotaops.h>
 26 #include <linux/string.h>                          26 #include <linux/string.h>
 27 #include <linux/slab.h>                            27 #include <linux/slab.h>
 28 #include <linux/uaccess.h>                         28 #include <linux/uaccess.h>
 29 #include <linux/fiemap.h>                          29 #include <linux/fiemap.h>
 30 #include <linux/iomap.h>                       !!  30 #include <linux/backing-dev.h>
 31 #include <linux/sched/mm.h>                    << 
 32 #include "ext4_jbd2.h"                             31 #include "ext4_jbd2.h"
 33 #include "ext4_extents.h"                          32 #include "ext4_extents.h"
 34 #include "xattr.h"                                 33 #include "xattr.h"
 35                                                    34 
 36 #include <trace/events/ext4.h>                     35 #include <trace/events/ext4.h>
 37                                                    36 
 38 /*                                                 37 /*
 39  * used by extent splitting.                       38  * used by extent splitting.
 40  */                                                39  */
 41 #define EXT4_EXT_MAY_ZEROOUT    0x1  /* safe t     40 #define EXT4_EXT_MAY_ZEROOUT    0x1  /* safe to zeroout if split fails \
 42                                         due to     41                                         due to ENOSPC */
 43 #define EXT4_EXT_MARK_UNWRIT1   0x2  /* mark f     42 #define EXT4_EXT_MARK_UNWRIT1   0x2  /* mark first half unwritten */
 44 #define EXT4_EXT_MARK_UNWRIT2   0x4  /* mark s     43 #define EXT4_EXT_MARK_UNWRIT2   0x4  /* mark second half unwritten */
 45                                                    44 
 46 #define EXT4_EXT_DATA_VALID1    0x8  /* first      45 #define EXT4_EXT_DATA_VALID1    0x8  /* first half contains valid data */
 47 #define EXT4_EXT_DATA_VALID2    0x10 /* second     46 #define EXT4_EXT_DATA_VALID2    0x10 /* second half contains valid data */
 48                                                    47 
 49 static __le32 ext4_extent_block_csum(struct in     48 static __le32 ext4_extent_block_csum(struct inode *inode,
 50                                      struct ex     49                                      struct ext4_extent_header *eh)
 51 {                                                  50 {
 52         struct ext4_inode_info *ei = EXT4_I(in     51         struct ext4_inode_info *ei = EXT4_I(inode);
 53         struct ext4_sb_info *sbi = EXT4_SB(ino     52         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
 54         __u32 csum;                                53         __u32 csum;
 55                                                    54 
 56         csum = ext4_chksum(sbi, ei->i_csum_see     55         csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
 57                            EXT4_EXTENT_TAIL_OF     56                            EXT4_EXTENT_TAIL_OFFSET(eh));
 58         return cpu_to_le32(csum);                  57         return cpu_to_le32(csum);
 59 }                                                  58 }
 60                                                    59 
 61 static int ext4_extent_block_csum_verify(struc     60 static int ext4_extent_block_csum_verify(struct inode *inode,
 62                                          struc     61                                          struct ext4_extent_header *eh)
 63 {                                                  62 {
 64         struct ext4_extent_tail *et;               63         struct ext4_extent_tail *et;
 65                                                    64 
 66         if (!ext4_has_metadata_csum(inode->i_s     65         if (!ext4_has_metadata_csum(inode->i_sb))
 67                 return 1;                          66                 return 1;
 68                                                    67 
 69         et = find_ext4_extent_tail(eh);            68         et = find_ext4_extent_tail(eh);
 70         if (et->et_checksum != ext4_extent_blo     69         if (et->et_checksum != ext4_extent_block_csum(inode, eh))
 71                 return 0;                          70                 return 0;
 72         return 1;                                  71         return 1;
 73 }                                                  72 }
 74                                                    73 
 75 static void ext4_extent_block_csum_set(struct      74 static void ext4_extent_block_csum_set(struct inode *inode,
 76                                        struct      75                                        struct ext4_extent_header *eh)
 77 {                                                  76 {
 78         struct ext4_extent_tail *et;               77         struct ext4_extent_tail *et;
 79                                                    78 
 80         if (!ext4_has_metadata_csum(inode->i_s     79         if (!ext4_has_metadata_csum(inode->i_sb))
 81                 return;                            80                 return;
 82                                                    81 
 83         et = find_ext4_extent_tail(eh);            82         et = find_ext4_extent_tail(eh);
 84         et->et_checksum = ext4_extent_block_cs     83         et->et_checksum = ext4_extent_block_csum(inode, eh);
 85 }                                                  84 }
 86                                                    85 
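The three helpers above implement ext4's metadata checksumming for extent tree blocks: a checksum is computed over the block contents up to the ext4_extent_tail at the very end of the block, and is stored or verified there. Below is a hedged, user-space sketch of that store-at-the-tail pattern; the layout and toy_csum() are invented for illustration and are not the ext4 on-disk format or the crc32c-based ext4_chksum():

/* Hedged sketch: checksum covers the block up to (but not including) a small
 * tail record that stores it.  toy_csum() stands in for crc32c. */
#include <stdint.h>
#include <stdio.h>

struct blk_tail { uint32_t checksum; };         /* plays the ext4_extent_tail role */

static uint32_t toy_csum(const uint8_t *p, size_t len)
{
        uint32_t c = 5381;
        while (len--)
                c = c * 33 + *p++;              /* djb2, illustration only */
        return c;
}

static uint32_t block_csum(const uint8_t *blk, size_t blksize)
{
        return toy_csum(blk, blksize - sizeof(struct blk_tail));
}

static void block_csum_set(uint8_t *blk, size_t blksize)
{
        struct blk_tail *t = (struct blk_tail *)(blk + blksize - sizeof(*t));
        t->checksum = block_csum(blk, blksize);
}

static int block_csum_verify(const uint8_t *blk, size_t blksize)
{
        const struct blk_tail *t =
                (const struct blk_tail *)(blk + blksize - sizeof(*t));
        return t->checksum == block_csum(blk, blksize);
}

int main(void)
{
        static uint8_t blk[4096];

        block_csum_set(blk, sizeof(blk));
        printf("clean block verifies: %d\n", block_csum_verify(blk, sizeof(blk)));
        blk[100] ^= 1;                          /* simulate on-disk corruption */
        printf("after bit flip:       %d\n", block_csum_verify(blk, sizeof(blk)));
        return 0;
}
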
 87 static struct ext4_ext_path *ext4_split_extent !!  86 static int ext4_split_extent(handle_t *handle,
 88                                                !!  87                                 struct inode *inode,
 89                                                !!  88                                 struct ext4_ext_path **ppath,
 90                                                !!  89                                 struct ext4_map_blocks *map,
 91                                                !!  90                                 int split_flag,
 92                                                !!  91                                 int flags);
 93 static int ext4_ext_trunc_restart_fn(struct in !!  92 
                                                   >>  93 static int ext4_split_extent_at(handle_t *handle,
                                                   >>  94                              struct inode *inode,
                                                   >>  95                              struct ext4_ext_path **ppath,
                                                   >>  96                              ext4_lblk_t split,
                                                   >>  97                              int split_flag,
                                                   >>  98                              int flags);
                                                   >>  99 
                                                   >> 100 static int ext4_find_delayed_extent(struct inode *inode,
                                                   >> 101                                     struct extent_status *newes);
                                                   >> 102 
                                                   >> 103 static int ext4_ext_truncate_extend_restart(handle_t *handle,
                                                   >> 104                                             struct inode *inode,
                                                   >> 105                                             int needed)
 94 {                                                 106 {
                                                   >> 107         int err;
                                                   >> 108 
                                                   >> 109         if (!ext4_handle_valid(handle))
                                                   >> 110                 return 0;
                                                   >> 111         if (handle->h_buffer_credits >= needed)
                                                   >> 112                 return 0;
 95         /*                                        113         /*
 96          * Drop i_data_sem to avoid deadlock w !! 114          * If we need to extend the journal get a few extra blocks
 97          * moment, get_block can be called onl !! 115          * while we're at it for efficiency's sake.
 98          * page cache has been already dropped << 
 99          * i_rwsem. So we can safely drop the  << 
100          */                                       116          */
101         BUG_ON(EXT4_JOURNAL(inode) == NULL);   !! 117         needed += 3;
102         ext4_discard_preallocations(inode);    !! 118         err = ext4_journal_extend(handle, needed - handle->h_buffer_credits);
103         up_write(&EXT4_I(inode)->i_data_sem);  !! 119         if (err <= 0)
104         *dropped = 1;                          !! 120                 return err;
105         return 0;                              !! 121         err = ext4_truncate_restart_trans(handle, inode, needed);
106 }                                              !! 122         if (err == 0)
107                                                !! 123                 err = -EAGAIN;
108 static inline void ext4_ext_path_brelse(struct << 
109 {                                              << 
110         brelse(path->p_bh);                    << 
111         path->p_bh = NULL;                     << 
112 }                                              << 
113                                                << 
114 static void ext4_ext_drop_refs(struct ext4_ext << 
115 {                                              << 
116         int depth, i;                          << 
117                                                << 
118         if (IS_ERR_OR_NULL(path))              << 
119                 return;                        << 
120         depth = path->p_depth;                 << 
121         for (i = 0; i <= depth; i++, path++)   << 
122                 ext4_ext_path_brelse(path);    << 
123 }                                              << 
124                                                << 
125 void ext4_free_ext_path(struct ext4_ext_path * << 
126 {                                              << 
127         if (IS_ERR_OR_NULL(path))              << 
128                 return;                        << 
129         ext4_ext_drop_refs(path);              << 
130         kfree(path);                           << 
131 }                                              << 
132                                                << 
133 /*                                             << 
134  * Make sure 'handle' has at least 'check_cred << 
135  * transaction with 'restart_cred' credits. Th << 
136  * when restarting transaction and gets it aft << 
137  *                                             << 
138  * The function returns 0 on success, 1 if tra << 
139  * and < 0 in case of fatal error.             << 
140  */                                            << 
141 int ext4_datasem_ensure_credits(handle_t *hand << 
142                                 int check_cred << 
143                                 int revoke_cre << 
144 {                                              << 
145         int ret;                               << 
146         int dropped = 0;                       << 
147                                                   124 
148         ret = ext4_journal_ensure_credits_fn(h !! 125         return err;
149                 revoke_cred, ext4_ext_trunc_re << 
150         if (dropped)                           << 
151                 down_write(&EXT4_I(inode)->i_d << 
152         return ret;                            << 
153 }                                                 126 }
154                                                   127 
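Both sides of the diff solve the same problem here: a long-running truncate can exhaust its journal credits, and extending or restarting the transaction while holding i_data_sem would deadlock, so the lock is dropped around the restart and re-taken afterwards (the newer ext4_datasem_ensure_credits() reports this back through *dropped). The following is a minimal user-space sketch of that drop-lock/restart/re-lock pattern; all names (toy_handle, ensure_credits, toy_restart) are invented stand-ins, not the jbd2 or ext4 API:

/* Hedged sketch: if the running "transaction" is low on credits, drop the data
 * lock, restart the transaction, re-take the lock, and return 1 so the caller
 * knows that state protected by the lock must be revalidated. */
#include <pthread.h>
#include <stdio.h>

struct toy_handle { int credits; };

static pthread_rwlock_t data_sem = PTHREAD_RWLOCK_INITIALIZER;

static void toy_restart(struct toy_handle *h, int restart_credits)
{
        /* stands in for committing and starting a fresh transaction */
        h->credits = restart_credits;
}

static int ensure_credits(struct toy_handle *h, int check, int restart)
{
        if (h->credits >= check)
                return 0;                       /* enough credits, nothing to do */

        pthread_rwlock_unlock(&data_sem);       /* drop the lock before restarting */
        toy_restart(h, restart);
        pthread_rwlock_wrlock(&data_sem);       /* re-take it afterwards */
        return 1;                               /* caller must revalidate its path */
}

int main(void)
{
        struct toy_handle h = { .credits = 2 };

        pthread_rwlock_wrlock(&data_sem);
        printf("restarted: %d, credits now %d\n",
               ensure_credits(&h, 8, 64), h.credits);
        pthread_rwlock_unlock(&data_sem);
        return 0;
}
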
155 /*                                                128 /*
156  * could return:                                  129  * could return:
157  *  - EROFS                                       130  *  - EROFS
158  *  - ENOMEM                                      131  *  - ENOMEM
159  */                                               132  */
160 static int ext4_ext_get_access(handle_t *handl    133 static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
161                                 struct ext4_ex    134                                 struct ext4_ext_path *path)
162 {                                                 135 {
163         int err = 0;                           << 
164                                                << 
165         if (path->p_bh) {                         136         if (path->p_bh) {
166                 /* path points to block */        137                 /* path points to block */
167                 BUFFER_TRACE(path->p_bh, "get_    138                 BUFFER_TRACE(path->p_bh, "get_write_access");
168                 err = ext4_journal_get_write_a !! 139                 return ext4_journal_get_write_access(handle, path->p_bh);
169                                                << 
170                 /*                             << 
171                  * The extent buffer's verifie << 
172                  * __ext4_ext_dirty(). We coul << 
173                  * buffer if the extents updat << 
174                  * to some error happens, forc << 
175                  */                            << 
176                 if (!err)                      << 
177                         clear_buffer_verified( << 
178         }                                         140         }
179         /* path points to leaf/index in inode     141         /* path points to leaf/index in inode body */
180         /* we use in-core data, no need to pro    142         /* we use in-core data, no need to protect them */
181         return err;                            !! 143         return 0;
182 }                                                 144 }
183                                                   145 
184 /*                                                146 /*
185  * could return:                                  147  * could return:
186  *  - EROFS                                       148  *  - EROFS
187  *  - ENOMEM                                      149  *  - ENOMEM
188  *  - EIO                                         150  *  - EIO
189  */                                               151  */
190 static int __ext4_ext_dirty(const char *where, !! 152 int __ext4_ext_dirty(const char *where, unsigned int line, handle_t *handle,
191                             handle_t *handle,  !! 153                      struct inode *inode, struct ext4_ext_path *path)
192                             struct ext4_ext_pa << 
193 {                                                 154 {
194         int err;                                  155         int err;
195                                                   156 
196         WARN_ON(!rwsem_is_locked(&EXT4_I(inode    157         WARN_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
197         if (path->p_bh) {                         158         if (path->p_bh) {
198                 ext4_extent_block_csum_set(ino    159                 ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
199                 /* path points to block */        160                 /* path points to block */
200                 err = __ext4_handle_dirty_meta    161                 err = __ext4_handle_dirty_metadata(where, line, handle,
201                                                   162                                                    inode, path->p_bh);
202                 /* Extents updating done, re-s << 
203                 if (!err)                      << 
204                         set_buffer_verified(pa << 
205         } else {                                  163         } else {
206                 /* path points to leaf/index i    164                 /* path points to leaf/index in inode body */
207                 err = ext4_mark_inode_dirty(ha    165                 err = ext4_mark_inode_dirty(handle, inode);
208         }                                         166         }
209         return err;                               167         return err;
210 }                                                 168 }
211                                                   169 
212 #define ext4_ext_dirty(handle, inode, path) \  << 
213                 __ext4_ext_dirty(__func__, __L << 
214                                                << 
215 static ext4_fsblk_t ext4_ext_find_goal(struct     170 static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
216                               struct ext4_ext_    171                               struct ext4_ext_path *path,
217                               ext4_lblk_t bloc    172                               ext4_lblk_t block)
218 {                                                 173 {
219         if (path) {                               174         if (path) {
220                 int depth = path->p_depth;        175                 int depth = path->p_depth;
221                 struct ext4_extent *ex;           176                 struct ext4_extent *ex;
222                                                   177 
223                 /*                                178                 /*
224                  * Try to predict block placem    179                  * Try to predict block placement assuming that we are
225                  * filling in a file which wil    180                  * filling in a file which will eventually be
226                  * non-sparse --- i.e., in the    181                  * non-sparse --- i.e., in the case of libbfd writing
227                  * an ELF object sections out-    182                  * an ELF object sections out-of-order but in a way
 228                  * that eventually results in     183                  * that eventually results in a contiguous object or
229                  * executable file, or some da    184                  * executable file, or some database extending a table
230                  * space file.  However, this     185                  * space file.  However, this is actually somewhat
231                  * non-ideal if we are writing    186                  * non-ideal if we are writing a sparse file such as
232                  * qemu or KVM writing a raw i    187                  * qemu or KVM writing a raw image file that is going
233                  * to stay fairly sparse, sinc    188                  * to stay fairly sparse, since it will end up
234                  * fragmenting the file system    189                  * fragmenting the file system's free space.  Maybe we
 235                  * should have some heuristics    190                  * should have some heuristics or some way to allow
236                  * userspace to pass a hint to    191                  * userspace to pass a hint to file system,
237                  * especially if the latter ca    192                  * especially if the latter case turns out to be
238                  * common.                        193                  * common.
239                  */                               194                  */
240                 ex = path[depth].p_ext;           195                 ex = path[depth].p_ext;
241                 if (ex) {                         196                 if (ex) {
242                         ext4_fsblk_t ext_pblk     197                         ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
243                         ext4_lblk_t ext_block     198                         ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);
244                                                   199 
245                         if (block > ext_block)    200                         if (block > ext_block)
246                                 return ext_pbl    201                                 return ext_pblk + (block - ext_block);
247                         else                      202                         else
248                                 return ext_pbl    203                                 return ext_pblk - (ext_block - block);
249                 }                                 204                 }
250                                                   205 
251                 /* it looks like index is empt    206                 /* it looks like index is empty;
252                  * try to find starting block     207                  * try to find starting block from index itself */
253                 if (path[depth].p_bh)             208                 if (path[depth].p_bh)
254                         return path[depth].p_b    209                         return path[depth].p_bh->b_blocknr;
255         }                                         210         }
256                                                   211 
257         /* OK. use inode's group */               212         /* OK. use inode's group */
258         return ext4_inode_to_goal_block(inode)    213         return ext4_inode_to_goal_block(inode);
259 }                                                 214 }
260                                                   215 
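The goal computation above simply extrapolates along the nearest extent in the path: the requested logical block is projected onto that extent's physical run. A small stand-alone sketch with assumed numbers (logical block 100 mapped to physical block 5000):

/* Hedged sketch of the extrapolation in ext4_ext_find_goal(). */
#include <stdint.h>
#include <stdio.h>

static uint64_t find_goal(uint64_t ext_pblk, uint32_t ext_block, uint32_t block)
{
        if (block > ext_block)
                return ext_pblk + (block - ext_block);
        return ext_pblk - (ext_block - block);
}

int main(void)
{
        /* nearest extent maps logical 100 -> physical 5000 */
        printf("%llu\n", (unsigned long long)find_goal(5000, 100, 130)); /* 5030 */
        printf("%llu\n", (unsigned long long)find_goal(5000, 100, 90));  /* 4990 */
        return 0;
}
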
261 /*                                                216 /*
262  * Allocation for a meta data block               217  * Allocation for a meta data block
263  */                                               218  */
264 static ext4_fsblk_t                               219 static ext4_fsblk_t
265 ext4_ext_new_meta_block(handle_t *handle, stru    220 ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
266                         struct ext4_ext_path *    221                         struct ext4_ext_path *path,
267                         struct ext4_extent *ex    222                         struct ext4_extent *ex, int *err, unsigned int flags)
268 {                                                 223 {
269         ext4_fsblk_t goal, newblock;              224         ext4_fsblk_t goal, newblock;
270                                                   225 
271         goal = ext4_ext_find_goal(inode, path,    226         goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
272         newblock = ext4_new_meta_blocks(handle    227         newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
273                                         NULL,     228                                         NULL, err);
274         return newblock;                          229         return newblock;
275 }                                                 230 }
276                                                   231 
277 static inline int ext4_ext_space_block(struct     232 static inline int ext4_ext_space_block(struct inode *inode, int check)
278 {                                                 233 {
279         int size;                                 234         int size;
280                                                   235 
281         size = (inode->i_sb->s_blocksize - siz    236         size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
282                         / sizeof(struct ext4_e    237                         / sizeof(struct ext4_extent);
283 #ifdef AGGRESSIVE_TEST                            238 #ifdef AGGRESSIVE_TEST
284         if (!check && size > 6)                   239         if (!check && size > 6)
285                 size = 6;                         240                 size = 6;
286 #endif                                            241 #endif
287         return size;                              242         return size;
288 }                                                 243 }
289                                                   244 
290 static inline int ext4_ext_space_block_idx(str    245 static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
291 {                                                 246 {
292         int size;                                 247         int size;
293                                                   248 
294         size = (inode->i_sb->s_blocksize - siz    249         size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
295                         / sizeof(struct ext4_e    250                         / sizeof(struct ext4_extent_idx);
296 #ifdef AGGRESSIVE_TEST                            251 #ifdef AGGRESSIVE_TEST
297         if (!check && size > 5)                   252         if (!check && size > 5)
298                 size = 5;                         253                 size = 5;
299 #endif                                            254 #endif
300         return size;                              255         return size;
301 }                                                 256 }
302                                                   257 
303 static inline int ext4_ext_space_root(struct i    258 static inline int ext4_ext_space_root(struct inode *inode, int check)
304 {                                                 259 {
305         int size;                                 260         int size;
306                                                   261 
307         size = sizeof(EXT4_I(inode)->i_data);     262         size = sizeof(EXT4_I(inode)->i_data);
308         size -= sizeof(struct ext4_extent_head    263         size -= sizeof(struct ext4_extent_header);
309         size /= sizeof(struct ext4_extent);       264         size /= sizeof(struct ext4_extent);
310 #ifdef AGGRESSIVE_TEST                            265 #ifdef AGGRESSIVE_TEST
311         if (!check && size > 3)                   266         if (!check && size > 3)
312                 size = 3;                         267                 size = 3;
313 #endif                                            268 #endif
314         return size;                              269         return size;
315 }                                                 270 }
316                                                   271 
317 static inline int ext4_ext_space_root_idx(stru    272 static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
318 {                                                 273 {
319         int size;                                 274         int size;
320                                                   275 
321         size = sizeof(EXT4_I(inode)->i_data);     276         size = sizeof(EXT4_I(inode)->i_data);
322         size -= sizeof(struct ext4_extent_head    277         size -= sizeof(struct ext4_extent_header);
323         size /= sizeof(struct ext4_extent_idx)    278         size /= sizeof(struct ext4_extent_idx);
324 #ifdef AGGRESSIVE_TEST                            279 #ifdef AGGRESSIVE_TEST
325         if (!check && size > 4)                   280         if (!check && size > 4)
326                 size = 4;                         281                 size = 4;
327 #endif                                            282 #endif
328         return size;                              283         return size;
329 }                                                 284 }
330                                                   285 
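These four helpers only divide the usable space of a node by the size of one entry. As a worked example, assuming the usual sizes (12-byte extent header, 12-byte extent and index entries, 60-byte i_data area in the inode; restated here as assumptions rather than read from this file), a 4K block holds 340 entries and the in-inode root holds 4:

/* Hedged sketch of the capacity arithmetic behind ext4_ext_space_*().
 * The 12/12/12/60-byte sizes are assumptions matching the usual ext4 layout. */
#include <stdio.h>

int main(void)
{
        const int blocksize = 4096;     /* filesystem block size */
        const int hdr  = 12;            /* sizeof(struct ext4_extent_header) */
        const int ext  = 12;            /* sizeof(struct ext4_extent) */
        const int idx  = 12;            /* sizeof(struct ext4_extent_idx) */
        const int i_data = 60;          /* inode i_data area */

        printf("extents per leaf block : %d\n", (blocksize - hdr) / ext); /* 340 */
        printf("indexes per index block: %d\n", (blocksize - hdr) / idx); /* 340 */
        printf("extents in inode root  : %d\n", (i_data - hdr) / ext);    /*   4 */
        printf("indexes in inode root  : %d\n", (i_data - hdr) / idx);    /*   4 */
        return 0;
}
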
331 static inline struct ext4_ext_path *           !! 286 static inline int
332 ext4_force_split_extent_at(handle_t *handle, s    287 ext4_force_split_extent_at(handle_t *handle, struct inode *inode,
333                            struct ext4_ext_pat !! 288                            struct ext4_ext_path **ppath, ext4_lblk_t lblk,
334                            int nofail)            289                            int nofail)
335 {                                                 290 {
                                                   >> 291         struct ext4_ext_path *path = *ppath;
336         int unwritten = ext4_ext_is_unwritten(    292         int unwritten = ext4_ext_is_unwritten(path[path->p_depth].p_ext);
337         int flags = EXT4_EX_NOCACHE | EXT4_GET << 
338                                                << 
339         if (nofail)                            << 
340                 flags |= EXT4_GET_BLOCKS_METAD << 
341                                                   293 
342         return ext4_split_extent_at(handle, in !! 294         return ext4_split_extent_at(handle, inode, ppath, lblk, unwritten ?
343                         EXT4_EXT_MARK_UNWRIT1|    295                         EXT4_EXT_MARK_UNWRIT1|EXT4_EXT_MARK_UNWRIT2 : 0,
344                         flags);                !! 296                         EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO |
                                                   >> 297                         (nofail ? EXT4_GET_BLOCKS_METADATA_NOFAIL:0));
                                                   >> 298 }
                                                   >> 299 
                                                   >> 300 /*
                                                   >> 301  * Calculate the number of metadata blocks needed
                                                   >> 302  * to allocate @blocks
                                                   >> 303  * Worst case is one block per extent
                                                   >> 304  */
                                                   >> 305 int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
                                                   >> 306 {
                                                   >> 307         struct ext4_inode_info *ei = EXT4_I(inode);
                                                   >> 308         int idxs;
                                                   >> 309 
                                                   >> 310         idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
                                                   >> 311                 / sizeof(struct ext4_extent_idx));
                                                   >> 312 
                                                   >> 313         /*
                                                   >> 314          * If the new delayed allocation block is contiguous with the
                                                   >> 315          * previous da block, it can share index blocks with the
                                                   >> 316          * previous block, so we only need to allocate a new index
                                                   >> 317          * block every idxs leaf blocks.  At ldxs**2 blocks, we need
                                                   >> 318          * an additional index block, and at ldxs**3 blocks, yet
                                                   >> 319          * another index blocks.
                                                   >> 320          */
                                                   >> 321         if (ei->i_da_metadata_calc_len &&
                                                   >> 322             ei->i_da_metadata_calc_last_lblock+1 == lblock) {
                                                   >> 323                 int num = 0;
                                                   >> 324 
                                                   >> 325                 if ((ei->i_da_metadata_calc_len % idxs) == 0)
                                                   >> 326                         num++;
                                                   >> 327                 if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
                                                   >> 328                         num++;
                                                   >> 329                 if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
                                                   >> 330                         num++;
                                                   >> 331                         ei->i_da_metadata_calc_len = 0;
                                                   >> 332                 } else
                                                   >> 333                         ei->i_da_metadata_calc_len++;
                                                   >> 334                 ei->i_da_metadata_calc_last_lblock++;
                                                   >> 335                 return num;
                                                   >> 336         }
                                                   >> 337 
                                                   >> 338         /*
                                                   >> 339          * In the worst case we need a new set of index blocks at
                                                   >> 340          * every level of the inode's extent tree.
                                                   >> 341          */
                                                   >> 342         ei->i_da_metadata_calc_len = 1;
                                                   >> 343         ei->i_da_metadata_calc_last_lblock = lblock;
                                                   >> 344         return ext_depth(inode) + 1;
345 }                                                 345 }
346                                                   346 
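ext4_ext_calc_metadata_amount(), still present on the 4.18 side, charges index blocks for delayed allocation only when a contiguous run crosses an idxs boundary; with 4K blocks idxs = (4096 - 12) / 12 = 340, so roughly one index block is charged per 340 contiguous blocks, another per 340² = 115600, and so on, while a non-contiguous block falls back to the worst case of one block per tree level. A hedged sketch of that charging scheme (simplified: it tracks a single run, unlike the per-inode state in the real function):

/* Hedged sketch of the contiguity-based index-block charging. */
#include <stdio.h>

int main(void)
{
        const long idxs = (4096 - 12) / 12;     /* 340 index slots per 4K block */
        long run, charged = 0;

        for (run = 1; run <= 2 * idxs; run++) {
                int num = 0;
                if (run % idxs == 0)
                        num++;
                if (run % (idxs * idxs) == 0)
                        num++;
                if (run % (idxs * idxs * idxs) == 0)
                        num++;
                charged += num;
        }
        printf("index blocks charged for %ld contiguous blocks: %ld\n",
               2 * idxs, charged);              /* 2: one at 340, one at 680 */
        return 0;
}
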
347 static int                                        347 static int
348 ext4_ext_max_entries(struct inode *inode, int     348 ext4_ext_max_entries(struct inode *inode, int depth)
349 {                                                 349 {
350         int max;                                  350         int max;
351                                                   351 
352         if (depth == ext_depth(inode)) {          352         if (depth == ext_depth(inode)) {
353                 if (depth == 0)                   353                 if (depth == 0)
354                         max = ext4_ext_space_r    354                         max = ext4_ext_space_root(inode, 1);
355                 else                              355                 else
356                         max = ext4_ext_space_r    356                         max = ext4_ext_space_root_idx(inode, 1);
357         } else {                                  357         } else {
358                 if (depth == 0)                   358                 if (depth == 0)
359                         max = ext4_ext_space_b    359                         max = ext4_ext_space_block(inode, 1);
360                 else                              360                 else
361                         max = ext4_ext_space_b    361                         max = ext4_ext_space_block_idx(inode, 1);
362         }                                         362         }
363                                                   363 
364         return max;                               364         return max;
365 }                                                 365 }
366                                                   366 
367 static int ext4_valid_extent(struct inode *ino    367 static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
368 {                                                 368 {
369         ext4_fsblk_t block = ext4_ext_pblock(e    369         ext4_fsblk_t block = ext4_ext_pblock(ext);
370         int len = ext4_ext_get_actual_len(ext)    370         int len = ext4_ext_get_actual_len(ext);
371         ext4_lblk_t lblock = le32_to_cpu(ext->    371         ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);
372                                                   372 
373         /*                                        373         /*
374          * We allow neither:                      374          * We allow neither:
375          *  - zero length                         375          *  - zero length
376          *  - overflow/wrap-around                376          *  - overflow/wrap-around
377          */                                       377          */
378         if (lblock + len <= lblock)               378         if (lblock + len <= lblock)
379                 return 0;                         379                 return 0;
380         return ext4_inode_block_valid(inode, b !! 380         return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
381 }                                                 381 }
382                                                   382 
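The single comparison lblock + len <= lblock in ext4_valid_extent() rejects both a zero-length extent and a logical range that wraps around in 32-bit ext4_lblk_t arithmetic. A small stand-alone sketch of the same test:

/* Hedged sketch of the zero-length / wrap-around check. */
#include <stdint.h>
#include <stdio.h>

static int range_ok(uint32_t lblock, uint32_t len)
{
        /* fails for len == 0 and for lblock + len wrapping past 2^32 */
        return (uint32_t)(lblock + len) > lblock;
}

int main(void)
{
        printf("%d\n", range_ok(100, 10));              /* 1: valid       */
        printf("%d\n", range_ok(100, 0));               /* 0: zero length */
        printf("%d\n", range_ok(0xfffffff0u, 0x20));    /* 0: wraps       */
        return 0;
}
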
383 static int ext4_valid_extent_idx(struct inode     383 static int ext4_valid_extent_idx(struct inode *inode,
384                                 struct ext4_ex    384                                 struct ext4_extent_idx *ext_idx)
385 {                                                 385 {
386         ext4_fsblk_t block = ext4_idx_pblock(e    386         ext4_fsblk_t block = ext4_idx_pblock(ext_idx);
387                                                   387 
388         return ext4_inode_block_valid(inode, b !! 388         return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
389 }                                                 389 }
390                                                   390 
391 static int ext4_valid_extent_entries(struct in    391 static int ext4_valid_extent_entries(struct inode *inode,
392                                      struct ex !! 392                                 struct ext4_extent_header *eh,
393                                      ext4_lblk !! 393                                 int depth)
394                                      int depth << 
395 {                                                 394 {
396         unsigned short entries;                   395         unsigned short entries;
397         ext4_lblk_t lblock = 0;                << 
398         ext4_lblk_t cur = 0;                   << 
399                                                << 
400         if (eh->eh_entries == 0)                  396         if (eh->eh_entries == 0)
401                 return 1;                         397                 return 1;
402                                                   398 
403         entries = le16_to_cpu(eh->eh_entries);    399         entries = le16_to_cpu(eh->eh_entries);
404                                                   400 
405         if (depth == 0) {                         401         if (depth == 0) {
406                 /* leaf entries */                402                 /* leaf entries */
407                 struct ext4_extent *ext = EXT_    403                 struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
408                                                !! 404                 struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
409                 /*                             !! 405                 ext4_fsblk_t pblock = 0;
410                  * The logical block in the fi !! 406                 ext4_lblk_t lblock = 0;
411                  * the number in the index blo !! 407                 ext4_lblk_t prev = 0;
412                  */                            !! 408                 int len = 0;
413                 if (depth != ext_depth(inode)  << 
414                     lblk != le32_to_cpu(ext->e << 
415                         return 0;              << 
416                 while (entries) {                 409                 while (entries) {
417                         if (!ext4_valid_extent    410                         if (!ext4_valid_extent(inode, ext))
418                                 return 0;         411                                 return 0;
419                                                   412 
420                         /* Check for overlappi    413                         /* Check for overlapping extents */
421                         lblock = le32_to_cpu(e    414                         lblock = le32_to_cpu(ext->ee_block);
422                         if (lblock < cur) {    !! 415                         len = ext4_ext_get_actual_len(ext);
423                                 *pblk = ext4_e !! 416                         if ((lblock <= prev) && prev) {
                                                   >> 417                                 pblock = ext4_ext_pblock(ext);
                                                   >> 418                                 es->s_last_error_block = cpu_to_le64(pblock);
424                                 return 0;         419                                 return 0;
425                         }                         420                         }
426                         cur = lblock + ext4_ex << 
427                         ext++;                    421                         ext++;
428                         entries--;                422                         entries--;
                                                   >> 423                         prev = lblock + len - 1;
429                 }                                 424                 }
430         } else {                                  425         } else {
431                 struct ext4_extent_idx *ext_id    426                 struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
432                                                << 
433                 /*                             << 
434                  * The logical block in the fi << 
435                  * the number in the parent in << 
436                  */                            << 
437                 if (depth != ext_depth(inode)  << 
438                     lblk != le32_to_cpu(ext_id << 
439                         return 0;              << 
440                 while (entries) {                 427                 while (entries) {
441                         if (!ext4_valid_extent    428                         if (!ext4_valid_extent_idx(inode, ext_idx))
442                                 return 0;         429                                 return 0;
443                                                << 
444                         /* Check for overlappi << 
445                         lblock = le32_to_cpu(e << 
446                         if (lblock < cur) {    << 
447                                 *pblk = ext4_i << 
448                                 return 0;      << 
449                         }                      << 
450                         ext_idx++;                430                         ext_idx++;
451                         entries--;                431                         entries--;
452                         cur = lblock + 1;      << 
453                 }                                 432                 }
454         }                                         433         }
455         return 1;                                 434         return 1;
456 }                                                 435 }
457                                                   436 
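Both versions walk the entries in order and reject any entry that begins before the end of the previous one; the newer code additionally checks the first entry against the logical block recorded in the parent index (the lblk parameter). A minimal sketch of the ordering/overlap walk, using an invented toy_extent type:

/* Hedged sketch of the overlap check in ext4_valid_extent_entries(): entries
 * must be sorted and each must start at or after the end of the previous one. */
#include <stdint.h>
#include <stdio.h>

struct toy_extent { uint32_t lblk; uint16_t len; };

static int entries_valid(const struct toy_extent *e, int n)
{
        uint32_t cur = 0;       /* first block not yet covered */
        int i;

        for (i = 0; i < n; i++) {
                if (e[i].lblk < cur)
                        return 0;               /* overlaps or out of order */
                cur = e[i].lblk + e[i].len;
        }
        return 1;
}

int main(void)
{
        struct toy_extent ok[]  = { {0, 8}, {8, 4}, {100, 16} };
        struct toy_extent bad[] = { {0, 8}, {4, 4} };           /* overlap */

        printf("%d %d\n", entries_valid(ok, 3), entries_valid(bad, 2));
        return 0;
}
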
458 static int __ext4_ext_check(const char *functi    437 static int __ext4_ext_check(const char *function, unsigned int line,
459                             struct inode *inod    438                             struct inode *inode, struct ext4_extent_header *eh,
460                             int depth, ext4_fs !! 439                             int depth, ext4_fsblk_t pblk)
461 {                                                 440 {
462         const char *error_msg;                    441         const char *error_msg;
463         int max = 0, err = -EFSCORRUPTED;         442         int max = 0, err = -EFSCORRUPTED;
464                                                   443 
465         if (unlikely(eh->eh_magic != EXT4_EXT_    444         if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
466                 error_msg = "invalid magic";      445                 error_msg = "invalid magic";
467                 goto corrupted;                   446                 goto corrupted;
468         }                                         447         }
469         if (unlikely(le16_to_cpu(eh->eh_depth)    448         if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
470                 error_msg = "unexpected eh_dep    449                 error_msg = "unexpected eh_depth";
471                 goto corrupted;                   450                 goto corrupted;
472         }                                         451         }
473         if (unlikely(eh->eh_max == 0)) {          452         if (unlikely(eh->eh_max == 0)) {
474                 error_msg = "invalid eh_max";     453                 error_msg = "invalid eh_max";
475                 goto corrupted;                   454                 goto corrupted;
476         }                                         455         }
477         max = ext4_ext_max_entries(inode, dept    456         max = ext4_ext_max_entries(inode, depth);
478         if (unlikely(le16_to_cpu(eh->eh_max) >    457         if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
479                 error_msg = "too large eh_max"    458                 error_msg = "too large eh_max";
480                 goto corrupted;                   459                 goto corrupted;
481         }                                         460         }
482         if (unlikely(le16_to_cpu(eh->eh_entrie    461         if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
483                 error_msg = "invalid eh_entrie    462                 error_msg = "invalid eh_entries";
484                 goto corrupted;                   463                 goto corrupted;
485         }                                         464         }
486         if (unlikely((eh->eh_entries == 0) &&  !! 465         if (!ext4_valid_extent_entries(inode, eh, depth)) {
487                 error_msg = "eh_entries is 0 b << 
488                 goto corrupted;                << 
489         }                                      << 
490         if (!ext4_valid_extent_entries(inode,  << 
491                 error_msg = "invalid extent en    466                 error_msg = "invalid extent entries";
492                 goto corrupted;                   467                 goto corrupted;
493         }                                         468         }
494         if (unlikely(depth > 32)) {               469         if (unlikely(depth > 32)) {
495                 error_msg = "too large eh_dept    470                 error_msg = "too large eh_depth";
496                 goto corrupted;                   471                 goto corrupted;
497         }                                         472         }
498         /* Verify checksum on non-root extent     473         /* Verify checksum on non-root extent tree nodes */
499         if (ext_depth(inode) != depth &&          474         if (ext_depth(inode) != depth &&
500             !ext4_extent_block_csum_verify(ino    475             !ext4_extent_block_csum_verify(inode, eh)) {
501                 error_msg = "extent tree corru    476                 error_msg = "extent tree corrupted";
502                 err = -EFSBADCRC;                 477                 err = -EFSBADCRC;
503                 goto corrupted;                   478                 goto corrupted;
504         }                                         479         }
505         return 0;                                 480         return 0;
506                                                   481 
507 corrupted:                                        482 corrupted:
508         ext4_error_inode_err(inode, function,  !! 483         ext4_error_inode(inode, function, line, 0,
509                              "pblk %llu bad he !! 484                          "pblk %llu bad header/extent: %s - magic %x, "
510                              "entries %u, max  !! 485                          "entries %u, max %u(%u), depth %u(%u)",
511                              (unsigned long lo !! 486                          (unsigned long long) pblk, error_msg,
512                              le16_to_cpu(eh->e !! 487                          le16_to_cpu(eh->eh_magic),
513                              le16_to_cpu(eh->e !! 488                          le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
514                              le16_to_cpu(eh->e !! 489                          max, le16_to_cpu(eh->eh_depth), depth);
515                              max, le16_to_cpu( << 
516         return err;                               490         return err;
517 }                                                 491 }
518                                                   492 
519 #define ext4_ext_check(inode, eh, depth, pblk)    493 #define ext4_ext_check(inode, eh, depth, pblk)                  \
520         __ext4_ext_check(__func__, __LINE__, ( !! 494         __ext4_ext_check(__func__, __LINE__, (inode), (eh), (depth), (pblk))
521                                                   495 
522 int ext4_ext_check_inode(struct inode *inode)     496 int ext4_ext_check_inode(struct inode *inode)
523 {                                                 497 {
524         return ext4_ext_check(inode, ext_inode    498         return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0);
525 }                                                 499 }
526                                                   500 
527 static void ext4_cache_extents(struct inode *i << 
528                                struct ext4_ext << 
529 {                                              << 
530         struct ext4_extent *ex = EXT_FIRST_EXT << 
531         ext4_lblk_t prev = 0;                  << 
532         int i;                                 << 
533                                                << 
534         for (i = le16_to_cpu(eh->eh_entries);  << 
535                 unsigned int status = EXTENT_S << 
536                 ext4_lblk_t lblk = le32_to_cpu << 
537                 int len = ext4_ext_get_actual_ << 
538                                                << 
539                 if (prev && (prev != lblk))    << 
540                         ext4_es_cache_extent(i << 
541                                              E << 
542                                                << 
543                 if (ext4_ext_is_unwritten(ex)) << 
544                         status = EXTENT_STATUS << 
545                 ext4_es_cache_extent(inode, lb << 
546                                      ext4_ext_ << 
547                 prev = lblk + len;             << 
548         }                                      << 
549 }                                              << 
550                                                << 
551 static struct buffer_head *                       501 static struct buffer_head *
552 __read_extent_tree_block(const char *function,    502 __read_extent_tree_block(const char *function, unsigned int line,
553                          struct inode *inode,  !! 503                          struct inode *inode, ext4_fsblk_t pblk, int depth,
554                          int depth, int flags) !! 504                          int flags)
555 {                                                 505 {
556         struct buffer_head              *bh;      506         struct buffer_head              *bh;
557         int                             err;      507         int                             err;
558         gfp_t                           gfp_fl << 
559         ext4_fsblk_t                    pblk;  << 
560                                                << 
561         if (flags & EXT4_EX_NOFAIL)            << 
562                 gfp_flags |= __GFP_NOFAIL;     << 
563                                                   508 
564         pblk = ext4_idx_pblock(idx);           !! 509         bh = sb_getblk_gfp(inode->i_sb, pblk, __GFP_MOVABLE | GFP_NOFS);
565         bh = sb_getblk_gfp(inode->i_sb, pblk,  << 
566         if (unlikely(!bh))                        510         if (unlikely(!bh))
567                 return ERR_PTR(-ENOMEM);          511                 return ERR_PTR(-ENOMEM);
568                                                   512 
569         if (!bh_uptodate_or_lock(bh)) {           513         if (!bh_uptodate_or_lock(bh)) {
570                 trace_ext4_ext_load_extent(ino    514                 trace_ext4_ext_load_extent(inode, pblk, _RET_IP_);
571                 err = ext4_read_bh(bh, 0, NULL !! 515                 err = bh_submit_read(bh);
572                 if (err < 0)                      516                 if (err < 0)
573                         goto errout;              517                         goto errout;
574         }                                         518         }
575         if (buffer_verified(bh) && !(flags & E    519         if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE))
576                 return bh;                        520                 return bh;
577         err = __ext4_ext_check(function, line, !! 521         err = __ext4_ext_check(function, line, inode,
578                                depth, pblk, le !! 522                                ext_block_hdr(bh), depth, pblk);
579         if (err)                                  523         if (err)
580                 goto errout;                      524                 goto errout;
581         set_buffer_verified(bh);                  525         set_buffer_verified(bh);
582         /*                                        526         /*
583          * If this is a leaf block, cache all     527          * If this is a leaf block, cache all of its entries
584          */                                       528          */
585         if (!(flags & EXT4_EX_NOCACHE) && dept    529         if (!(flags & EXT4_EX_NOCACHE) && depth == 0) {
586                 struct ext4_extent_header *eh     530                 struct ext4_extent_header *eh = ext_block_hdr(bh);
587                 ext4_cache_extents(inode, eh); !! 531                 struct ext4_extent *ex = EXT_FIRST_EXTENT(eh);
                                                   >> 532                 ext4_lblk_t prev = 0;
                                                   >> 533                 int i;
                                                   >> 534 
                                                   >> 535                 for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) {
                                                   >> 536                         unsigned int status = EXTENT_STATUS_WRITTEN;
                                                   >> 537                         ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
                                                   >> 538                         int len = ext4_ext_get_actual_len(ex);
                                                   >> 539 
                                                   >> 540                         if (prev && (prev != lblk))
                                                   >> 541                                 ext4_es_cache_extent(inode, prev,
                                                   >> 542                                                      lblk - prev, ~0,
                                                   >> 543                                                      EXTENT_STATUS_HOLE);
                                                   >> 544 
                                                   >> 545                         if (ext4_ext_is_unwritten(ex))
                                                   >> 546                                 status = EXTENT_STATUS_UNWRITTEN;
                                                   >> 547                         ext4_es_cache_extent(inode, lblk, len,
                                                   >> 548                                              ext4_ext_pblock(ex), status);
                                                   >> 549                         prev = lblk + len;
                                                   >> 550                 }
588         }                                         551         }
589         return bh;                                552         return bh;
590 errout:                                           553 errout:
591         put_bh(bh);                               554         put_bh(bh);
592         return ERR_PTR(err);                      555         return ERR_PTR(err);
593                                                   556 
594 }                                                 557 }
595                                                   558 
596 #define read_extent_tree_block(inode, idx, depth, flags)                   \ !! 559 #define read_extent_tree_block(inode, pblk, depth, flags)               \
597         __read_extent_tree_block(__func__, __LINE__, (inode), (idx),       \ !! 560         __read_extent_tree_block(__func__, __LINE__, (inode), (pblk),   \
598                                  (depth), (fla    561                                  (depth), (flags))
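The wrapper above exists so that every consistency check performed while loading an extent block can report the caller's function name and line number, not the helper's. A minimal stand-alone sketch of the same error-context pattern (all names below are invented for illustration and are not part of ext4):

#include <stdio.h>

static int __do_check(const char *function, unsigned int line, int value)
{
	if (value < 0) {
		fprintf(stderr, "check failed at %s:%u (value %d)\n",
			function, line, value);
		return -1;
	}
	return 0;
}

/* Callers write a plain do_check(); the macro smuggles in the call site. */
#define do_check(value)	__do_check(__func__, __LINE__, (value))

int main(void)
{
	int ret = do_check(42);		/* passes silently */

	ret |= do_check(-1);		/* logs "check failed at main:<line> ..." */
	return ret ? 1 : 0;
}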
599                                                   562 
600 /*                                                563 /*
601  * This function is called to cache a file's e    564  * This function is called to cache a file's extent information in the
602  * extent status tree                             565  * extent status tree
603  */                                               566  */
604 int ext4_ext_precache(struct inode *inode)        567 int ext4_ext_precache(struct inode *inode)
605 {                                                 568 {
606         struct ext4_inode_info *ei = EXT4_I(in    569         struct ext4_inode_info *ei = EXT4_I(inode);
607         struct ext4_ext_path *path = NULL;        570         struct ext4_ext_path *path = NULL;
608         struct buffer_head *bh;                   571         struct buffer_head *bh;
609         int i = 0, depth, ret = 0;                572         int i = 0, depth, ret = 0;
610                                                   573 
611         if (!ext4_test_inode_flag(inode, EXT4_    574         if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
612                 return 0;       /* not an exte    575                 return 0;       /* not an extent-mapped inode */
613                                                   576 
614         down_read(&ei->i_data_sem);               577         down_read(&ei->i_data_sem);
615         depth = ext_depth(inode);                 578         depth = ext_depth(inode);
616                                                   579 
617         /* Don't cache anything if there are no external extent blocks */  << 
618         if (!depth) {                          << 
619                 up_read(&ei->i_data_sem);      << 
620                 return ret;                    << 
621         }                                      << 
622                                                << 
623         path = kcalloc(depth + 1, sizeof(struc    580         path = kcalloc(depth + 1, sizeof(struct ext4_ext_path),
624                        GFP_NOFS);                 581                        GFP_NOFS);
625         if (path == NULL) {                       582         if (path == NULL) {
626                 up_read(&ei->i_data_sem);         583                 up_read(&ei->i_data_sem);
627                 return -ENOMEM;                   584                 return -ENOMEM;
628         }                                         585         }
629                                                   586 
                                                   >> 587         /* Don't cache anything if there are no external extent blocks */
                                                   >> 588         if (depth == 0)
                                                   >> 589                 goto out;
630         path[0].p_hdr = ext_inode_hdr(inode);     590         path[0].p_hdr = ext_inode_hdr(inode);
631         ret = ext4_ext_check(inode, path[0].p_    591         ret = ext4_ext_check(inode, path[0].p_hdr, depth, 0);
632         if (ret)                                  592         if (ret)
633                 goto out;                         593                 goto out;
634         path[0].p_idx = EXT_FIRST_INDEX(path[0    594         path[0].p_idx = EXT_FIRST_INDEX(path[0].p_hdr);
635         while (i >= 0) {                          595         while (i >= 0) {
636                 /*                                596                 /*
637                  * If this is a leaf block or     597                  * If this is a leaf block or we've reached the end of
638                  * the index block, go up         598                  * the index block, go up
639                  */                               599                  */
640                 if ((i == depth) ||               600                 if ((i == depth) ||
641                     path[i].p_idx > EXT_LAST_I    601                     path[i].p_idx > EXT_LAST_INDEX(path[i].p_hdr)) {
642                         ext4_ext_path_brelse(path + i);                    !! 602                         brelse(path[i].p_bh);
                                                   >> 603                         path[i].p_bh = NULL;
643                         i--;                      604                         i--;
644                         continue;                 605                         continue;
645                 }                                 606                 }
646                 bh = read_extent_tree_block(inode, path[i].p_idx++,        !! 607                 bh = read_extent_tree_block(inode,
                                                   >> 608                                             ext4_idx_pblock(path[i].p_idx++),
647                                             de    609                                             depth - i - 1,
648                                             EX    610                                             EXT4_EX_FORCE_CACHE);
649                 if (IS_ERR(bh)) {                 611                 if (IS_ERR(bh)) {
650                         ret = PTR_ERR(bh);        612                         ret = PTR_ERR(bh);
651                         break;                    613                         break;
652                 }                                 614                 }
653                 i++;                              615                 i++;
654                 path[i].p_bh = bh;                616                 path[i].p_bh = bh;
655                 path[i].p_hdr = ext_block_hdr(    617                 path[i].p_hdr = ext_block_hdr(bh);
656                 path[i].p_idx = EXT_FIRST_INDE    618                 path[i].p_idx = EXT_FIRST_INDEX(path[i].p_hdr);
657         }                                         619         }
658         ext4_set_inode_state(inode, EXT4_STATE    620         ext4_set_inode_state(inode, EXT4_STATE_EXT_PRECACHED);
659 out:                                              621 out:
660         up_read(&ei->i_data_sem);                 622         up_read(&ei->i_data_sem);
661         ext4_free_ext_path(path);              !! 623         ext4_ext_drop_refs(path);
                                                   >> 624         kfree(path);
662         return ret;                               625         return ret;
663 }                                                 626 }
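ext4_ext_precache() backs the EXT4_IOC_PRECACHE_EXTENTS ioctl, which lets an application ask that a file's whole extent tree be pulled into the extent status tree up front. A hedged user-space sketch of driving it (the fallback ioctl number is the one defined in fs/ext4/ext4.h; prefer the kernel's exported headers where they provide it):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#ifndef EXT4_IOC_PRECACHE_EXTENTS
#define EXT4_IOC_PRECACHE_EXTENTS	_IO('f', 18)
#endif

int main(int argc, char **argv)
{
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <file-on-ext4>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0 || ioctl(fd, EXT4_IOC_PRECACHE_EXTENTS) < 0) {
		perror(argv[1]);
		return 1;
	}
	close(fd);
	return 0;
}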
664                                                   627 
665 #ifdef EXT_DEBUG                                  628 #ifdef EXT_DEBUG
666 static void ext4_ext_show_path(struct inode *i    629 static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
667 {                                                 630 {
668         int k, l = path->p_depth;                 631         int k, l = path->p_depth;
669                                                   632 
670         ext_debug(inode, "path:");             !! 633         ext_debug("path:");
671         for (k = 0; k <= l; k++, path++) {        634         for (k = 0; k <= l; k++, path++) {
672                 if (path->p_idx) {                635                 if (path->p_idx) {
673                         ext_debug(inode, "  %d->%llu",                     !! 636                   ext_debug("  %d->%llu", le32_to_cpu(path->p_idx->ei_block),
674                                   le32_to_cpu(path->p_idx->ei_block),      !! 637                             ext4_idx_pblock(path->p_idx));
675                                   ext4_idx_pblock(path->p_idx));           << 
676                 } else if (path->p_ext) {         638                 } else if (path->p_ext) {
677                         ext_debug(inode, "  %d:[%d]%d:%llu ",              !! 639                         ext_debug("  %d:[%d]%d:%llu ",
678                                   le32_to_cpu(    640                                   le32_to_cpu(path->p_ext->ee_block),
679                                   ext4_ext_is_    641                                   ext4_ext_is_unwritten(path->p_ext),
680                                   ext4_ext_get    642                                   ext4_ext_get_actual_len(path->p_ext),
681                                   ext4_ext_pbl    643                                   ext4_ext_pblock(path->p_ext));
682                 } else                            644                 } else
683                         ext_debug(inode, "  []");                          !! 645                         ext_debug("  []");
684         }                                         646         }
685         ext_debug(inode, "\n");                !! 647         ext_debug("\n");
686 }                                                 648 }
687                                                   649 
688 static void ext4_ext_show_leaf(struct inode *i    650 static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
689 {                                                 651 {
690         int depth = ext_depth(inode);             652         int depth = ext_depth(inode);
691         struct ext4_extent_header *eh;            653         struct ext4_extent_header *eh;
692         struct ext4_extent *ex;                   654         struct ext4_extent *ex;
693         int i;                                    655         int i;
694                                                   656 
695         if (IS_ERR_OR_NULL(path))              !! 657         if (!path)
696                 return;                           658                 return;
697                                                   659 
698         eh = path[depth].p_hdr;                   660         eh = path[depth].p_hdr;
699         ex = EXT_FIRST_EXTENT(eh);                661         ex = EXT_FIRST_EXTENT(eh);
700                                                   662 
701         ext_debug(inode, "Displaying leaf extents for inode %lu\n", inode->i_ino); !! 663         ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);
702                                                   664 
703         for (i = 0; i < le16_to_cpu(eh->eh_ent    665         for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
704                 ext_debug(inode, "%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block), !! 666                 ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
705                           ext4_ext_is_unwritte    667                           ext4_ext_is_unwritten(ex),
706                           ext4_ext_get_actual_    668                           ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
707         }                                         669         }
708         ext_debug(inode, "\n");                !! 670         ext_debug("\n");
709 }                                                 671 }
710                                                   672 
711 static void ext4_ext_show_move(struct inode *i    673 static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
712                         ext4_fsblk_t newblock,    674                         ext4_fsblk_t newblock, int level)
713 {                                                 675 {
714         int depth = ext_depth(inode);             676         int depth = ext_depth(inode);
715         struct ext4_extent *ex;                   677         struct ext4_extent *ex;
716                                                   678 
717         if (depth != level) {                     679         if (depth != level) {
718                 struct ext4_extent_idx *idx;      680                 struct ext4_extent_idx *idx;
719                 idx = path[level].p_idx;          681                 idx = path[level].p_idx;
720                 while (idx <= EXT_MAX_INDEX(pa    682                 while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
721                         ext_debug(inode, "%d: move %d:%llu in new index %llu\n", !! 683                         ext_debug("%d: move %d:%llu in new index %llu\n", level,
722                                   level, le32_to_cpu(idx->ei_block),       !! 684                                         le32_to_cpu(idx->ei_block),
723                                   ext4_idx_pblock(idx), newblock);         !! 685                                         ext4_idx_pblock(idx),
                                                   >> 686                                         newblock);
724                         idx++;                    687                         idx++;
725                 }                                 688                 }
726                                                   689 
727                 return;                           690                 return;
728         }                                         691         }
729                                                   692 
730         ex = path[depth].p_ext;                   693         ex = path[depth].p_ext;
731         while (ex <= EXT_MAX_EXTENT(path[depth    694         while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
732                 ext_debug(inode, "move %d:%llu:[%d]%d in new leaf %llu\n", !! 695                 ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
733                                 le32_to_cpu(ex    696                                 le32_to_cpu(ex->ee_block),
734                                 ext4_ext_pbloc    697                                 ext4_ext_pblock(ex),
735                                 ext4_ext_is_un    698                                 ext4_ext_is_unwritten(ex),
736                                 ext4_ext_get_a    699                                 ext4_ext_get_actual_len(ex),
737                                 newblock);        700                                 newblock);
738                 ex++;                             701                 ex++;
739         }                                         702         }
740 }                                                 703 }
741                                                   704 
742 #else                                             705 #else
743 #define ext4_ext_show_path(inode, path)           706 #define ext4_ext_show_path(inode, path)
744 #define ext4_ext_show_leaf(inode, path)           707 #define ext4_ext_show_leaf(inode, path)
745 #define ext4_ext_show_move(inode, path, newblo    708 #define ext4_ext_show_move(inode, path, newblock, level)
746 #endif                                            709 #endif
747                                                   710 
                                                   >> 711 void ext4_ext_drop_refs(struct ext4_ext_path *path)
                                                   >> 712 {
                                                   >> 713         int depth, i;
                                                   >> 714 
                                                   >> 715         if (!path)
                                                   >> 716                 return;
                                                   >> 717         depth = path->p_depth;
                                                   >> 718         for (i = 0; i <= depth; i++, path++)
                                                   >> 719                 if (path->p_bh) {
                                                   >> 720                         brelse(path->p_bh);
                                                   >> 721                         path->p_bh = NULL;
                                                   >> 722                 }
                                                   >> 723 }
                                                   >> 724 
748 /*                                                725 /*
749  * ext4_ext_binsearch_idx:                        726  * ext4_ext_binsearch_idx:
750  * binary search for the closest index of the     727  * binary search for the closest index of the given block
751  * the header must be checked before calling t    728  * the header must be checked before calling this
752  */                                               729  */
753 static void                                       730 static void
754 ext4_ext_binsearch_idx(struct inode *inode,       731 ext4_ext_binsearch_idx(struct inode *inode,
755                         struct ext4_ext_path *    732                         struct ext4_ext_path *path, ext4_lblk_t block)
756 {                                                 733 {
757         struct ext4_extent_header *eh = path->    734         struct ext4_extent_header *eh = path->p_hdr;
758         struct ext4_extent_idx *r, *l, *m;        735         struct ext4_extent_idx *r, *l, *m;
759                                                   736 
760                                                   737 
761         ext_debug(inode, "binsearch for %u(idx):  ", block);               !! 738         ext_debug("binsearch for %u(idx):  ", block);
762                                                   739 
763         l = EXT_FIRST_INDEX(eh) + 1;              740         l = EXT_FIRST_INDEX(eh) + 1;
764         r = EXT_LAST_INDEX(eh);                   741         r = EXT_LAST_INDEX(eh);
765         while (l <= r) {                          742         while (l <= r) {
766                 m = l + (r - l) / 2;              743                 m = l + (r - l) / 2;
767                 ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l,               << 
768                           le32_to_cpu(l->ei_block), m, le32_to_cpu(m->ei_block), << 
769                           r, le32_to_cpu(r->ei_block));                    << 
770                                                << 
771                 if (block < le32_to_cpu(m->ei_    744                 if (block < le32_to_cpu(m->ei_block))
772                         r = m - 1;                745                         r = m - 1;
773                 else                              746                 else
774                         l = m + 1;                747                         l = m + 1;
                                                   >> 748                 ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
                                                   >> 749                                 m, le32_to_cpu(m->ei_block),
                                                   >> 750                                 r, le32_to_cpu(r->ei_block));
775         }                                         751         }
776                                                   752 
777         path->p_idx = l - 1;                      753         path->p_idx = l - 1;
778         ext_debug(inode, "  -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block), !! 754         ext_debug("  -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
779                   ext4_idx_pblock(path->p_idx)    755                   ext4_idx_pblock(path->p_idx));
780                                                   756 
781 #ifdef CHECK_BINSEARCH                            757 #ifdef CHECK_BINSEARCH
782         {                                         758         {
783                 struct ext4_extent_idx *chix,     759                 struct ext4_extent_idx *chix, *ix;
784                 int k;                            760                 int k;
785                                                   761 
786                 chix = ix = EXT_FIRST_INDEX(eh    762                 chix = ix = EXT_FIRST_INDEX(eh);
787                 for (k = 0; k < le16_to_cpu(eh    763                 for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
788                         if (k != 0 && le32_to_cpu(ix->ei_block) <=         !! 764                   if (k != 0 &&
789                             le32_to_cpu(ix[-1].ei_block)) {                !! 765                       le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
790                                 printk(KERN_DE    766                                 printk(KERN_DEBUG "k=%d, ix=0x%p, "
791                                        "first=    767                                        "first=0x%p\n", k,
792                                        ix, EXT    768                                        ix, EXT_FIRST_INDEX(eh));
793                                 printk(KERN_DE    769                                 printk(KERN_DEBUG "%u <= %u\n",
794                                        le32_to    770                                        le32_to_cpu(ix->ei_block),
795                                        le32_to    771                                        le32_to_cpu(ix[-1].ei_block));
796                         }                         772                         }
797                         BUG_ON(k && le32_to_cp    773                         BUG_ON(k && le32_to_cpu(ix->ei_block)
798                                            <=     774                                            <= le32_to_cpu(ix[-1].ei_block));
799                         if (block < le32_to_cp    775                         if (block < le32_to_cpu(ix->ei_block))
800                                 break;            776                                 break;
801                         chix = ix;                777                         chix = ix;
802                 }                                 778                 }
803                 BUG_ON(chix != path->p_idx);      779                 BUG_ON(chix != path->p_idx);
804         }                                         780         }
805 #endif                                            781 #endif
806                                                   782 
807 }                                                 783 }
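Because the scan starts at EXT_FIRST_INDEX(eh) + 1 and the first index always covers the smallest logical block, the loop above terminates with l pointing one entry past the answer, so p_idx = l - 1 names the last index whose ei_block is <= the target. A small user-space sketch of the same invariant (plain unsigned ints stand in for the index entries; not kernel code):

#include <assert.h>
#include <stddef.h>

/* Returns the last i with blocks[i] <= target; falls back to 0 when the
 * target precedes blocks[1], mirroring the kernel's EXT_FIRST_INDEX case. */
static size_t find_le(const unsigned int *blocks, size_t nr, unsigned int target)
{
	size_t l = 1, r = nr - 1;	/* caller guarantees nr >= 2 */

	while (l <= r) {
		size_t m = l + (r - l) / 2;

		if (target < blocks[m])
			r = m - 1;
		else
			l = m + 1;
	}
	return l - 1;	/* one entry left of where the search stopped */
}

int main(void)
{
	const unsigned int blocks[] = { 0, 100, 200, 300 };

	assert(find_le(blocks, 4, 150) == 1);
	assert(find_le(blocks, 4, 300) == 3);
	assert(find_le(blocks, 4, 50) == 0);
	return 0;
}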
808                                                   784 
809 /*                                                785 /*
810  * ext4_ext_binsearch:                            786  * ext4_ext_binsearch:
811  * binary search for closest extent of the giv    787  * binary search for closest extent of the given block
812  * the header must be checked before calling t    788  * the header must be checked before calling this
813  */                                               789  */
814 static void                                       790 static void
815 ext4_ext_binsearch(struct inode *inode,           791 ext4_ext_binsearch(struct inode *inode,
816                 struct ext4_ext_path *path, ex    792                 struct ext4_ext_path *path, ext4_lblk_t block)
817 {                                                 793 {
818         struct ext4_extent_header *eh = path->    794         struct ext4_extent_header *eh = path->p_hdr;
819         struct ext4_extent *r, *l, *m;            795         struct ext4_extent *r, *l, *m;
820                                                   796 
821         if (eh->eh_entries == 0) {                797         if (eh->eh_entries == 0) {
822                 /*                                798                 /*
823                  * this leaf is empty:            799                  * this leaf is empty:
824                  * we get such a leaf in split    800                  * we get such a leaf in split/add case
825                  */                               801                  */
826                 return;                           802                 return;
827         }                                         803         }
828                                                   804 
829         ext_debug(inode, "binsearch for %u:  ", block);                    !! 805         ext_debug("binsearch for %u:  ", block);
830                                                   806 
831         l = EXT_FIRST_EXTENT(eh) + 1;             807         l = EXT_FIRST_EXTENT(eh) + 1;
832         r = EXT_LAST_EXTENT(eh);                  808         r = EXT_LAST_EXTENT(eh);
833                                                   809 
834         while (l <= r) {                          810         while (l <= r) {
835                 m = l + (r - l) / 2;              811                 m = l + (r - l) / 2;
836                 ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l,               << 
837                           le32_to_cpu(l->ee_block), m, le32_to_cpu(m->ee_block), << 
838                           r, le32_to_cpu(r->ee_block));                    << 
839                                                << 
840                 if (block < le32_to_cpu(m->ee_    812                 if (block < le32_to_cpu(m->ee_block))
841                         r = m - 1;                813                         r = m - 1;
842                 else                              814                 else
843                         l = m + 1;                815                         l = m + 1;
                                                   >> 816                 ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
                                                   >> 817                                 m, le32_to_cpu(m->ee_block),
                                                   >> 818                                 r, le32_to_cpu(r->ee_block));
844         }                                         819         }
845                                                   820 
846         path->p_ext = l - 1;                      821         path->p_ext = l - 1;
847         ext_debug(inode, "  -> %d:%llu:[%d]%d ",                           !! 822         ext_debug("  -> %d:%llu:[%d]%d ",
848                         le32_to_cpu(path->p_ex    823                         le32_to_cpu(path->p_ext->ee_block),
849                         ext4_ext_pblock(path->    824                         ext4_ext_pblock(path->p_ext),
850                         ext4_ext_is_unwritten(    825                         ext4_ext_is_unwritten(path->p_ext),
851                         ext4_ext_get_actual_le    826                         ext4_ext_get_actual_len(path->p_ext));
852                                                   827 
853 #ifdef CHECK_BINSEARCH                            828 #ifdef CHECK_BINSEARCH
854         {                                         829         {
855                 struct ext4_extent *chex, *ex;    830                 struct ext4_extent *chex, *ex;
856                 int k;                            831                 int k;
857                                                   832 
858                 chex = ex = EXT_FIRST_EXTENT(e    833                 chex = ex = EXT_FIRST_EXTENT(eh);
859                 for (k = 0; k < le16_to_cpu(eh    834                 for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
860                         BUG_ON(k && le32_to_cp    835                         BUG_ON(k && le32_to_cpu(ex->ee_block)
861                                           <= l    836                                           <= le32_to_cpu(ex[-1].ee_block));
862                         if (block < le32_to_cp    837                         if (block < le32_to_cpu(ex->ee_block))
863                                 break;            838                                 break;
864                         chex = ex;                839                         chex = ex;
865                 }                                 840                 }
866                 BUG_ON(chex != path->p_ext);      841                 BUG_ON(chex != path->p_ext);
867         }                                         842         }
868 #endif                                            843 #endif
869                                                   844 
870 }                                                 845 }
871                                                   846 
872 void ext4_ext_tree_init(handle_t *handle, struct inode *inode)             !! 847 int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
873 {                                                 848 {
874         struct ext4_extent_header *eh;            849         struct ext4_extent_header *eh;
875                                                   850 
876         eh = ext_inode_hdr(inode);                851         eh = ext_inode_hdr(inode);
877         eh->eh_depth = 0;                         852         eh->eh_depth = 0;
878         eh->eh_entries = 0;                       853         eh->eh_entries = 0;
879         eh->eh_magic = EXT4_EXT_MAGIC;            854         eh->eh_magic = EXT4_EXT_MAGIC;
880         eh->eh_max = cpu_to_le16(ext4_ext_spac    855         eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
881         eh->eh_generation = 0;                 << 
882         ext4_mark_inode_dirty(handle, inode);     856         ext4_mark_inode_dirty(handle, inode);
                                                   >> 857         return 0;
883 }                                                 858 }
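For reference, the 12-byte on-disk header that this function resets is declared in fs/ext4/ext4_extents.h. A user-space mock-up of its layout (uint16_t/uint32_t stand in for __le16/__le32 here, and 0xF30A is EXT4_EXT_MAGIC; this is an editor's sketch, not the kernel declaration itself):

#include <assert.h>
#include <stdint.h>

struct mock_extent_header {
	uint16_t eh_magic;	/* EXT4_EXT_MAGIC, 0xF30A */
	uint16_t eh_entries;	/* number of valid entries that follow */
	uint16_t eh_max;	/* capacity of this node, in entries */
	uint16_t eh_depth;	/* 0: entries are extents (leaf); >0: indexes */
	uint32_t eh_generation;	/* generation of the tree */
};

int main(void)
{
	/* The header is 12 bytes; extents or indexes follow it in the block. */
	assert(sizeof(struct mock_extent_header) == 12);
	return 0;
}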
884                                                   859 
885 struct ext4_ext_path *                            860 struct ext4_ext_path *
886 ext4_find_extent(struct inode *inode, ext4_lbl    861 ext4_find_extent(struct inode *inode, ext4_lblk_t block,
887                  struct ext4_ext_path *path, int flags)                    !! 862                  struct ext4_ext_path **orig_path, int flags)
888 {                                                 863 {
889         struct ext4_extent_header *eh;            864         struct ext4_extent_header *eh;
890         struct buffer_head *bh;                   865         struct buffer_head *bh;
                                                   >> 866         struct ext4_ext_path *path = orig_path ? *orig_path : NULL;
891         short int depth, i, ppos = 0;             867         short int depth, i, ppos = 0;
892         int ret;                                  868         int ret;
893         gfp_t gfp_flags = GFP_NOFS;            << 
894                                                << 
895         if (flags & EXT4_EX_NOFAIL)            << 
896                 gfp_flags |= __GFP_NOFAIL;     << 
897                                                   869 
898         eh = ext_inode_hdr(inode);                870         eh = ext_inode_hdr(inode);
899         depth = ext_depth(inode);                 871         depth = ext_depth(inode);
900         if (depth < 0 || depth > EXT4_MAX_EXTE    872         if (depth < 0 || depth > EXT4_MAX_EXTENT_DEPTH) {
901                 EXT4_ERROR_INODE(inode, "inode    873                 EXT4_ERROR_INODE(inode, "inode has invalid extent depth: %d",
902                                  depth);          874                                  depth);
903                 ret = -EFSCORRUPTED;              875                 ret = -EFSCORRUPTED;
904                 goto err;                         876                 goto err;
905         }                                         877         }
906                                                   878 
907         if (path) {                               879         if (path) {
908                 ext4_ext_drop_refs(path);         880                 ext4_ext_drop_refs(path);
909                 if (depth > path[0].p_maxdepth    881                 if (depth > path[0].p_maxdepth) {
910                         kfree(path);              882                         kfree(path);
911                         path = NULL;           !! 883                         *orig_path = path = NULL;
912                 }                                 884                 }
913         }                                         885         }
914         if (!path) {                              886         if (!path) {
915                 /* account possible depth incr    887                 /* account possible depth increase */
916                 path = kcalloc(depth + 2, size    888                 path = kcalloc(depth + 2, sizeof(struct ext4_ext_path),
917                                 gfp_flags);    !! 889                                 GFP_NOFS);
918                 if (unlikely(!path))              890                 if (unlikely(!path))
919                         return ERR_PTR(-ENOMEM    891                         return ERR_PTR(-ENOMEM);
920                 path[0].p_maxdepth = depth + 1    892                 path[0].p_maxdepth = depth + 1;
921         }                                         893         }
922         path[0].p_hdr = eh;                       894         path[0].p_hdr = eh;
923         path[0].p_bh = NULL;                      895         path[0].p_bh = NULL;
924                                                   896 
925         i = depth;                                897         i = depth;
926         if (!(flags & EXT4_EX_NOCACHE) && depth == 0)                      << 
927                 ext4_cache_extents(inode, eh); << 
928         /* walk through the tree */               898         /* walk through the tree */
929         while (i) {                               899         while (i) {
930                 ext_debug(inode, "depth %d: num %d, max %d\n",             !! 900                 ext_debug("depth %d: num %d, max %d\n",
931                           ppos, le16_to_cpu(eh    901                           ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
932                                                   902 
933                 ext4_ext_binsearch_idx(inode,     903                 ext4_ext_binsearch_idx(inode, path + ppos, block);
934                 path[ppos].p_block = ext4_idx_    904                 path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
935                 path[ppos].p_depth = i;           905                 path[ppos].p_depth = i;
936                 path[ppos].p_ext = NULL;          906                 path[ppos].p_ext = NULL;
937                                                   907 
938                 bh = read_extent_tree_block(inode, path[ppos].p_idx, --i, flags); !! 908                 bh = read_extent_tree_block(inode, path[ppos].p_block, --i,
                                                   >> 909                                             flags);
939                 if (IS_ERR(bh)) {                 910                 if (IS_ERR(bh)) {
940                         ret = PTR_ERR(bh);        911                         ret = PTR_ERR(bh);
941                         goto err;                 912                         goto err;
942                 }                                 913                 }
943                                                   914 
944                 eh = ext_block_hdr(bh);           915                 eh = ext_block_hdr(bh);
945                 ppos++;                           916                 ppos++;
946                 path[ppos].p_bh = bh;             917                 path[ppos].p_bh = bh;
947                 path[ppos].p_hdr = eh;            918                 path[ppos].p_hdr = eh;
948         }                                         919         }
949                                                   920 
950         path[ppos].p_depth = i;                   921         path[ppos].p_depth = i;
951         path[ppos].p_ext = NULL;                  922         path[ppos].p_ext = NULL;
952         path[ppos].p_idx = NULL;                  923         path[ppos].p_idx = NULL;
953                                                   924 
954         /* find extent */                         925         /* find extent */
955         ext4_ext_binsearch(inode, path + ppos,    926         ext4_ext_binsearch(inode, path + ppos, block);
956         /* if not an empty leaf */                927         /* if not an empty leaf */
957         if (path[ppos].p_ext)                     928         if (path[ppos].p_ext)
958                 path[ppos].p_block = ext4_ext_    929                 path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);
959                                                   930 
960         ext4_ext_show_path(inode, path);          931         ext4_ext_show_path(inode, path);
961                                                   932 
962         return path;                              933         return path;
963                                                   934 
964 err:                                              935 err:
965         ext4_free_ext_path(path);              !! 936         ext4_ext_drop_refs(path);
                                                   >> 937         kfree(path);
                                                   >> 938         if (orig_path)
                                                   >> 939                 *orig_path = NULL;
966         return ERR_PTR(ret);                      940         return ERR_PTR(ret);
967 }                                                 941 }
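The caller owns the path array that comes back: it must be checked with IS_ERR(), the leaf entry (if any) sits at path[ext_depth(inode)].p_ext, and the array is released with ext4_free_ext_path(). A hedged kernel-context sketch of that calling convention (lookup_ext_pblock() is an invented name, the caller is assumed to hold i_data_sem as ext4's own callers do, and this is illustration rather than a drop-in helper):

static ext4_fsblk_t lookup_ext_pblock(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_ext_path *path;
	struct ext4_extent *ex;
	ext4_fsblk_t pblk = 0;
	int depth;

	path = ext4_find_extent(inode, lblk, NULL, 0);
	if (IS_ERR(path))
		return 0;	/* a real caller would propagate PTR_ERR(path) */

	depth = ext_depth(inode);
	ex = path[depth].p_ext;	/* NULL when the leaf is empty */
	if (ex && lblk >= le32_to_cpu(ex->ee_block) &&
	    lblk < le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex))
		pblk = ext4_ext_pblock(ex) + lblk - le32_to_cpu(ex->ee_block);

	ext4_free_ext_path(path);
	return pblk;
}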
968                                                   942 
969 /*                                                943 /*
970  * ext4_ext_insert_index:                         944  * ext4_ext_insert_index:
971  * insert new index [@logical;@ptr] into the b    945  * insert new index [@logical;@ptr] into the block at @curp;
972  * check where to insert: before @curp or afte    946  * check where to insert: before @curp or after @curp
973  */                                               947  */
974 static int ext4_ext_insert_index(handle_t *han    948 static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
975                                  struct ext4_e    949                                  struct ext4_ext_path *curp,
976                                  int logical,     950                                  int logical, ext4_fsblk_t ptr)
977 {                                                 951 {
978         struct ext4_extent_idx *ix;               952         struct ext4_extent_idx *ix;
979         int len, err;                             953         int len, err;
980                                                   954 
981         err = ext4_ext_get_access(handle, inod    955         err = ext4_ext_get_access(handle, inode, curp);
982         if (err)                                  956         if (err)
983                 return err;                       957                 return err;
984                                                   958 
985         if (unlikely(logical == le32_to_cpu(cu    959         if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
986                 EXT4_ERROR_INODE(inode,           960                 EXT4_ERROR_INODE(inode,
987                                  "logical %d =    961                                  "logical %d == ei_block %d!",
988                                  logical, le32    962                                  logical, le32_to_cpu(curp->p_idx->ei_block));
989                 return -EFSCORRUPTED;             963                 return -EFSCORRUPTED;
990         }                                         964         }
991                                                   965 
992         if (unlikely(le16_to_cpu(curp->p_hdr->    966         if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
993                              >= le16_to_cpu(cu    967                              >= le16_to_cpu(curp->p_hdr->eh_max))) {
994                 EXT4_ERROR_INODE(inode,           968                 EXT4_ERROR_INODE(inode,
995                                  "eh_entries %    969                                  "eh_entries %d >= eh_max %d!",
996                                  le16_to_cpu(c    970                                  le16_to_cpu(curp->p_hdr->eh_entries),
997                                  le16_to_cpu(c    971                                  le16_to_cpu(curp->p_hdr->eh_max));
998                 return -EFSCORRUPTED;             972                 return -EFSCORRUPTED;
999         }                                         973         }
1000                                                  974 
1001         if (logical > le32_to_cpu(curp->p_idx    975         if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
1002                 /* insert after */               976                 /* insert after */
1003                 ext_debug(inode, "insert new index %d after: %llu\n",     !! 977                 ext_debug("insert new index %d after: %llu\n", logical, ptr);
1004                           logical, ptr);      << 
1005                 ix = curp->p_idx + 1;            978                 ix = curp->p_idx + 1;
1006         } else {                                 979         } else {
1007                 /* insert before */              980                 /* insert before */
1008                 ext_debug(inode, "insert new index %d before: %llu\n",    !! 981                 ext_debug("insert new index %d before: %llu\n", logical, ptr);
1009                           logical, ptr);      << 
1010                 ix = curp->p_idx;                982                 ix = curp->p_idx;
1011         }                                        983         }
1012                                                  984 
1013         if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {                   << 
1014                 EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");            << 
1015                 return -EFSCORRUPTED;         << 
1016         }                                     << 
1017                                               << 
1018         len = EXT_LAST_INDEX(curp->p_hdr) - i    985         len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
1019         BUG_ON(len < 0);                         986         BUG_ON(len < 0);
1020         if (len > 0) {                           987         if (len > 0) {
1021                 ext_debug(inode, "insert new index %d: "                  !! 988                 ext_debug("insert new index %d: "
1022                                 "move %d indi    989                                 "move %d indices from 0x%p to 0x%p\n",
1023                                 logical, len,    990                                 logical, len, ix, ix + 1);
1024                 memmove(ix + 1, ix, len * siz    991                 memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
1025         }                                        992         }
1026                                                  993 
                                                   >> 994         if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
                                                   >> 995                 EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
                                                   >> 996                 return -EFSCORRUPTED;
                                                   >> 997         }
                                                   >> 998 
1027         ix->ei_block = cpu_to_le32(logical);     999         ix->ei_block = cpu_to_le32(logical);
1028         ext4_idx_store_pblock(ix, ptr);          1000         ext4_idx_store_pblock(ix, ptr);
1029         le16_add_cpu(&curp->p_hdr->eh_entries    1001         le16_add_cpu(&curp->p_hdr->eh_entries, 1);
1030                                                  1002 
1031         if (unlikely(ix > EXT_LAST_INDEX(curp    1003         if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
1032                 EXT4_ERROR_INODE(inode, "ix >    1004                 EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
1033                 return -EFSCORRUPTED;            1005                 return -EFSCORRUPTED;
1034         }                                        1006         }
1035                                                  1007 
1036         err = ext4_ext_dirty(handle, inode, c    1008         err = ext4_ext_dirty(handle, inode, curp);
1037         ext4_std_error(inode->i_sb, err);        1009         ext4_std_error(inode->i_sb, err);
1038                                                  1010 
1039         return err;                              1011         return err;
1040 }                                                1012 }
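The core of the insert is the memmove() above: every index from the insertion point through EXT_LAST_INDEX() is shifted up one slot, and the new index is written into the gap before eh_entries is bumped. A stand-alone sketch of that array-insertion step (plain ints stand in for struct ext4_extent_idx entries; not kernel code):

#include <assert.h>
#include <string.h>

static void insert_sorted(int *arr, int *nr, int pos, int value)
{
	int tail = *nr - pos;		/* entries at or after the insert point */

	if (tail > 0)
		memmove(&arr[pos + 1], &arr[pos], tail * sizeof(arr[0]));
	arr[pos] = value;
	(*nr)++;
}

int main(void)
{
	int arr[8] = { 10, 20, 40 };
	int nr = 3;

	insert_sorted(arr, &nr, 2, 30);	/* like the "insert before" case above */
	assert(nr == 4 && arr[2] == 30 && arr[3] == 40);
	return 0;
}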
1041                                                  1013 
1042 /*                                               1014 /*
1043  * ext4_ext_split:                               1015  * ext4_ext_split:
1044  * inserts new subtree into the path, using f    1016  * inserts new subtree into the path, using free index entry
1045  * at depth @at:                                 1017  * at depth @at:
1046  * - allocates all needed blocks (new leaf an    1018  * - allocates all needed blocks (new leaf and all intermediate index blocks)
1047  * - makes decision where to split               1019  * - makes decision where to split
1048  * - moves remaining extents and index entrie    1020  * - moves remaining extents and index entries (right to the split point)
1049  *   into the newly allocated blocks             1021  *   into the newly allocated blocks
1050  * - initializes subtree                         1022  * - initializes subtree
1051  */                                              1023  */
1052 static int ext4_ext_split(handle_t *handle, s    1024 static int ext4_ext_split(handle_t *handle, struct inode *inode,
1053                           unsigned int flags,    1025                           unsigned int flags,
1054                           struct ext4_ext_pat    1026                           struct ext4_ext_path *path,
1055                           struct ext4_extent     1027                           struct ext4_extent *newext, int at)
1056 {                                                1028 {
1057         struct buffer_head *bh = NULL;           1029         struct buffer_head *bh = NULL;
1058         int depth = ext_depth(inode);            1030         int depth = ext_depth(inode);
1059         struct ext4_extent_header *neh;          1031         struct ext4_extent_header *neh;
1060         struct ext4_extent_idx *fidx;            1032         struct ext4_extent_idx *fidx;
1061         int i = at, k, m, a;                     1033         int i = at, k, m, a;
1062         ext4_fsblk_t newblock, oldblock;         1034         ext4_fsblk_t newblock, oldblock;
1063         __le32 border;                           1035         __le32 border;
1064         ext4_fsblk_t *ablocks = NULL; /* arra    1036         ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
1065         gfp_t gfp_flags = GFP_NOFS;           << 
1066         int err = 0;                             1037         int err = 0;
1067         size_t ext_size = 0;                  << 
1068                                               << 
1069         if (flags & EXT4_EX_NOFAIL)           << 
1070                 gfp_flags |= __GFP_NOFAIL;    << 
1071                                                  1038 
1072         /* make decision: where to split? */     1039         /* make decision: where to split? */
1073         /* FIXME: now decision is simplest: a    1040         /* FIXME: now decision is simplest: at current extent */
1074                                                  1041 
1075         /* if current leaf will be split, the    1042         /* if current leaf will be split, then we should use
1076          * border from split point */            1043          * border from split point */
1077         if (unlikely(path[depth].p_ext > EXT_    1044         if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
1078                 EXT4_ERROR_INODE(inode, "p_ex    1045                 EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
1079                 return -EFSCORRUPTED;            1046                 return -EFSCORRUPTED;
1080         }                                        1047         }
1081         if (path[depth].p_ext != EXT_MAX_EXTE    1048         if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
1082                 border = path[depth].p_ext[1]    1049                 border = path[depth].p_ext[1].ee_block;
1083                 ext_debug(inode, "leaf will be split."                    !! 1050                 ext_debug("leaf will be split."
1084                                 " next leaf s    1051                                 " next leaf starts at %d\n",
1085                                   le32_to_cpu    1052                                   le32_to_cpu(border));
1086         } else {                                 1053         } else {
1087                 border = newext->ee_block;       1054                 border = newext->ee_block;
1088                 ext_debug(inode, "leaf will be added."                    !! 1055                 ext_debug("leaf will be added."
1089                                 " next leaf s    1056                                 " next leaf starts at %d\n",
1090                                 le32_to_cpu(b    1057                                 le32_to_cpu(border));
1091         }                                        1058         }
1092                                                  1059 
1093         /*                                       1060         /*
1094          * If error occurs, then we break pro    1061          * If error occurs, then we break processing
1095          * and mark filesystem read-only. ind    1062          * and mark filesystem read-only. index won't
1096          * be inserted and tree will be in co    1063          * be inserted and tree will be in consistent
1097          * state. Next mount will repair buff    1064          * state. Next mount will repair buffers too.
1098          */                                      1065          */
1099                                                  1066 
1100         /*                                       1067         /*
1101          * Get array to track all allocated b    1068          * Get array to track all allocated blocks.
1102          * We need this to handle errors and     1069          * We need this to handle errors and free blocks
1103          * upon them.                            1070          * upon them.
1104          */                                      1071          */
1105         ablocks = kcalloc(depth, sizeof(ext4_fsblk_t), gfp_flags);        !! 1072         ablocks = kcalloc(depth, sizeof(ext4_fsblk_t), GFP_NOFS);
1106         if (!ablocks)                            1073         if (!ablocks)
1107                 return -ENOMEM;                  1074                 return -ENOMEM;
1108                                                  1075 
1109         /* allocate all needed blocks */         1076         /* allocate all needed blocks */
1110         ext_debug(inode, "allocate %d blocks for indexes/leaf\n", depth - at); !! 1077         ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
1111         for (a = 0; a < depth - at; a++) {       1078         for (a = 0; a < depth - at; a++) {
1112                 newblock = ext4_ext_new_meta_    1079                 newblock = ext4_ext_new_meta_block(handle, inode, path,
1113                                                  1080                                                    newext, &err, flags);
1114                 if (newblock == 0)               1081                 if (newblock == 0)
1115                         goto cleanup;            1082                         goto cleanup;
1116                 ablocks[a] = newblock;           1083                 ablocks[a] = newblock;
1117         }                                        1084         }
1118                                                  1085 
1119         /* initialize new leaf */                1086         /* initialize new leaf */
1120         newblock = ablocks[--a];                 1087         newblock = ablocks[--a];
1121         if (unlikely(newblock == 0)) {           1088         if (unlikely(newblock == 0)) {
1122                 EXT4_ERROR_INODE(inode, "newb    1089                 EXT4_ERROR_INODE(inode, "newblock == 0!");
1123                 err = -EFSCORRUPTED;             1090                 err = -EFSCORRUPTED;
1124                 goto cleanup;                    1091                 goto cleanup;
1125         }                                        1092         }
1126         bh = sb_getblk_gfp(inode->i_sb, newbl    1093         bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
1127         if (unlikely(!bh)) {                     1094         if (unlikely(!bh)) {
1128                 err = -ENOMEM;                   1095                 err = -ENOMEM;
1129                 goto cleanup;                    1096                 goto cleanup;
1130         }                                        1097         }
1131         lock_buffer(bh);                         1098         lock_buffer(bh);
1132                                                  1099 
1133         err = ext4_journal_get_create_access(handle, inode->i_sb, bh,     !! 1100         err = ext4_journal_get_create_access(handle, bh);
1134                                              EXT4_JTR_NONE);               << 
1135         if (err)                                 1101         if (err)
1136                 goto cleanup;                    1102                 goto cleanup;
1137                                                  1103 
1138         neh = ext_block_hdr(bh);                 1104         neh = ext_block_hdr(bh);
1139         neh->eh_entries = 0;                     1105         neh->eh_entries = 0;
1140         neh->eh_max = cpu_to_le16(ext4_ext_sp    1106         neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
1141         neh->eh_magic = EXT4_EXT_MAGIC;          1107         neh->eh_magic = EXT4_EXT_MAGIC;
1142         neh->eh_depth = 0;                       1108         neh->eh_depth = 0;
1143         neh->eh_generation = 0;               << 
1144                                                  1109 
1145         /* move remainder of path[depth] to t    1110         /* move remainder of path[depth] to the new leaf */
1146         if (unlikely(path[depth].p_hdr->eh_en    1111         if (unlikely(path[depth].p_hdr->eh_entries !=
1147                      path[depth].p_hdr->eh_ma    1112                      path[depth].p_hdr->eh_max)) {
1148                 EXT4_ERROR_INODE(inode, "eh_e    1113                 EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
1149                                  path[depth].    1114                                  path[depth].p_hdr->eh_entries,
1150                                  path[depth].    1115                                  path[depth].p_hdr->eh_max);
1151                 err = -EFSCORRUPTED;             1116                 err = -EFSCORRUPTED;
1152                 goto cleanup;                    1117                 goto cleanup;
1153         }                                        1118         }
1154         /* start copy from next extent */        1119         /* start copy from next extent */
1155         m = EXT_MAX_EXTENT(path[depth].p_hdr)    1120         m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
1156         ext4_ext_show_move(inode, path, newbl    1121         ext4_ext_show_move(inode, path, newblock, depth);
1157         if (m) {                                 1122         if (m) {
1158                 struct ext4_extent *ex;          1123                 struct ext4_extent *ex;
1159                 ex = EXT_FIRST_EXTENT(neh);      1124                 ex = EXT_FIRST_EXTENT(neh);
1160                 memmove(ex, path[depth].p_ext    1125                 memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
1161                 le16_add_cpu(&neh->eh_entries    1126                 le16_add_cpu(&neh->eh_entries, m);
1162         }                                        1127         }
1163                                                  1128 
1164         /* zero out unused area in the extent << 
1165         ext_size = sizeof(struct ext4_extent_ << 
1166                 sizeof(struct ext4_extent) *  << 
1167         memset(bh->b_data + ext_size, 0, inod << 
1168         ext4_extent_block_csum_set(inode, neh    1129         ext4_extent_block_csum_set(inode, neh);
1169         set_buffer_uptodate(bh);                 1130         set_buffer_uptodate(bh);
1170         unlock_buffer(bh);                       1131         unlock_buffer(bh);
1171                                                  1132 
1172         err = ext4_handle_dirty_metadata(hand    1133         err = ext4_handle_dirty_metadata(handle, inode, bh);
1173         if (err)                                 1134         if (err)
1174                 goto cleanup;                    1135                 goto cleanup;
1175         brelse(bh);                              1136         brelse(bh);
1176         bh = NULL;                               1137         bh = NULL;
1177                                                  1138 
1178         /* correct old leaf */                   1139         /* correct old leaf */
1179         if (m) {                                 1140         if (m) {
1180                 err = ext4_ext_get_access(han    1141                 err = ext4_ext_get_access(handle, inode, path + depth);
1181                 if (err)                         1142                 if (err)
1182                         goto cleanup;            1143                         goto cleanup;
1183                 le16_add_cpu(&path[depth].p_h    1144                 le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
1184                 err = ext4_ext_dirty(handle,     1145                 err = ext4_ext_dirty(handle, inode, path + depth);
1185                 if (err)                         1146                 if (err)
1186                         goto cleanup;            1147                         goto cleanup;
1187                                                  1148 
1188         }                                        1149         }
1189                                                  1150 
1190         /* create intermediate indexes */        1151         /* create intermediate indexes */
1191         k = depth - at - 1;                      1152         k = depth - at - 1;
1192         if (unlikely(k < 0)) {                   1153         if (unlikely(k < 0)) {
1193                 EXT4_ERROR_INODE(inode, "k %d    1154                 EXT4_ERROR_INODE(inode, "k %d < 0!", k);
1194                 err = -EFSCORRUPTED;             1155                 err = -EFSCORRUPTED;
1195                 goto cleanup;                    1156                 goto cleanup;
1196         }                                        1157         }
1197         if (k)                                   1158         if (k)
1198                 ext_debug(inode, "create %d i !! 1159                 ext_debug("create %d intermediate indices\n", k);
1199         /* insert new index into current inde    1160         /* insert new index into current index block */
1200         /* current depth stored in i var */      1161         /* current depth stored in i var */
1201         i = depth - 1;                           1162         i = depth - 1;
1202         while (k--) {                            1163         while (k--) {
1203                 oldblock = newblock;             1164                 oldblock = newblock;
1204                 newblock = ablocks[--a];         1165                 newblock = ablocks[--a];
1205                 bh = sb_getblk(inode->i_sb, n    1166                 bh = sb_getblk(inode->i_sb, newblock);
1206                 if (unlikely(!bh)) {             1167                 if (unlikely(!bh)) {
1207                         err = -ENOMEM;           1168                         err = -ENOMEM;
1208                         goto cleanup;            1169                         goto cleanup;
1209                 }                                1170                 }
1210                 lock_buffer(bh);                 1171                 lock_buffer(bh);
1211                                                  1172 
1212                 err = ext4_journal_get_create !! 1173                 err = ext4_journal_get_create_access(handle, bh);
1213                                               << 
1214                 if (err)                         1174                 if (err)
1215                         goto cleanup;            1175                         goto cleanup;
1216                                                  1176 
1217                 neh = ext_block_hdr(bh);         1177                 neh = ext_block_hdr(bh);
1218                 neh->eh_entries = cpu_to_le16    1178                 neh->eh_entries = cpu_to_le16(1);
1219                 neh->eh_magic = EXT4_EXT_MAGI    1179                 neh->eh_magic = EXT4_EXT_MAGIC;
1220                 neh->eh_max = cpu_to_le16(ext    1180                 neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
1221                 neh->eh_depth = cpu_to_le16(d    1181                 neh->eh_depth = cpu_to_le16(depth - i);
1222                 neh->eh_generation = 0;       << 
1223                 fidx = EXT_FIRST_INDEX(neh);     1182                 fidx = EXT_FIRST_INDEX(neh);
1224                 fidx->ei_block = border;         1183                 fidx->ei_block = border;
1225                 ext4_idx_store_pblock(fidx, o    1184                 ext4_idx_store_pblock(fidx, oldblock);
1226                                                  1185 
1227                 ext_debug(inode, "int.index a !! 1186                 ext_debug("int.index at %d (block %llu): %u -> %llu\n",
1228                                 i, newblock,     1187                                 i, newblock, le32_to_cpu(border), oldblock);
1229                                                  1188 
1230                 /* move remainder of path[i]     1189                 /* move remainder of path[i] to the new index block */
1231                 if (unlikely(EXT_MAX_INDEX(pa    1190                 if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
1232                                         EXT_L    1191                                         EXT_LAST_INDEX(path[i].p_hdr))) {
1233                         EXT4_ERROR_INODE(inod    1192                         EXT4_ERROR_INODE(inode,
1234                                          "EXT    1193                                          "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
1235                                          le32    1194                                          le32_to_cpu(path[i].p_ext->ee_block));
1236                         err = -EFSCORRUPTED;     1195                         err = -EFSCORRUPTED;
1237                         goto cleanup;            1196                         goto cleanup;
1238                 }                                1197                 }
1239                 /* start copy indexes */         1198                 /* start copy indexes */
1240                 m = EXT_MAX_INDEX(path[i].p_h    1199                 m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
1241                 ext_debug(inode, "cur 0x%p, l !! 1200                 ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
1242                                 EXT_MAX_INDEX    1201                                 EXT_MAX_INDEX(path[i].p_hdr));
1243                 ext4_ext_show_move(inode, pat    1202                 ext4_ext_show_move(inode, path, newblock, i);
1244                 if (m) {                         1203                 if (m) {
1245                         memmove(++fidx, path[    1204                         memmove(++fidx, path[i].p_idx,
1246                                 sizeof(struct    1205                                 sizeof(struct ext4_extent_idx) * m);
1247                         le16_add_cpu(&neh->eh    1206                         le16_add_cpu(&neh->eh_entries, m);
1248                 }                                1207                 }
1249                 /* zero out unused area in th << 
1250                 ext_size = sizeof(struct ext4 << 
1251                    (sizeof(struct ext4_extent << 
1252                 memset(bh->b_data + ext_size, << 
1253                         inode->i_sb->s_blocks << 
1254                 ext4_extent_block_csum_set(in    1208                 ext4_extent_block_csum_set(inode, neh);
1255                 set_buffer_uptodate(bh);         1209                 set_buffer_uptodate(bh);
1256                 unlock_buffer(bh);               1210                 unlock_buffer(bh);
1257                                                  1211 
1258                 err = ext4_handle_dirty_metad    1212                 err = ext4_handle_dirty_metadata(handle, inode, bh);
1259                 if (err)                         1213                 if (err)
1260                         goto cleanup;            1214                         goto cleanup;
1261                 brelse(bh);                      1215                 brelse(bh);
1262                 bh = NULL;                       1216                 bh = NULL;
1263                                                  1217 
1264                 /* correct old index */          1218                 /* correct old index */
1265                 if (m) {                         1219                 if (m) {
1266                         err = ext4_ext_get_ac    1220                         err = ext4_ext_get_access(handle, inode, path + i);
1267                         if (err)                 1221                         if (err)
1268                                 goto cleanup;    1222                                 goto cleanup;
1269                         le16_add_cpu(&path[i]    1223                         le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
1270                         err = ext4_ext_dirty(    1224                         err = ext4_ext_dirty(handle, inode, path + i);
1271                         if (err)                 1225                         if (err)
1272                                 goto cleanup;    1226                                 goto cleanup;
1273                 }                                1227                 }
1274                                                  1228 
1275                 i--;                             1229                 i--;
1276         }                                        1230         }
1277                                                  1231 
1278         /* insert new index */                   1232         /* insert new index */
1279         err = ext4_ext_insert_index(handle, i    1233         err = ext4_ext_insert_index(handle, inode, path + at,
1280                                     le32_to_c    1234                                     le32_to_cpu(border), newblock);
1281                                                  1235 
1282 cleanup:                                         1236 cleanup:
1283         if (bh) {                                1237         if (bh) {
1284                 if (buffer_locked(bh))           1238                 if (buffer_locked(bh))
1285                         unlock_buffer(bh);       1239                         unlock_buffer(bh);
1286                 brelse(bh);                      1240                 brelse(bh);
1287         }                                        1241         }
1288                                                  1242 
1289         if (err) {                               1243         if (err) {
1290                 /* free all allocated blocks     1244                 /* free all allocated blocks in error case */
1291                 for (i = 0; i < depth; i++) {    1245                 for (i = 0; i < depth; i++) {
1292                         if (!ablocks[i])         1246                         if (!ablocks[i])
1293                                 continue;        1247                                 continue;
1294                         ext4_free_blocks(hand    1248                         ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
1295                                          EXT4    1249                                          EXT4_FREE_BLOCKS_METADATA);
1296                 }                                1250                 }
1297         }                                        1251         }
1298         kfree(ablocks);                          1252         kfree(ablocks);
1299                                                  1253 
1300         return err;                              1254         return err;
1301 }                                                1255 }
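
The split path that ends above boils down to: allocate fresh metadata blocks, move every extent after the split point into a new leaf, create any intermediate index blocks, and record the new leaf's first logical block as the "border" key inserted into the parent index (freeing everything again on error). A minimal user-space sketch of the central move, using invented toy_leaf/toy_extent types (not the real on-disk ext4_extent layout) and assuming the border comes from the first moved extent:

#include <stdio.h>
#include <string.h>

struct toy_extent { unsigned lblk, len; unsigned long long pblk; };
struct toy_leaf   { unsigned entries, max; struct toy_extent ex[4]; };

/*
 * Move every extent after position 'at' from 'old' into the empty leaf
 * 'new_leaf' -- the same job as the memmove()/le16_add_cpu() pair in
 * ext4_ext_split() -- and return the logical block the parent index
 * entry ("border") should carry for the new leaf.
 */
static unsigned split_leaf(struct toy_leaf *old, struct toy_leaf *new_leaf, unsigned at)
{
        unsigned m = old->entries - (at + 1);   /* number of extents to move */

        memmove(new_leaf->ex, &old->ex[at + 1], m * sizeof(struct toy_extent));
        new_leaf->entries = m;
        old->entries -= m;
        return new_leaf->ex[0].lblk;
}

int main(void)
{
        struct toy_leaf old = { 4, 4, { {0, 8, 100}, {8, 8, 200}, {16, 8, 300}, {24, 8, 400} } };
        struct toy_leaf new_leaf = { 0, 4, { {0, 0, 0} } };
        unsigned border = split_leaf(&old, &new_leaf, 1);

        printf("old leaf keeps %u extents, new leaf holds %u, border lblk %u\n",
               old.entries, new_leaf.entries, border);
        return 0;
}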
1302                                                  1256 
1303 /*                                               1257 /*
1304  * ext4_ext_grow_indepth:                        1258  * ext4_ext_grow_indepth:
1305  * implements tree growing procedure:            1259  * implements tree growing procedure:
1306  * - allocates new block                         1260  * - allocates new block
1307  * - moves top-level data (index block or lea    1261  * - moves top-level data (index block or leaf) into the new block
1308  * - initializes new top-level, creating inde    1262  * - initializes new top-level, creating index that points to the
1309  *   just created block                          1263  *   just created block
1310  */                                              1264  */
1311 static int ext4_ext_grow_indepth(handle_t *ha    1265 static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
1312                                  unsigned int    1266                                  unsigned int flags)
1313 {                                                1267 {
1314         struct ext4_extent_header *neh;          1268         struct ext4_extent_header *neh;
1315         struct buffer_head *bh;                  1269         struct buffer_head *bh;
1316         ext4_fsblk_t newblock, goal = 0;         1270         ext4_fsblk_t newblock, goal = 0;
1317         struct ext4_super_block *es = EXT4_SB    1271         struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
1318         int err = 0;                             1272         int err = 0;
1319         size_t ext_size = 0;                  << 
1320                                                  1273 
1321         /* Try to prepend new index to old on    1274         /* Try to prepend new index to old one */
1322         if (ext_depth(inode))                    1275         if (ext_depth(inode))
1323                 goal = ext4_idx_pblock(EXT_FI    1276                 goal = ext4_idx_pblock(EXT_FIRST_INDEX(ext_inode_hdr(inode)));
1324         if (goal > le32_to_cpu(es->s_first_da    1277         if (goal > le32_to_cpu(es->s_first_data_block)) {
1325                 flags |= EXT4_MB_HINT_TRY_GOA    1278                 flags |= EXT4_MB_HINT_TRY_GOAL;
1326                 goal--;                          1279                 goal--;
1327         } else                                   1280         } else
1328                 goal = ext4_inode_to_goal_blo    1281                 goal = ext4_inode_to_goal_block(inode);
1329         newblock = ext4_new_meta_blocks(handl    1282         newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
1330                                         NULL,    1283                                         NULL, &err);
1331         if (newblock == 0)                       1284         if (newblock == 0)
1332                 return err;                      1285                 return err;
1333                                                  1286 
1334         bh = sb_getblk_gfp(inode->i_sb, newbl    1287         bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
1335         if (unlikely(!bh))                       1288         if (unlikely(!bh))
1336                 return -ENOMEM;                  1289                 return -ENOMEM;
1337         lock_buffer(bh);                         1290         lock_buffer(bh);
1338                                                  1291 
1339         err = ext4_journal_get_create_access( !! 1292         err = ext4_journal_get_create_access(handle, bh);
1340                                               << 
1341         if (err) {                               1293         if (err) {
1342                 unlock_buffer(bh);               1294                 unlock_buffer(bh);
1343                 goto out;                        1295                 goto out;
1344         }                                        1296         }
1345                                                  1297 
1346         ext_size = sizeof(EXT4_I(inode)->i_da << 
1347         /* move top-level index/leaf into new    1298         /* move top-level index/leaf into new block */
1348         memmove(bh->b_data, EXT4_I(inode)->i_ !! 1299         memmove(bh->b_data, EXT4_I(inode)->i_data,
1349         /* zero out unused area in the extent !! 1300                 sizeof(EXT4_I(inode)->i_data));
1350         memset(bh->b_data + ext_size, 0, inod << 
1351                                                  1301 
1352         /* set size of new block */              1302         /* set size of new block */
1353         neh = ext_block_hdr(bh);                 1303         neh = ext_block_hdr(bh);
1354         /* old root could have indexes or lea    1304         /* old root could have indexes or leaves
1355          * so calculate eh_max the right way */   1305          * so calculate eh_max the right way */
1356         if (ext_depth(inode))                    1306         if (ext_depth(inode))
1357                 neh->eh_max = cpu_to_le16(ext    1307                 neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
1358         else                                     1308         else
1359                 neh->eh_max = cpu_to_le16(ext    1309                 neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
1360         neh->eh_magic = EXT4_EXT_MAGIC;          1310         neh->eh_magic = EXT4_EXT_MAGIC;
1361         ext4_extent_block_csum_set(inode, neh    1311         ext4_extent_block_csum_set(inode, neh);
1362         set_buffer_uptodate(bh);                 1312         set_buffer_uptodate(bh);
1363         set_buffer_verified(bh);              << 
1364         unlock_buffer(bh);                       1313         unlock_buffer(bh);
1365                                                  1314 
1366         err = ext4_handle_dirty_metadata(hand    1315         err = ext4_handle_dirty_metadata(handle, inode, bh);
1367         if (err)                                 1316         if (err)
1368                 goto out;                        1317                 goto out;
1369                                                  1318 
1370         /* Update top-level index: num,max,po    1319         /* Update top-level index: num,max,pointer */
1371         neh = ext_inode_hdr(inode);              1320         neh = ext_inode_hdr(inode);
1372         neh->eh_entries = cpu_to_le16(1);        1321         neh->eh_entries = cpu_to_le16(1);
1373         ext4_idx_store_pblock(EXT_FIRST_INDEX    1322         ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock);
1374         if (neh->eh_depth == 0) {                1323         if (neh->eh_depth == 0) {
1375                 /* Root extent block becomes     1324                 /* Root extent block becomes index block */
1376                 neh->eh_max = cpu_to_le16(ext    1325                 neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
1377                 EXT_FIRST_INDEX(neh)->ei_bloc    1326                 EXT_FIRST_INDEX(neh)->ei_block =
1378                         EXT_FIRST_EXTENT(neh)    1327                         EXT_FIRST_EXTENT(neh)->ee_block;
1379         }                                        1328         }
1380         ext_debug(inode, "new root: num %d(%d !! 1329         ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
1381                   le16_to_cpu(neh->eh_entries    1330                   le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
1382                   le32_to_cpu(EXT_FIRST_INDEX    1331                   le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
1383                   ext4_idx_pblock(EXT_FIRST_I    1332                   ext4_idx_pblock(EXT_FIRST_INDEX(neh)));
1384                                                  1333 
1385         le16_add_cpu(&neh->eh_depth, 1);         1334         le16_add_cpu(&neh->eh_depth, 1);
1386         err = ext4_mark_inode_dirty(handle, i !! 1335         ext4_mark_inode_dirty(handle, inode);
1387 out:                                             1336 out:
1388         brelse(bh);                              1337         brelse(bh);
1389                                                  1338 
1390         return err;                              1339         return err;
1391 }                                                1340 }
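
ext4_ext_grow_indepth() above pushes the whole root down one level: the root's current contents are copied into a newly allocated block, and the root is rewritten as a single index entry pointing at that block, with eh_depth bumped by one. A rough user-space model of that idea, with invented toy_node/toy_tree types standing in for the inode's extent header and the new metadata block:

#include <stdio.h>

/* Toy model: a node is just a counted array of keys; 'depth' lives beside
 * the root, as eh_depth does in the inode's extent header. */
struct toy_node { unsigned entries; unsigned keys[4]; };

struct toy_tree {
        struct toy_node root;
        unsigned depth;
        struct toy_node blocks[8];      /* stand-in for allocated metadata blocks */
        unsigned nr_blocks;
};

/* Grow the tree by one level: copy the root into a newly "allocated" block,
 * then leave the root as a single index whose key is the child's first key,
 * roughly what ext4_ext_grow_indepth() does with its memmove() plus the
 * index rewrite and le16_add_cpu(&neh->eh_depth, 1). */
static void grow_indepth(struct toy_tree *t)
{
        struct toy_node *child = &t->blocks[t->nr_blocks++];

        *child = t->root;                       /* old root becomes a child */
        t->root.entries = 1;
        t->root.keys[0] = child->keys[0];       /* single index covering the child */
        t->depth++;
}

int main(void)
{
        struct toy_tree t = { .root = { 4, { 0, 8, 16, 24 } }, .depth = 0 };

        grow_indepth(&t);
        printf("depth %u, root entries %u, first child entries %u\n",
               t.depth, t.root.entries, t.blocks[0].entries);
        return 0;
}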
1392                                                  1341 
1393 /*                                               1342 /*
1394  * ext4_ext_create_new_leaf:                     1343  * ext4_ext_create_new_leaf:
1395  * finds empty index and adds new leaf.          1344  * finds empty index and adds new leaf.
1396  * if no free index is found, then it request    1345  * if no free index is found, then it requests in-depth growing.
1397  */                                              1346  */
1398 static struct ext4_ext_path *                 !! 1347 static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
1399 ext4_ext_create_new_leaf(handle_t *handle, st !! 1348                                     unsigned int mb_flags,
1400                          unsigned int mb_flag !! 1349                                     unsigned int gb_flags,
1401                          struct ext4_ext_path !! 1350                                     struct ext4_ext_path **ppath,
1402                          struct ext4_extent * !! 1351                                     struct ext4_extent *newext)
1403 {                                                1352 {
                                                   >> 1353         struct ext4_ext_path *path = *ppath;
1404         struct ext4_ext_path *curp;              1354         struct ext4_ext_path *curp;
1405         int depth, i, err = 0;                   1355         int depth, i, err = 0;
1406         ext4_lblk_t ee_block = le32_to_cpu(ne << 
1407                                                  1356 
1408 repeat:                                          1357 repeat:
1409         i = depth = ext_depth(inode);            1358         i = depth = ext_depth(inode);
1410                                                  1359 
1411         /* walk up to the tree and look for f    1360         /* walk up to the tree and look for free index entry */
1412         curp = path + depth;                     1361         curp = path + depth;
1413         while (i > 0 && !EXT_HAS_FREE_INDEX(c    1362         while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
1414                 i--;                             1363                 i--;
1415                 curp--;                          1364                 curp--;
1416         }                                        1365         }
1417                                                  1366 
1418         /* we use already allocated block for    1367         /* we use already allocated block for index block,
1419          * so subsequent data blocks should b    1368          * so subsequent data blocks should be contiguous */
1420         if (EXT_HAS_FREE_INDEX(curp)) {          1369         if (EXT_HAS_FREE_INDEX(curp)) {
1421                 /* if we found index with fre    1370                 /* if we found index with free entry, then use that
1422                  * entry: create all needed s    1371                  * entry: create all needed subtree and add new leaf */
1423                 err = ext4_ext_split(handle,     1372                 err = ext4_ext_split(handle, inode, mb_flags, path, newext, i);
1424                 if (err)                         1373                 if (err)
1425                         goto errout;          !! 1374                         goto out;
1426                                                  1375 
1427                 /* refill path */                1376                 /* refill path */
1428                 path = ext4_find_extent(inode !! 1377                 path = ext4_find_extent(inode,
1429                 return path;                  !! 1378                                     (ext4_lblk_t)le32_to_cpu(newext->ee_block),
1430         }                                     !! 1379                                     ppath, gb_flags);
1431                                               !! 1380                 if (IS_ERR(path))
1432         /* tree is full, time to grow in dept !! 1381                         err = PTR_ERR(path);
1433         err = ext4_ext_grow_indepth(handle, i !! 1382         } else {
1434         if (err)                              !! 1383                 /* tree is full, time to grow in depth */
1435                 goto errout;                  !! 1384                 err = ext4_ext_grow_indepth(handle, inode, mb_flags);
                                                   >> 1385                 if (err)
                                                   >> 1386                         goto out;
1436                                                  1387 
1437         /* refill path */                     !! 1388                 /* refill path */
1438         path = ext4_find_extent(inode, ee_blo !! 1389                 path = ext4_find_extent(inode,
1439         if (IS_ERR(path))                     !! 1390                                    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
1440                 return path;                  !! 1391                                     ppath, gb_flags);
                                                   >> 1392                 if (IS_ERR(path)) {
                                                   >> 1393                         err = PTR_ERR(path);
                                                   >> 1394                         goto out;
                                                   >> 1395                 }
1441                                                  1396 
1442         /*                                    !! 1397                 /*
1443          * only first (depth 0 -> 1) produces !! 1398                  * only first (depth 0 -> 1) produces free space;
1444          * in all other cases we have to spli !! 1399                  * in all other cases we have to split the grown tree
1445          */                                   !! 1400                  */
1446         depth = ext_depth(inode);             !! 1401                 depth = ext_depth(inode);
1447         if (path[depth].p_hdr->eh_entries ==  !! 1402                 if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
1448                 /* now we need to split */    !! 1403                         /* now we need to split */
1449                 goto repeat;                  !! 1404                         goto repeat;
                                                   >> 1405                 }
1450         }                                        1406         }
1451                                                  1407 
1452         return path;                          !! 1408 out:
1453                                               !! 1409         return err;
1454 errout:                                       << 
1455         ext4_free_ext_path(path);             << 
1456         return ERR_PTR(err);                  << 
1457 }                                                1410 }
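
ext4_ext_create_new_leaf() above chooses between two strategies: if some node on the path still has a free index slot, split from that level down to a new leaf; otherwise grow the tree in depth and retry the lookup. A small sketch of just that decision, over an invented per-level fill table rather than a real ext4_ext_path:

#include <stdio.h>

/* Per-level fill state, indexed from the root (0) down to the leaf. */
struct toy_level { unsigned entries, max; };

/* Mimic the walk in ext4_ext_create_new_leaf(): move from the leaf towards
 * the root looking for a node with a free slot; report the level a split
 * would start from, or -1 if the whole path is full and the tree has to
 * grow in depth first. */
static int find_split_level(const struct toy_level *lvl, int depth)
{
        int i = depth;

        while (i > 0 && lvl[i].entries == lvl[i].max)
                i--;
        return (lvl[i].entries < lvl[i].max) ? i : -1;
}

int main(void)
{
        struct toy_level path[3] = { {4, 4}, {3, 4}, {4, 4} };  /* root..leaf */
        int depth = 2;
        int at = find_split_level(path, depth);

        if (at < 0)
                printf("path full: grow in depth, then retry the lookup\n");
        else
                printf("free slot at level %d: split from there down to a new leaf\n", at);
        return 0;
}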
1458                                                  1411 
1459 /*                                               1412 /*
1460  * search the closest allocated block to the     1413  * search the closest allocated block to the left for *logical
1461  * and returns it at @logical + its physical     1414  * and returns it at @logical + its physical address at @phys
1462  * if *logical is the smallest allocated bloc    1415  * if *logical is the smallest allocated block, the function
1463  * returns 0 at @phys                            1416  * returns 0 at @phys
1464  * return value contains 0 (success) or error    1417  * return value contains 0 (success) or error code
1465  */                                              1418  */
1466 static int ext4_ext_search_left(struct inode     1419 static int ext4_ext_search_left(struct inode *inode,
1467                                 struct ext4_e    1420                                 struct ext4_ext_path *path,
1468                                 ext4_lblk_t *    1421                                 ext4_lblk_t *logical, ext4_fsblk_t *phys)
1469 {                                                1422 {
1470         struct ext4_extent_idx *ix;              1423         struct ext4_extent_idx *ix;
1471         struct ext4_extent *ex;                  1424         struct ext4_extent *ex;
1472         int depth, ee_len;                       1425         int depth, ee_len;
1473                                                  1426 
1474         if (unlikely(path == NULL)) {            1427         if (unlikely(path == NULL)) {
1475                 EXT4_ERROR_INODE(inode, "path    1428                 EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
1476                 return -EFSCORRUPTED;            1429                 return -EFSCORRUPTED;
1477         }                                        1430         }
1478         depth = path->p_depth;                   1431         depth = path->p_depth;
1479         *phys = 0;                               1432         *phys = 0;
1480                                                  1433 
1481         if (depth == 0 && path->p_ext == NULL    1434         if (depth == 0 && path->p_ext == NULL)
1482                 return 0;                        1435                 return 0;
1483                                                  1436 
1484         /* usually extent in the path covers     1437         /* usually extent in the path covers blocks smaller
1485          * than *logical, but it can be that     1438          * than *logical, but it can be that extent is the
1486          * first one in the file */              1439          * first one in the file */
1487                                                  1440 
1488         ex = path[depth].p_ext;                  1441         ex = path[depth].p_ext;
1489         ee_len = ext4_ext_get_actual_len(ex);    1442         ee_len = ext4_ext_get_actual_len(ex);
1490         if (*logical < le32_to_cpu(ex->ee_blo    1443         if (*logical < le32_to_cpu(ex->ee_block)) {
1491                 if (unlikely(EXT_FIRST_EXTENT    1444                 if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
1492                         EXT4_ERROR_INODE(inod    1445                         EXT4_ERROR_INODE(inode,
1493                                          "EXT    1446                                          "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
1494                                          *log    1447                                          *logical, le32_to_cpu(ex->ee_block));
1495                         return -EFSCORRUPTED;    1448                         return -EFSCORRUPTED;
1496                 }                                1449                 }
1497                 while (--depth >= 0) {           1450                 while (--depth >= 0) {
1498                         ix = path[depth].p_id    1451                         ix = path[depth].p_idx;
1499                         if (unlikely(ix != EX    1452                         if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
1500                                 EXT4_ERROR_IN    1453                                 EXT4_ERROR_INODE(inode,
1501                                   "ix (%d) !=    1454                                   "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
1502                                   ix != NULL     1455                                   ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
1503                                   le32_to_cpu !! 1456                                   EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
                                                   >> 1457                 le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0,
1504                                   depth);        1458                                   depth);
1505                                 return -EFSCO    1459                                 return -EFSCORRUPTED;
1506                         }                        1460                         }
1507                 }                                1461                 }
1508                 return 0;                        1462                 return 0;
1509         }                                        1463         }
1510                                                  1464 
1511         if (unlikely(*logical < (le32_to_cpu(    1465         if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
1512                 EXT4_ERROR_INODE(inode,          1466                 EXT4_ERROR_INODE(inode,
1513                                  "logical %d     1467                                  "logical %d < ee_block %d + ee_len %d!",
1514                                  *logical, le    1468                                  *logical, le32_to_cpu(ex->ee_block), ee_len);
1515                 return -EFSCORRUPTED;            1469                 return -EFSCORRUPTED;
1516         }                                        1470         }
1517                                                  1471 
1518         *logical = le32_to_cpu(ex->ee_block)     1472         *logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
1519         *phys = ext4_ext_pblock(ex) + ee_len     1473         *phys = ext4_ext_pblock(ex) + ee_len - 1;
1520         return 0;                                1474         return 0;
1521 }                                                1475 }
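
ext4_ext_search_left() reports the last allocated block before *logical (the right-hand variant below is the mirror image). A simplified sketch over a flat, sorted array of invented toy extents; it assumes 'logical' itself lies in a hole, as in the kernel caller, and it returns 1/0 for found/not-found, whereas the kernel function signals "nothing to the left" by leaving *phys at 0:

#include <stdio.h>

struct toy_extent { unsigned lblk, len; unsigned long long pblk; };

/* Closest allocated block to the left of 'logical': if some extent starts
 * before it, report that extent's last block, both logical and physical,
 * analogous to the *logical/*phys results of ext4_ext_search_left(). */
static int search_left(const struct toy_extent *ex, unsigned nr, unsigned logical,
                       unsigned *l_ret, unsigned long long *p_ret)
{
        int found = 0;
        unsigned i;

        for (i = 0; i < nr && ex[i].lblk < logical; i++)
                found = 1;
        if (!found)
                return 0;
        i--;                                    /* last extent starting left of 'logical' */
        *l_ret = ex[i].lblk + ex[i].len - 1;
        *p_ret = ex[i].pblk + ex[i].len - 1;
        return 1;
}

int main(void)
{
        struct toy_extent map[] = { { 0, 8, 1000 }, { 32, 8, 2000 } };
        unsigned l; unsigned long long p;

        if (search_left(map, 2, 20, &l, &p))
                printf("left neighbour ends at lblk %u (pblk %llu)\n", l, p);
        else
                printf("no allocated block to the left\n");
        return 0;
}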
1522                                                  1476 
1523 /*                                               1477 /*
1524  * Search the closest allocated block to the  !! 1478  * search the closest allocated block to the right for *logical
1525  * and returns it at @logical + its physical  !! 1479  * and returns it at @logical + its physical address at @phys
1526  * If not exists, return 0 and @phys is set t !! 1480  * if *logical is the largest allocated block, the function
1527  * 1 which means we found an allocated block  !! 1481  * returns 0 at @phys
1528  * Or return a (< 0) error code.              !! 1482  * return value contains 0 (success) or error code
1529  */                                              1483  */
1530 static int ext4_ext_search_right(struct inode    1484 static int ext4_ext_search_right(struct inode *inode,
1531                                  struct ext4_    1485                                  struct ext4_ext_path *path,
1532                                  ext4_lblk_t     1486                                  ext4_lblk_t *logical, ext4_fsblk_t *phys,
1533                                  struct ext4_ !! 1487                                  struct ext4_extent **ret_ex)
1534 {                                                1488 {
1535         struct buffer_head *bh = NULL;           1489         struct buffer_head *bh = NULL;
1536         struct ext4_extent_header *eh;           1490         struct ext4_extent_header *eh;
1537         struct ext4_extent_idx *ix;              1491         struct ext4_extent_idx *ix;
1538         struct ext4_extent *ex;                  1492         struct ext4_extent *ex;
                                                   >> 1493         ext4_fsblk_t block;
1539         int depth;      /* Note, NOT eh_depth    1494         int depth;      /* Note, NOT eh_depth; depth from top of tree */
1540         int ee_len;                              1495         int ee_len;
1541                                                  1496 
1542         if (unlikely(path == NULL)) {            1497         if (unlikely(path == NULL)) {
1543                 EXT4_ERROR_INODE(inode, "path    1498                 EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
1544                 return -EFSCORRUPTED;            1499                 return -EFSCORRUPTED;
1545         }                                        1500         }
1546         depth = path->p_depth;                   1501         depth = path->p_depth;
1547         *phys = 0;                               1502         *phys = 0;
1548                                                  1503 
1549         if (depth == 0 && path->p_ext == NULL    1504         if (depth == 0 && path->p_ext == NULL)
1550                 return 0;                        1505                 return 0;
1551                                                  1506 
1552         /* usually extent in the path covers     1507         /* usually extent in the path covers blocks smaller
1553          * than *logical, but it can be that     1508          * than *logical, but it can be that extent is the
1554          * first one in the file */              1509          * first one in the file */
1555                                                  1510 
1556         ex = path[depth].p_ext;                  1511         ex = path[depth].p_ext;
1557         ee_len = ext4_ext_get_actual_len(ex);    1512         ee_len = ext4_ext_get_actual_len(ex);
1558         if (*logical < le32_to_cpu(ex->ee_blo    1513         if (*logical < le32_to_cpu(ex->ee_block)) {
1559                 if (unlikely(EXT_FIRST_EXTENT    1514                 if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
1560                         EXT4_ERROR_INODE(inod    1515                         EXT4_ERROR_INODE(inode,
1561                                          "fir    1516                                          "first_extent(path[%d].p_hdr) != ex",
1562                                          dept    1517                                          depth);
1563                         return -EFSCORRUPTED;    1518                         return -EFSCORRUPTED;
1564                 }                                1519                 }
1565                 while (--depth >= 0) {           1520                 while (--depth >= 0) {
1566                         ix = path[depth].p_id    1521                         ix = path[depth].p_idx;
1567                         if (unlikely(ix != EX    1522                         if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
1568                                 EXT4_ERROR_IN    1523                                 EXT4_ERROR_INODE(inode,
1569                                                  1524                                                  "ix != EXT_FIRST_INDEX *logical %d!",
1570                                                  1525                                                  *logical);
1571                                 return -EFSCO    1526                                 return -EFSCORRUPTED;
1572                         }                        1527                         }
1573                 }                                1528                 }
1574                 goto found_extent;               1529                 goto found_extent;
1575         }                                        1530         }
1576                                                  1531 
1577         if (unlikely(*logical < (le32_to_cpu(    1532         if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
1578                 EXT4_ERROR_INODE(inode,          1533                 EXT4_ERROR_INODE(inode,
1579                                  "logical %d     1534                                  "logical %d < ee_block %d + ee_len %d!",
1580                                  *logical, le    1535                                  *logical, le32_to_cpu(ex->ee_block), ee_len);
1581                 return -EFSCORRUPTED;            1536                 return -EFSCORRUPTED;
1582         }                                        1537         }
1583                                                  1538 
1584         if (ex != EXT_LAST_EXTENT(path[depth]    1539         if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
1585                 /* next allocated block in th    1540                 /* next allocated block in this leaf */
1586                 ex++;                            1541                 ex++;
1587                 goto found_extent;               1542                 goto found_extent;
1588         }                                        1543         }
1589                                                  1544 
1590         /* go up and search for index to the     1545         /* go up and search for index to the right */
1591         while (--depth >= 0) {                   1546         while (--depth >= 0) {
1592                 ix = path[depth].p_idx;          1547                 ix = path[depth].p_idx;
1593                 if (ix != EXT_LAST_INDEX(path    1548                 if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
1594                         goto got_index;          1549                         goto got_index;
1595         }                                        1550         }
1596                                                  1551 
1597         /* we've gone up to the root and foun    1552         /* we've gone up to the root and found no index to the right */
1598         return 0;                                1553         return 0;
1599                                                  1554 
1600 got_index:                                       1555 got_index:
1601         /* we've found index to the right, le    1556         /* we've found index to the right, let's
1602          * follow it and find the closest all    1557          * follow it and find the closest allocated
1603          * block to the right */                 1558          * block to the right */
1604         ix++;                                    1559         ix++;
                                                   >> 1560         block = ext4_idx_pblock(ix);
1605         while (++depth < path->p_depth) {        1561         while (++depth < path->p_depth) {
1606                 /* subtract from p_depth to g    1562                 /* subtract from p_depth to get proper eh_depth */
1607                 bh = read_extent_tree_block(i !! 1563                 bh = read_extent_tree_block(inode, block,
                                                   >> 1564                                             path->p_depth - depth, 0);
1608                 if (IS_ERR(bh))                  1565                 if (IS_ERR(bh))
1609                         return PTR_ERR(bh);      1566                         return PTR_ERR(bh);
1610                 eh = ext_block_hdr(bh);          1567                 eh = ext_block_hdr(bh);
1611                 ix = EXT_FIRST_INDEX(eh);        1568                 ix = EXT_FIRST_INDEX(eh);
                                                   >> 1569                 block = ext4_idx_pblock(ix);
1612                 put_bh(bh);                      1570                 put_bh(bh);
1613         }                                        1571         }
1614                                                  1572 
1615         bh = read_extent_tree_block(inode, ix !! 1573         bh = read_extent_tree_block(inode, block, path->p_depth - depth, 0);
1616         if (IS_ERR(bh))                          1574         if (IS_ERR(bh))
1617                 return PTR_ERR(bh);              1575                 return PTR_ERR(bh);
1618         eh = ext_block_hdr(bh);                  1576         eh = ext_block_hdr(bh);
1619         ex = EXT_FIRST_EXTENT(eh);               1577         ex = EXT_FIRST_EXTENT(eh);
1620 found_extent:                                    1578 found_extent:
1621         *logical = le32_to_cpu(ex->ee_block);    1579         *logical = le32_to_cpu(ex->ee_block);
1622         *phys = ext4_ext_pblock(ex);             1580         *phys = ext4_ext_pblock(ex);
1623         if (ret_ex)                           !! 1581         *ret_ex = ex;
1624                 *ret_ex = *ex;                << 
1625         if (bh)                                  1582         if (bh)
1626                 put_bh(bh);                      1583                 put_bh(bh);
1627         return 1;                             !! 1584         return 0;
1628 }                                                1585 }
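
The interesting tail of ext4_ext_search_right() is the "got_index:" descent: step to the next index entry at some level, then keep following the first entry of each lower index block until a leaf is reached, whose first extent is the closest allocated block to the right. A toy descent over invented node types (hypothetical is_leaf/child fields, not the on-disk format):

#include <stdio.h>

struct toy_extent { unsigned lblk, len; unsigned long long pblk; };

/* A toy node is either a leaf holding extents or an index holding children. */
struct toy_node {
        int is_leaf;
        unsigned entries;
        struct toy_extent ex[4];        /* used when is_leaf */
        struct toy_node *child[4];      /* used when !is_leaf */
};

/* Follow the first entry of each index level down to a leaf and return its
 * first extent, as the read_extent_tree_block() loop above does after ix++. */
static const struct toy_extent *leftmost_extent(const struct toy_node *node)
{
        while (!node->is_leaf)
                node = node->child[0];
        return &node->ex[0];
}

int main(void)
{
        struct toy_node leaf_a = { 1, 2, { { 0, 8, 100 }, { 8, 8, 200 } }, { 0 } };
        struct toy_node leaf_b = { 1, 1, { { 64, 4, 900 } }, { 0 } };
        struct toy_node root   = { 0, 2, { { 0 } }, { &leaf_a, &leaf_b } };

        /* pretend the lookup ended in leaf_a and the next index entry is child[1] */
        const struct toy_extent *ex = leftmost_extent(root.child[1]);

        printf("next allocated block to the right: lblk %u, pblk %llu\n",
               ex->lblk, ex->pblk);
        return 0;
}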
1629                                                  1586 
1630 /*                                               1587 /*
1631  * ext4_ext_next_allocated_block:                1588  * ext4_ext_next_allocated_block:
1632  * returns allocated block in subsequent exte    1589  * returns allocated block in subsequent extent or EXT_MAX_BLOCKS.
1633  * NOTE: it considers block number from index    1590  * NOTE: it considers block number from index entry as
1634  * allocated block. Thus, index entries have     1591  * allocated block. Thus, index entries have to be consistent
1635  * with leaves.                                  1592  * with leaves.
1636  */                                              1593  */
1637 ext4_lblk_t                                      1594 ext4_lblk_t
1638 ext4_ext_next_allocated_block(struct ext4_ext    1595 ext4_ext_next_allocated_block(struct ext4_ext_path *path)
1639 {                                                1596 {
1640         int depth;                               1597         int depth;
1641                                                  1598 
1642         BUG_ON(path == NULL);                    1599         BUG_ON(path == NULL);
1643         depth = path->p_depth;                   1600         depth = path->p_depth;
1644                                                  1601 
1645         if (depth == 0 && path->p_ext == NULL    1602         if (depth == 0 && path->p_ext == NULL)
1646                 return EXT_MAX_BLOCKS;           1603                 return EXT_MAX_BLOCKS;
1647                                                  1604 
1648         while (depth >= 0) {                     1605         while (depth >= 0) {
1649                 struct ext4_ext_path *p = &pa << 
1650                                               << 
1651                 if (depth == path->p_depth) {    1606                 if (depth == path->p_depth) {
1652                         /* leaf */               1607                         /* leaf */
1653                         if (p->p_ext && p->p_ !! 1608                         if (path[depth].p_ext &&
1654                                 return le32_t !! 1609                                 path[depth].p_ext !=
                                                   >> 1610                                         EXT_LAST_EXTENT(path[depth].p_hdr))
                                                   >> 1611                           return le32_to_cpu(path[depth].p_ext[1].ee_block);
1655                 } else {                         1612                 } else {
1656                         /* index */              1613                         /* index */
1657                         if (p->p_idx != EXT_L !! 1614                         if (path[depth].p_idx !=
1658                                 return le32_t !! 1615                                         EXT_LAST_INDEX(path[depth].p_hdr))
                                                   >> 1616                           return le32_to_cpu(path[depth].p_idx[1].ei_block);
1659                 }                                1617                 }
1660                 depth--;                         1618                 depth--;
1661         }                                        1619         }
1662                                                  1620 
1663         return EXT_MAX_BLOCKS;                   1621         return EXT_MAX_BLOCKS;
1664 }                                                1622 }
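
For the leaf-level case, ext4_ext_next_allocated_block() above amounts to "the starting block of the next extent, or EXT_MAX_BLOCKS if the current extent is the last one"; the index levels only matter once the leaf is exhausted. A sketch of just the leaf case with invented types:

#include <stdio.h>

#define TOY_MAX_BLOCKS 0xffffffffu      /* stand-in for EXT_MAX_BLOCKS */

struct toy_extent { unsigned lblk, len; };

/* Next allocated block after the extent at position 'cur': the start of the
 * following extent, or "no more allocated blocks" if 'cur' is the last. */
static unsigned next_allocated_block(const struct toy_extent *ex, unsigned nr, unsigned cur)
{
        return (cur + 1 < nr) ? ex[cur + 1].lblk : TOY_MAX_BLOCKS;
}

int main(void)
{
        struct toy_extent leaf[] = { { 0, 8 }, { 32, 8 }, { 100, 4 } };

        printf("after extent 0: %u\n", next_allocated_block(leaf, 3, 0));
        printf("after extent 2: %#x\n", next_allocated_block(leaf, 3, 2));
        return 0;
}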
1665                                                  1623 
1666 /*                                               1624 /*
1667  * ext4_ext_next_leaf_block:                     1625  * ext4_ext_next_leaf_block:
1668  * returns first allocated block from next le    1626  * returns first allocated block from next leaf or EXT_MAX_BLOCKS
1669  */                                              1627  */
1670 static ext4_lblk_t ext4_ext_next_leaf_block(s    1628 static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path)
1671 {                                                1629 {
1672         int depth;                               1630         int depth;
1673                                                  1631 
1674         BUG_ON(path == NULL);                    1632         BUG_ON(path == NULL);
1675         depth = path->p_depth;                   1633         depth = path->p_depth;
1676                                                  1634 
1677         /* zero-tree has no leaf blocks at al    1635         /* zero-tree has no leaf blocks at all */
1678         if (depth == 0)                          1636         if (depth == 0)
1679                 return EXT_MAX_BLOCKS;           1637                 return EXT_MAX_BLOCKS;
1680                                                  1638 
1681         /* go to index block */                  1639         /* go to index block */
1682         depth--;                                 1640         depth--;
1683                                                  1641 
1684         while (depth >= 0) {                     1642         while (depth >= 0) {
1685                 if (path[depth].p_idx !=         1643                 if (path[depth].p_idx !=
1686                                 EXT_LAST_INDE    1644                                 EXT_LAST_INDEX(path[depth].p_hdr))
1687                         return (ext4_lblk_t)     1645                         return (ext4_lblk_t)
1688                                 le32_to_cpu(p    1646                                 le32_to_cpu(path[depth].p_idx[1].ei_block);
1689                 depth--;                         1647                 depth--;
1690         }                                        1648         }
1691                                                  1649 
1692         return EXT_MAX_BLOCKS;                   1650         return EXT_MAX_BLOCKS;
1693 }                                                1651 }
1694                                                  1652 
1695 /*                                               1653 /*
1696  * ext4_ext_correct_indexes:                     1654  * ext4_ext_correct_indexes:
1697  * if leaf gets modified and modified extent     1655  * if leaf gets modified and modified extent is first in the leaf,
1698  * then we have to correct all indexes above.    1656  * then we have to correct all indexes above.
1699  * TODO: do we need to correct tree in all ca    1657  * TODO: do we need to correct tree in all cases?
1700  */                                              1658  */
1701 static int ext4_ext_correct_indexes(handle_t     1659 static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
1702                                 struct ext4_e    1660                                 struct ext4_ext_path *path)
1703 {                                                1661 {
1704         struct ext4_extent_header *eh;           1662         struct ext4_extent_header *eh;
1705         int depth = ext_depth(inode);            1663         int depth = ext_depth(inode);
1706         struct ext4_extent *ex;                  1664         struct ext4_extent *ex;
1707         __le32 border;                           1665         __le32 border;
1708         int k, err = 0;                          1666         int k, err = 0;
1709                                                  1667 
1710         eh = path[depth].p_hdr;                  1668         eh = path[depth].p_hdr;
1711         ex = path[depth].p_ext;                  1669         ex = path[depth].p_ext;
1712                                                  1670 
1713         if (unlikely(ex == NULL || eh == NULL    1671         if (unlikely(ex == NULL || eh == NULL)) {
1714                 EXT4_ERROR_INODE(inode,          1672                 EXT4_ERROR_INODE(inode,
1715                                  "ex %p == NU    1673                                  "ex %p == NULL or eh %p == NULL", ex, eh);
1716                 return -EFSCORRUPTED;            1674                 return -EFSCORRUPTED;
1717         }                                        1675         }
1718                                                  1676 
1719         if (depth == 0) {                        1677         if (depth == 0) {
1720                 /* there is no tree at all */    1678                 /* there is no tree at all */
1721                 return 0;                        1679                 return 0;
1722         }                                        1680         }
1723                                                  1681 
1724         if (ex != EXT_FIRST_EXTENT(eh)) {        1682         if (ex != EXT_FIRST_EXTENT(eh)) {
1725                 /* we correct tree if first l    1683                 /* we correct tree if first leaf got modified only */
1726                 return 0;                        1684                 return 0;
1727         }                                        1685         }
1728                                                  1686 
1729         /*                                       1687         /*
1730          * TODO: we need correction if border    1688          * TODO: we need correction if border is smaller than current one
1731          */                                      1689          */
1732         k = depth - 1;                           1690         k = depth - 1;
1733         border = path[depth].p_ext->ee_block;    1691         border = path[depth].p_ext->ee_block;
1734         err = ext4_ext_get_access(handle, ino    1692         err = ext4_ext_get_access(handle, inode, path + k);
1735         if (err)                                 1693         if (err)
1736                 return err;                      1694                 return err;
1737         path[k].p_idx->ei_block = border;        1695         path[k].p_idx->ei_block = border;
1738         err = ext4_ext_dirty(handle, inode, p    1696         err = ext4_ext_dirty(handle, inode, path + k);
1739         if (err)                                 1697         if (err)
1740                 return err;                      1698                 return err;
1741                                                  1699 
1742         while (k--) {                            1700         while (k--) {
1743                 /* change all left-side index    1701                 /* change all left-side indexes */
1744                 if (path[k+1].p_idx != EXT_FI    1702                 if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
1745                         break;                   1703                         break;
1746                 err = ext4_ext_get_access(han    1704                 err = ext4_ext_get_access(handle, inode, path + k);
1747                 if (err)                         1705                 if (err)
1748                         goto clean;           !! 1706                         break;
1749                 path[k].p_idx->ei_block = bor    1707                 path[k].p_idx->ei_block = border;
1750                 err = ext4_ext_dirty(handle,     1708                 err = ext4_ext_dirty(handle, inode, path + k);
1751                 if (err)                         1709                 if (err)
1752                         goto clean;           !! 1710                         break;
1753         }                                        1711         }
1754         return 0;                             << 
1755                                               << 
1756 clean:                                        << 
1757         /*                                    << 
1758          * The path[k].p_bh is either unmodif << 
1759          * set (see ext4_ext_get_access()). S << 
1760          * of the successfully modified exten << 
1761          * these extents to be checked to avo << 
1762          */                                   << 
1763         while (++k < depth)                   << 
1764                 clear_buffer_verified(path[k] << 
1765                                                  1712 
1766         return err;                              1713         return err;
1767 }                                                1714 }
1768                                                  1715 
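The helper above (ext4_ext_correct_indexes(), called later from ext4_ext_insert_extent()) propagates a changed starting logical block from the first extent of a leaf into the index entries above it, and keeps climbing only while the path sits on the first index entry of each level. A minimal user-space sketch of that propagation rule follows; the struct level layout, its field names and the two-level example are illustrative stand-ins, not the ext4 on-disk format.

/*
 * Sketch of "correct indexes above": when the key of the leftmost
 * entry in a leaf changes, every ancestor level whose path entry is
 * also the leftmost one must copy the new key.  levels[0] plays the
 * role of the root, levels[depth-1] the level just above the leaf.
 */
#include <stdio.h>
#include <stdint.h>

struct level {
	uint32_t keys[4];	/* first logical block of each child */
	int	 path_slot;	/* which entry the current path uses */
};

static void correct_indexes(struct level *levels, int depth, uint32_t new_key)
{
	/* walk from the level just above the leaf toward the root */
	for (int k = depth - 1; k >= 0; k--) {
		levels[k].keys[levels[k].path_slot] = new_key;
		/*
		 * Only keep going up while we sit on the first entry of
		 * this level: otherwise the parent's key is unaffected.
		 */
		if (levels[k].path_slot != 0)
			break;
	}
}

int main(void)
{
	struct level levels[2] = {
		{ .keys = { 100, 500 }, .path_slot = 0 },	/* root  */
		{ .keys = { 100, 200 }, .path_slot = 0 },	/* index */
	};

	correct_indexes(levels, 2, 96);	/* leaf's first block moved to 96 */
	printf("root key %u, index key %u\n",
	       (unsigned)levels[0].keys[0], (unsigned)levels[1].keys[0]);
	return 0;
}

On an error partway up, the newer code above jumps to its clean: label instead of silently stopping as the old code did; the sketch does not model that error handling.
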
1769 static int ext4_can_extents_be_merged(struct  !! 1716 int
1770                                       struct  !! 1717 ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
1771                                       struct  !! 1718                                 struct ext4_extent *ex2)
1772 {                                                1719 {
1773         unsigned short ext1_ee_len, ext2_ee_l    1720         unsigned short ext1_ee_len, ext2_ee_len;
1774                                                  1721 
1775         if (ext4_ext_is_unwritten(ex1) != ext    1722         if (ext4_ext_is_unwritten(ex1) != ext4_ext_is_unwritten(ex2))
1776                 return 0;                        1723                 return 0;
1777                                                  1724 
1778         ext1_ee_len = ext4_ext_get_actual_len    1725         ext1_ee_len = ext4_ext_get_actual_len(ex1);
1779         ext2_ee_len = ext4_ext_get_actual_len    1726         ext2_ee_len = ext4_ext_get_actual_len(ex2);
1780                                                  1727 
1781         if (le32_to_cpu(ex1->ee_block) + ext1    1728         if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
1782                         le32_to_cpu(ex2->ee_b    1729                         le32_to_cpu(ex2->ee_block))
1783                 return 0;                        1730                 return 0;
1784                                                  1731 
                                                   >> 1732         /*
                                                   >> 1733          * To allow future support for preallocated extents to be added
                                                   >> 1734          * as an RO_COMPAT feature, refuse to merge to extents if
                                                   >> 1735          * this can result in the top bit of ee_len being set.
                                                   >> 1736          */
1785         if (ext1_ee_len + ext2_ee_len > EXT_I    1737         if (ext1_ee_len + ext2_ee_len > EXT_INIT_MAX_LEN)
1786                 return 0;                        1738                 return 0;
1787                                               !! 1739         /*
                                                   >> 1740          * The check for IO to unwritten extent is somewhat racy as we
                                                   >> 1741          * increment i_unwritten / set EXT4_STATE_DIO_UNWRITTEN only after
                                                   >> 1742          * dropping i_data_sem. But reserved blocks should save us in that
                                                   >> 1743          * case.
                                                   >> 1744          */
1788         if (ext4_ext_is_unwritten(ex1) &&        1745         if (ext4_ext_is_unwritten(ex1) &&
1789             ext1_ee_len + ext2_ee_len > EXT_U !! 1746             (ext4_test_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN) ||
                                                   >> 1747              atomic_read(&EXT4_I(inode)->i_unwritten) ||
                                                   >> 1748              (ext1_ee_len + ext2_ee_len > EXT_UNWRITTEN_MAX_LEN)))
1790                 return 0;                        1749                 return 0;
1791 #ifdef AGGRESSIVE_TEST                           1750 #ifdef AGGRESSIVE_TEST
1792         if (ext1_ee_len >= 4)                    1751         if (ext1_ee_len >= 4)
1793                 return 0;                        1752                 return 0;
1794 #endif                                           1753 #endif
1795                                                  1754 
1796         if (ext4_ext_pblock(ex1) + ext1_ee_le    1755         if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
1797                 return 1;                        1756                 return 1;
1798         return 0;                                1757         return 0;
1799 }                                                1758 }
1800                                                  1759 
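ext4_can_extents_be_merged() above is a pure predicate: two extents can become one only if they share the same written/unwritten state, are logically contiguous, are physically contiguous, and the combined length stays within the per-state maximum (the unwritten limit is lower because the top bit of ee_len encodes the unwritten state, as the removed comment on the right-hand side explains). A user-space sketch of the same predicate, with a simplified extent record and illustrative limit constants rather than the real ext4 definitions:

/*
 * Simplified model of the "can these two extents be merged" test.
 * EXT_INIT_MAX / EXT_UNWRITTEN_MAX mirror the idea that an unwritten
 * extent reserves the top bit of its length field, so its maximum
 * length is one block shorter than a written extent's.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

#define EXT_INIT_MAX		32768u		/* illustrative limits */
#define EXT_UNWRITTEN_MAX	(EXT_INIT_MAX - 1)

struct sext {			/* simplified extent: not the on-disk layout */
	uint32_t lblk;		/* first logical block */
	uint64_t pblk;		/* first physical block */
	uint16_t len;		/* length in blocks */
	bool	 unwritten;
};

static bool can_merge(const struct sext *a, const struct sext *b)
{
	if (a->unwritten != b->unwritten)
		return false;			/* mixed state never merges */
	if (a->lblk + a->len != b->lblk)
		return false;			/* not logically contiguous */
	if ((uint32_t)a->len + b->len >
	    (a->unwritten ? EXT_UNWRITTEN_MAX : EXT_INIT_MAX))
		return false;			/* merged length would overflow */
	return a->pblk + a->len == b->pblk;	/* must be physically contiguous */
}

int main(void)
{
	struct sext a = { .lblk = 0,  .pblk = 1000, .len = 8, .unwritten = false };
	struct sext b = { .lblk = 8,  .pblk = 1008, .len = 4, .unwritten = false };
	struct sext c = { .lblk = 12, .pblk = 2048, .len = 4, .unwritten = false };

	printf("a+b mergeable: %d\n", can_merge(&a, &b));	/* 1 */
	printf("b+c mergeable: %d\n", can_merge(&b, &c));	/* 0: physical gap */
	return 0;
}

Note that the older version on the right also refused to merge an unwritten extent while direct I/O on it might still be pending; that extra check is no longer present on the 6.12 side of the diff.
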
1801 /*                                               1760 /*
1802  * This function tries to merge the "ex" exte    1761  * This function tries to merge the "ex" extent to the next extent in the tree.
1803  * It always tries to merge towards right. If    1762  * It always tries to merge towards right. If you want to merge towards
1804  * left, pass "ex - 1" as argument instead of    1763  * left, pass "ex - 1" as argument instead of "ex".
1805  * Returns 0 if the extents (ex and ex+1) wer    1764  * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
1806  * 1 if they got merged.                         1765  * 1 if they got merged.
1807  */                                              1766  */
1808 static int ext4_ext_try_to_merge_right(struct    1767 static int ext4_ext_try_to_merge_right(struct inode *inode,
1809                                  struct ext4_    1768                                  struct ext4_ext_path *path,
1810                                  struct ext4_    1769                                  struct ext4_extent *ex)
1811 {                                                1770 {
1812         struct ext4_extent_header *eh;           1771         struct ext4_extent_header *eh;
1813         unsigned int depth, len;                 1772         unsigned int depth, len;
1814         int merge_done = 0, unwritten;           1773         int merge_done = 0, unwritten;
1815                                                  1774 
1816         depth = ext_depth(inode);                1775         depth = ext_depth(inode);
1817         BUG_ON(path[depth].p_hdr == NULL);       1776         BUG_ON(path[depth].p_hdr == NULL);
1818         eh = path[depth].p_hdr;                  1777         eh = path[depth].p_hdr;
1819                                                  1778 
1820         while (ex < EXT_LAST_EXTENT(eh)) {       1779         while (ex < EXT_LAST_EXTENT(eh)) {
1821                 if (!ext4_can_extents_be_merg    1780                 if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
1822                         break;                   1781                         break;
1823                 /* merge with next extent! */    1782                 /* merge with next extent! */
1824                 unwritten = ext4_ext_is_unwri    1783                 unwritten = ext4_ext_is_unwritten(ex);
1825                 ex->ee_len = cpu_to_le16(ext4    1784                 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
1826                                 + ext4_ext_ge    1785                                 + ext4_ext_get_actual_len(ex + 1));
1827                 if (unwritten)                   1786                 if (unwritten)
1828                         ext4_ext_mark_unwritt    1787                         ext4_ext_mark_unwritten(ex);
1829                                                  1788 
1830                 if (ex + 1 < EXT_LAST_EXTENT(    1789                 if (ex + 1 < EXT_LAST_EXTENT(eh)) {
1831                         len = (EXT_LAST_EXTEN    1790                         len = (EXT_LAST_EXTENT(eh) - ex - 1)
1832                                 * sizeof(stru    1791                                 * sizeof(struct ext4_extent);
1833                         memmove(ex + 1, ex +     1792                         memmove(ex + 1, ex + 2, len);
1834                 }                                1793                 }
1835                 le16_add_cpu(&eh->eh_entries,    1794                 le16_add_cpu(&eh->eh_entries, -1);
1836                 merge_done = 1;                  1795                 merge_done = 1;
1837                 WARN_ON(eh->eh_entries == 0);    1796                 WARN_ON(eh->eh_entries == 0);
1838                 if (!eh->eh_entries)             1797                 if (!eh->eh_entries)
1839                         EXT4_ERROR_INODE(inod    1798                         EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");
1840         }                                        1799         }
1841                                                  1800 
1842         return merge_done;                       1801         return merge_done;
1843 }                                                1802 }
1844                                                  1803 
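ext4_ext_try_to_merge_right() above absorbs as many following extents as possible into ex: it widens ex, closes the hole with memmove(), and decrements the leaf's entry count, repeating while the next neighbour is still mergeable. A self-contained sketch of that array-compaction loop; struct sext, struct leaf and the contiguity test are simplified placeholders, not the kernel structures (the real code also preserves the unwritten flag and re-runs ext4_can_extents_be_merged() each round):

/*
 * Sketch of merging an extent with its right neighbours inside one
 * leaf: grow ent[slot], then memmove the tail of the array over the
 * absorbed entry and shrink the entry count.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct sext {
	uint32_t lblk;
	uint64_t pblk;
	uint16_t len;
};

struct leaf {
	struct sext ent[8];
	int nr;
};

static int contiguous(const struct sext *a, const struct sext *b)
{
	return a->lblk + a->len == b->lblk && a->pblk + a->len == b->pblk;
}

/* Merge ent[slot] with as many following entries as possible. */
static int try_to_merge_right(struct leaf *leaf, int slot)
{
	int merged = 0;

	while (slot + 1 < leaf->nr &&
	       contiguous(&leaf->ent[slot], &leaf->ent[slot + 1])) {
		leaf->ent[slot].len += leaf->ent[slot + 1].len;
		/* close the hole left by the absorbed entry */
		memmove(&leaf->ent[slot + 1], &leaf->ent[slot + 2],
			(leaf->nr - slot - 2) * sizeof(struct sext));
		leaf->nr--;
		merged = 1;
	}
	return merged;
}

int main(void)
{
	struct leaf l = {
		.ent = {
			{ .lblk = 0,  .pblk = 100, .len = 4 },
			{ .lblk = 4,  .pblk = 104, .len = 4 },
			{ .lblk = 8,  .pblk = 108, .len = 2 },
			{ .lblk = 20, .pblk = 300, .len = 1 },
		},
		.nr = 4,
	};

	try_to_merge_right(&l, 0);
	printf("%d entries, first extent %d blocks long\n", l.nr, l.ent[0].len);
	return 0;
}
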
1845 /*                                               1804 /*
1846  * This function does a very simple check to     1805  * This function does a very simple check to see if we can collapse
1847  * an extent tree with a single extent tree l    1806  * an extent tree with a single extent tree leaf block into the inode.
1848  */                                              1807  */
1849 static void ext4_ext_try_to_merge_up(handle_t    1808 static void ext4_ext_try_to_merge_up(handle_t *handle,
1850                                      struct i    1809                                      struct inode *inode,
1851                                      struct e    1810                                      struct ext4_ext_path *path)
1852 {                                                1811 {
1853         size_t s;                                1812         size_t s;
1854         unsigned max_root = ext4_ext_space_ro    1813         unsigned max_root = ext4_ext_space_root(inode, 0);
1855         ext4_fsblk_t blk;                        1814         ext4_fsblk_t blk;
1856                                                  1815 
1857         if ((path[0].p_depth != 1) ||            1816         if ((path[0].p_depth != 1) ||
1858             (le16_to_cpu(path[0].p_hdr->eh_en    1817             (le16_to_cpu(path[0].p_hdr->eh_entries) != 1) ||
1859             (le16_to_cpu(path[1].p_hdr->eh_en    1818             (le16_to_cpu(path[1].p_hdr->eh_entries) > max_root))
1860                 return;                          1819                 return;
1861                                                  1820 
1862         /*                                       1821         /*
1863          * We need to modify the block alloca    1822          * We need to modify the block allocation bitmap and the block
1864          * group descriptor to release the ex    1823          * group descriptor to release the extent tree block.  If we
1865          * can't get the journal credits, giv    1824          * can't get the journal credits, give up.
1866          */                                      1825          */
1867         if (ext4_journal_extend(handle, 2,    !! 1826         if (ext4_journal_extend(handle, 2))
1868                         ext4_free_metadata_re << 
1869                 return;                          1827                 return;
1870                                                  1828 
1871         /*                                       1829         /*
1872          * Copy the extent data up to the ino    1830          * Copy the extent data up to the inode
1873          */                                      1831          */
1874         blk = ext4_idx_pblock(path[0].p_idx);    1832         blk = ext4_idx_pblock(path[0].p_idx);
1875         s = le16_to_cpu(path[1].p_hdr->eh_ent    1833         s = le16_to_cpu(path[1].p_hdr->eh_entries) *
1876                 sizeof(struct ext4_extent_idx    1834                 sizeof(struct ext4_extent_idx);
1877         s += sizeof(struct ext4_extent_header    1835         s += sizeof(struct ext4_extent_header);
1878                                                  1836 
1879         path[1].p_maxdepth = path[0].p_maxdep    1837         path[1].p_maxdepth = path[0].p_maxdepth;
1880         memcpy(path[0].p_hdr, path[1].p_hdr,     1838         memcpy(path[0].p_hdr, path[1].p_hdr, s);
1881         path[0].p_depth = 0;                     1839         path[0].p_depth = 0;
1882         path[0].p_ext = EXT_FIRST_EXTENT(path    1840         path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) +
1883                 (path[1].p_ext - EXT_FIRST_EX    1841                 (path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr));
1884         path[0].p_hdr->eh_max = cpu_to_le16(m    1842         path[0].p_hdr->eh_max = cpu_to_le16(max_root);
1885                                                  1843 
1886         ext4_ext_path_brelse(path + 1);       !! 1844         brelse(path[1].p_bh);
1887         ext4_free_blocks(handle, inode, NULL,    1845         ext4_free_blocks(handle, inode, NULL, blk, 1,
1888                          EXT4_FREE_BLOCKS_MET    1846                          EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
1889 }                                                1847 }
1890                                                  1848 
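ext4_ext_try_to_merge_up() above covers one narrow case: the tree has depth 1, the root holds a single index entry, and the leaf's entries fit in the space the inode root can offer, so the leaf is copied up into the root, the depth drops to zero, and the leaf block is released (after making sure the journal handle can be extended to cover the bitmap and group-descriptor updates). A rough in-memory sketch of that collapse step, with hypothetical node types and a made-up ROOT_CAPACITY in place of ext4_ext_space_root():

/*
 * Sketch of collapsing a depth-1 extent tree into its root: if the
 * root points at exactly one leaf and that leaf's entries fit in the
 * root's own space, copy them up and drop the extra level.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#define ROOT_CAPACITY 4		/* extents that fit in the inode root */

struct node {
	int depth;		/* 0 = leaf, 1 = index level above leaves */
	int nr;			/* populated entries */
	uint32_t entries[16];	/* extent records (leaf) or child ids (index) */
	struct node *child;	/* only used while depth == 1 */
};

static void try_to_merge_up(struct node *root)
{
	struct node *leaf = root->child;

	/* only a one-index root whose single leaf fits can be collapsed */
	if (root->depth != 1 || root->nr != 1 || leaf->nr > ROOT_CAPACITY)
		return;

	memcpy(root->entries, leaf->entries, leaf->nr * sizeof(uint32_t));
	root->nr = leaf->nr;
	root->depth = 0;
	root->child = NULL;
	free(leaf);		/* the kernel frees the on-disk leaf block here */
}

int main(void)
{
	struct node *leaf = calloc(1, sizeof(*leaf));
	struct node root = { .depth = 1, .nr = 1, .child = leaf };

	if (!leaf)
		return 1;
	leaf->nr = 3;
	leaf->entries[0] = 10;
	leaf->entries[1] = 20;
	leaf->entries[2] = 30;

	try_to_merge_up(&root);
	printf("depth %d, %d entries in root\n", root.depth, root.nr);
	return 0;
}
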
1891 /*                                               1849 /*
1892  * This function tries to merge the @ex exten !! 1850  * This function tries to merge the @ex extent to neighbours in the tree.
1893  * tries to collapse the extent tree into the !! 1851  * return 1 if merge left else 0.
1894  */                                              1852  */
1895 static void ext4_ext_try_to_merge(handle_t *h    1853 static void ext4_ext_try_to_merge(handle_t *handle,
1896                                   struct inod    1854                                   struct inode *inode,
1897                                   struct ext4    1855                                   struct ext4_ext_path *path,
1898                                   struct ext4 !! 1856                                   struct ext4_extent *ex) {
1899 {                                             << 
1900         struct ext4_extent_header *eh;           1857         struct ext4_extent_header *eh;
1901         unsigned int depth;                      1858         unsigned int depth;
1902         int merge_done = 0;                      1859         int merge_done = 0;
1903                                                  1860 
1904         depth = ext_depth(inode);                1861         depth = ext_depth(inode);
1905         BUG_ON(path[depth].p_hdr == NULL);       1862         BUG_ON(path[depth].p_hdr == NULL);
1906         eh = path[depth].p_hdr;                  1863         eh = path[depth].p_hdr;
1907                                                  1864 
1908         if (ex > EXT_FIRST_EXTENT(eh))           1865         if (ex > EXT_FIRST_EXTENT(eh))
1909                 merge_done = ext4_ext_try_to_    1866                 merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);
1910                                                  1867 
1911         if (!merge_done)                         1868         if (!merge_done)
1912                 (void) ext4_ext_try_to_merge_    1869                 (void) ext4_ext_try_to_merge_right(inode, path, ex);
1913                                                  1870 
1914         ext4_ext_try_to_merge_up(handle, inod    1871         ext4_ext_try_to_merge_up(handle, inode, path);
1915 }                                                1872 }
1916                                                  1873 
1917 /*                                               1874 /*
1918  * check if a portion of the "newext" extent     1875  * check if a portion of the "newext" extent overlaps with an
1919  * existing extent.                              1876  * existing extent.
1920  *                                               1877  *
1921  * If there is an overlap discovered, it upda    1878  * If there is an overlap discovered, it updates the length of the newext
1922  * such that there will be no overlap, and th    1879  * such that there will be no overlap, and then returns 1.
1923  * If there is no overlap found, it returns 0    1880  * If there is no overlap found, it returns 0.
1924  */                                              1881  */
1925 static unsigned int ext4_ext_check_overlap(st    1882 static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
1926                                            st    1883                                            struct inode *inode,
1927                                            st    1884                                            struct ext4_extent *newext,
1928                                            st    1885                                            struct ext4_ext_path *path)
1929 {                                                1886 {
1930         ext4_lblk_t b1, b2;                      1887         ext4_lblk_t b1, b2;
1931         unsigned int depth, len1;                1888         unsigned int depth, len1;
1932         unsigned int ret = 0;                    1889         unsigned int ret = 0;
1933                                                  1890 
1934         b1 = le32_to_cpu(newext->ee_block);      1891         b1 = le32_to_cpu(newext->ee_block);
1935         len1 = ext4_ext_get_actual_len(newext    1892         len1 = ext4_ext_get_actual_len(newext);
1936         depth = ext_depth(inode);                1893         depth = ext_depth(inode);
1937         if (!path[depth].p_ext)                  1894         if (!path[depth].p_ext)
1938                 goto out;                        1895                 goto out;
1939         b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu    1896         b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block));
1940                                                  1897 
1941         /*                                       1898         /*
1942          * get the next allocated block if th    1899          * get the next allocated block if the extent in the path
1943          * is before the requested block(s)      1900          * is before the requested block(s)
1944          */                                      1901          */
1945         if (b2 < b1) {                           1902         if (b2 < b1) {
1946                 b2 = ext4_ext_next_allocated_    1903                 b2 = ext4_ext_next_allocated_block(path);
1947                 if (b2 == EXT_MAX_BLOCKS)        1904                 if (b2 == EXT_MAX_BLOCKS)
1948                         goto out;                1905                         goto out;
1949                 b2 = EXT4_LBLK_CMASK(sbi, b2)    1906                 b2 = EXT4_LBLK_CMASK(sbi, b2);
1950         }                                        1907         }
1951                                                  1908 
1952         /* check for wrap through zero on ext    1909         /* check for wrap through zero on extent logical start block*/
1953         if (b1 + len1 < b1) {                    1910         if (b1 + len1 < b1) {
1954                 len1 = EXT_MAX_BLOCKS - b1;      1911                 len1 = EXT_MAX_BLOCKS - b1;
1955                 newext->ee_len = cpu_to_le16(    1912                 newext->ee_len = cpu_to_le16(len1);
1956                 ret = 1;                         1913                 ret = 1;
1957         }                                        1914         }
1958                                                  1915 
1959         /* check for overlap */                  1916         /* check for overlap */
1960         if (b1 + len1 > b2) {                    1917         if (b1 + len1 > b2) {
1961                 newext->ee_len = cpu_to_le16(    1918                 newext->ee_len = cpu_to_le16(b2 - b1);
1962                 ret = 1;                         1919                 ret = 1;
1963         }                                        1920         }
1964 out:                                             1921 out:
1965         return ret;                              1922         return ret;
1966 }                                                1923 }
1967                                                  1924 
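ext4_ext_check_overlap() above shortens a new extent so it cannot collide with existing mappings: given a candidate [b1, b1 + len1) and the start b2 of the next existing allocation, it first clamps the length against wrap-around past the last valid logical block and then against b2, returning 1 whenever it shortened the extent. A small sketch of that clamping arithmetic; MAX_BLOCKS is an illustrative stand-in for EXT_MAX_BLOCKS, and the sketch assumes b2 has already been advanced to the first allocation at or after b1 (the kernel does that with ext4_ext_next_allocated_block()):

/*
 * Clamp a candidate extent [b1, b1+len1) so it neither wraps past the
 * last valid logical block nor runs into the next existing allocation
 * at b2.  Returns 1 if the length was shortened, 0 otherwise.
 */
#include <stdio.h>
#include <stdint.h>

#define MAX_BLOCKS 0xffffffffu

static int clamp_new_extent(uint32_t b1, uint32_t *len1, uint32_t b2)
{
	int shortened = 0;

	/* wrap through zero on the logical start block */
	if (b1 + *len1 < b1) {
		*len1 = MAX_BLOCKS - b1;
		shortened = 1;
	}
	/* would run into the existing extent starting at b2 */
	if (b1 + *len1 > b2) {
		*len1 = b2 - b1;
		shortened = 1;
	}
	return shortened;
}

int main(void)
{
	uint32_t len = 100;

	/* new extent at block 50, existing allocation starts at block 120 */
	int r = clamp_new_extent(50, &len, 120);
	printf("shortened=%d len=%u\n", r, (unsigned)len);	/* shortened=1 len=70 */
	return 0;
}
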
1968 /*                                               1925 /*
1969  * ext4_ext_insert_extent:                       1926  * ext4_ext_insert_extent:
1970  * tries to merge requested extent into the e !! 1927  * tries to merge requsted extent into the existing extent or
1971  * inserts requested extent as new one into t    1928  * inserts requested extent as new one into the tree,
1972  * creating new leaf in the no-space case.       1929  * creating new leaf in the no-space case.
1973  */                                              1930  */
1974 struct ext4_ext_path *                        !! 1931 int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
1975 ext4_ext_insert_extent(handle_t *handle, stru !! 1932                                 struct ext4_ext_path **ppath,
1976                        struct ext4_ext_path * !! 1933                                 struct ext4_extent *newext, int gb_flags)
1977                        struct ext4_extent *ne << 
1978 {                                                1934 {
                                                   >> 1935         struct ext4_ext_path *path = *ppath;
1979         struct ext4_extent_header *eh;           1936         struct ext4_extent_header *eh;
1980         struct ext4_extent *ex, *fex;            1937         struct ext4_extent *ex, *fex;
1981         struct ext4_extent *nearex; /* neares    1938         struct ext4_extent *nearex; /* nearest extent */
1982         int depth, len, err = 0;              !! 1939         struct ext4_ext_path *npath = NULL;
                                                   >> 1940         int depth, len, err;
1983         ext4_lblk_t next;                        1941         ext4_lblk_t next;
1984         int mb_flags = 0, unwritten;             1942         int mb_flags = 0, unwritten;
1985                                                  1943 
1986         if (gb_flags & EXT4_GET_BLOCKS_DELALL    1944         if (gb_flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
1987                 mb_flags |= EXT4_MB_DELALLOC_    1945                 mb_flags |= EXT4_MB_DELALLOC_RESERVED;
1988         if (unlikely(ext4_ext_get_actual_len(    1946         if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
1989                 EXT4_ERROR_INODE(inode, "ext4    1947                 EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
1990                 err = -EFSCORRUPTED;          !! 1948                 return -EFSCORRUPTED;
1991                 goto errout;                  << 
1992         }                                        1949         }
1993         depth = ext_depth(inode);                1950         depth = ext_depth(inode);
1994         ex = path[depth].p_ext;                  1951         ex = path[depth].p_ext;
1995         eh = path[depth].p_hdr;                  1952         eh = path[depth].p_hdr;
1996         if (unlikely(path[depth].p_hdr == NUL    1953         if (unlikely(path[depth].p_hdr == NULL)) {
1997                 EXT4_ERROR_INODE(inode, "path    1954                 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
1998                 err = -EFSCORRUPTED;          !! 1955                 return -EFSCORRUPTED;
1999                 goto errout;                  << 
2000         }                                        1956         }
2001                                                  1957 
2002         /* try to insert block into found ext    1958         /* try to insert block into found extent and return */
2003         if (ex && !(gb_flags & EXT4_GET_BLOCK    1959         if (ex && !(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) {
2004                                                  1960 
2005                 /*                               1961                 /*
2006                  * Try to see whether we shou    1962                  * Try to see whether we should rather test the extent on
2007                  * right from ex, or from the    1963                  * right from ex, or from the left of ex. This is because
2008                  * ext4_find_extent() can ret    1964                  * ext4_find_extent() can return either extent on the
2009                  * left, or on the right from    1965                  * left, or on the right from the searched position. This
2010                  * will make merging more eff    1966                  * will make merging more effective.
2011                  */                              1967                  */
2012                 if (ex < EXT_LAST_EXTENT(eh)     1968                 if (ex < EXT_LAST_EXTENT(eh) &&
2013                     (le32_to_cpu(ex->ee_block    1969                     (le32_to_cpu(ex->ee_block) +
2014                     ext4_ext_get_actual_len(e    1970                     ext4_ext_get_actual_len(ex) <
2015                     le32_to_cpu(newext->ee_bl    1971                     le32_to_cpu(newext->ee_block))) {
2016                         ex += 1;                 1972                         ex += 1;
2017                         goto prepend;            1973                         goto prepend;
2018                 } else if ((ex > EXT_FIRST_EX    1974                 } else if ((ex > EXT_FIRST_EXTENT(eh)) &&
2019                            (le32_to_cpu(newex    1975                            (le32_to_cpu(newext->ee_block) +
2020                            ext4_ext_get_actua    1976                            ext4_ext_get_actual_len(newext) <
2021                            le32_to_cpu(ex->ee    1977                            le32_to_cpu(ex->ee_block)))
2022                         ex -= 1;                 1978                         ex -= 1;
2023                                                  1979 
2024                 /* Try to append newex to the    1980                 /* Try to append newex to the ex */
2025                 if (ext4_can_extents_be_merge    1981                 if (ext4_can_extents_be_merged(inode, ex, newext)) {
2026                         ext_debug(inode, "app !! 1982                         ext_debug("append [%d]%d block to %u:[%d]%d"
2027                                   "(from %llu    1983                                   "(from %llu)\n",
2028                                   ext4_ext_is    1984                                   ext4_ext_is_unwritten(newext),
2029                                   ext4_ext_ge    1985                                   ext4_ext_get_actual_len(newext),
2030                                   le32_to_cpu    1986                                   le32_to_cpu(ex->ee_block),
2031                                   ext4_ext_is    1987                                   ext4_ext_is_unwritten(ex),
2032                                   ext4_ext_ge    1988                                   ext4_ext_get_actual_len(ex),
2033                                   ext4_ext_pb    1989                                   ext4_ext_pblock(ex));
2034                         err = ext4_ext_get_ac    1990                         err = ext4_ext_get_access(handle, inode,
2035                                                  1991                                                   path + depth);
2036                         if (err)                 1992                         if (err)
2037                                 goto errout;  !! 1993                                 return err;
2038                         unwritten = ext4_ext_    1994                         unwritten = ext4_ext_is_unwritten(ex);
2039                         ex->ee_len = cpu_to_l    1995                         ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
2040                                         + ext    1996                                         + ext4_ext_get_actual_len(newext));
2041                         if (unwritten)           1997                         if (unwritten)
2042                                 ext4_ext_mark    1998                                 ext4_ext_mark_unwritten(ex);
                                                   >> 1999                         eh = path[depth].p_hdr;
2043                         nearex = ex;             2000                         nearex = ex;
2044                         goto merge;              2001                         goto merge;
2045                 }                                2002                 }
2046                                                  2003 
2047 prepend:                                         2004 prepend:
2048                 /* Try to prepend newex to th    2005                 /* Try to prepend newex to the ex */
2049                 if (ext4_can_extents_be_merge    2006                 if (ext4_can_extents_be_merged(inode, newext, ex)) {
2050                         ext_debug(inode, "pre !! 2007                         ext_debug("prepend %u[%d]%d block to %u:[%d]%d"
2051                                   "(from %llu    2008                                   "(from %llu)\n",
2052                                   le32_to_cpu    2009                                   le32_to_cpu(newext->ee_block),
2053                                   ext4_ext_is    2010                                   ext4_ext_is_unwritten(newext),
2054                                   ext4_ext_ge    2011                                   ext4_ext_get_actual_len(newext),
2055                                   le32_to_cpu    2012                                   le32_to_cpu(ex->ee_block),
2056                                   ext4_ext_is    2013                                   ext4_ext_is_unwritten(ex),
2057                                   ext4_ext_ge    2014                                   ext4_ext_get_actual_len(ex),
2058                                   ext4_ext_pb    2015                                   ext4_ext_pblock(ex));
2059                         err = ext4_ext_get_ac    2016                         err = ext4_ext_get_access(handle, inode,
2060                                                  2017                                                   path + depth);
2061                         if (err)                 2018                         if (err)
2062                                 goto errout;  !! 2019                                 return err;
2063                                                  2020 
2064                         unwritten = ext4_ext_    2021                         unwritten = ext4_ext_is_unwritten(ex);
2065                         ex->ee_block = newext    2022                         ex->ee_block = newext->ee_block;
2066                         ext4_ext_store_pblock    2023                         ext4_ext_store_pblock(ex, ext4_ext_pblock(newext));
2067                         ex->ee_len = cpu_to_l    2024                         ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
2068                                         + ext    2025                                         + ext4_ext_get_actual_len(newext));
2069                         if (unwritten)           2026                         if (unwritten)
2070                                 ext4_ext_mark    2027                                 ext4_ext_mark_unwritten(ex);
                                                   >> 2028                         eh = path[depth].p_hdr;
2071                         nearex = ex;             2029                         nearex = ex;
2072                         goto merge;              2030                         goto merge;
2073                 }                                2031                 }
2074         }                                        2032         }
2075                                                  2033 
2076         depth = ext_depth(inode);                2034         depth = ext_depth(inode);
2077         eh = path[depth].p_hdr;                  2035         eh = path[depth].p_hdr;
2078         if (le16_to_cpu(eh->eh_entries) < le1    2036         if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
2079                 goto has_space;                  2037                 goto has_space;
2080                                                  2038 
2081         /* probably next leaf has space for u    2039         /* probably next leaf has space for us? */
2082         fex = EXT_LAST_EXTENT(eh);               2040         fex = EXT_LAST_EXTENT(eh);
2083         next = EXT_MAX_BLOCKS;                   2041         next = EXT_MAX_BLOCKS;
2084         if (le32_to_cpu(newext->ee_block) > l    2042         if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
2085                 next = ext4_ext_next_leaf_blo    2043                 next = ext4_ext_next_leaf_block(path);
2086         if (next != EXT_MAX_BLOCKS) {            2044         if (next != EXT_MAX_BLOCKS) {
2087                 struct ext4_ext_path *npath;  !! 2045                 ext_debug("next leaf block - %u\n", next);
2088                                               !! 2046                 BUG_ON(npath != NULL);
2089                 ext_debug(inode, "next leaf b !! 2047                 npath = ext4_find_extent(inode, next, NULL, 0);
2090                 npath = ext4_find_extent(inod !! 2048                 if (IS_ERR(npath))
2091                 if (IS_ERR(npath)) {          !! 2049                         return PTR_ERR(npath);
2092                         err = PTR_ERR(npath); << 
2093                         goto errout;          << 
2094                 }                             << 
2095                 BUG_ON(npath->p_depth != path    2050                 BUG_ON(npath->p_depth != path->p_depth);
2096                 eh = npath[depth].p_hdr;         2051                 eh = npath[depth].p_hdr;
2097                 if (le16_to_cpu(eh->eh_entrie    2052                 if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
2098                         ext_debug(inode, "nex !! 2053                         ext_debug("next leaf isn't full(%d)\n",
2099                                   le16_to_cpu    2054                                   le16_to_cpu(eh->eh_entries));
2100                         ext4_free_ext_path(pa << 
2101                         path = npath;            2055                         path = npath;
2102                         goto has_space;          2056                         goto has_space;
2103                 }                                2057                 }
2104                 ext_debug(inode, "next leaf h !! 2058                 ext_debug("next leaf has no free space(%d,%d)\n",
2105                           le16_to_cpu(eh->eh_    2059                           le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
2106                 ext4_free_ext_path(npath);    << 
2107         }                                        2060         }
2108                                                  2061 
2109         /*                                       2062         /*
2110          * There is no free space in the foun    2063          * There is no free space in the found leaf.
2111          * We're gonna add a new leaf in the     2064          * We're gonna add a new leaf in the tree.
2112          */                                      2065          */
2113         if (gb_flags & EXT4_GET_BLOCKS_METADA    2066         if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
2114                 mb_flags |= EXT4_MB_USE_RESER    2067                 mb_flags |= EXT4_MB_USE_RESERVED;
2115         path = ext4_ext_create_new_leaf(handl !! 2068         err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags,
2116                                         path, !! 2069                                        ppath, newext);
2117         if (IS_ERR(path))                     !! 2070         if (err)
2118                 return path;                  !! 2071                 goto cleanup;
2119         depth = ext_depth(inode);                2072         depth = ext_depth(inode);
2120         eh = path[depth].p_hdr;                  2073         eh = path[depth].p_hdr;
2121                                                  2074 
2122 has_space:                                       2075 has_space:
2123         nearex = path[depth].p_ext;              2076         nearex = path[depth].p_ext;
2124                                                  2077 
2125         err = ext4_ext_get_access(handle, ino    2078         err = ext4_ext_get_access(handle, inode, path + depth);
2126         if (err)                                 2079         if (err)
2127                 goto errout;                  !! 2080                 goto cleanup;
2128                                                  2081 
2129         if (!nearex) {                           2082         if (!nearex) {
2130                 /* there is no extent in this    2083                 /* there is no extent in this leaf, create first one */
2131                 ext_debug(inode, "first exten !! 2084                 ext_debug("first extent in the leaf: %u:%llu:[%d]%d\n",
2132                                 le32_to_cpu(n    2085                                 le32_to_cpu(newext->ee_block),
2133                                 ext4_ext_pblo    2086                                 ext4_ext_pblock(newext),
2134                                 ext4_ext_is_u    2087                                 ext4_ext_is_unwritten(newext),
2135                                 ext4_ext_get_    2088                                 ext4_ext_get_actual_len(newext));
2136                 nearex = EXT_FIRST_EXTENT(eh)    2089                 nearex = EXT_FIRST_EXTENT(eh);
2137         } else {                                 2090         } else {
2138                 if (le32_to_cpu(newext->ee_bl    2091                 if (le32_to_cpu(newext->ee_block)
2139                            > le32_to_cpu(near    2092                            > le32_to_cpu(nearex->ee_block)) {
2140                         /* Insert after */       2093                         /* Insert after */
2141                         ext_debug(inode, "ins !! 2094                         ext_debug("insert %u:%llu:[%d]%d before: "
2142                                         "near    2095                                         "nearest %p\n",
2143                                         le32_    2096                                         le32_to_cpu(newext->ee_block),
2144                                         ext4_    2097                                         ext4_ext_pblock(newext),
2145                                         ext4_    2098                                         ext4_ext_is_unwritten(newext),
2146                                         ext4_    2099                                         ext4_ext_get_actual_len(newext),
2147                                         neare    2100                                         nearex);
2148                         nearex++;                2101                         nearex++;
2149                 } else {                         2102                 } else {
2150                         /* Insert before */      2103                         /* Insert before */
2151                         BUG_ON(newext->ee_blo    2104                         BUG_ON(newext->ee_block == nearex->ee_block);
2152                         ext_debug(inode, "ins !! 2105                         ext_debug("insert %u:%llu:[%d]%d after: "
2153                                         "near    2106                                         "nearest %p\n",
2154                                         le32_    2107                                         le32_to_cpu(newext->ee_block),
2155                                         ext4_    2108                                         ext4_ext_pblock(newext),
2156                                         ext4_    2109                                         ext4_ext_is_unwritten(newext),
2157                                         ext4_    2110                                         ext4_ext_get_actual_len(newext),
2158                                         neare    2111                                         nearex);
2159                 }                                2112                 }
2160                 len = EXT_LAST_EXTENT(eh) - n    2113                 len = EXT_LAST_EXTENT(eh) - nearex + 1;
2161                 if (len > 0) {                   2114                 if (len > 0) {
2162                         ext_debug(inode, "ins !! 2115                         ext_debug("insert %u:%llu:[%d]%d: "
2163                                         "move    2116                                         "move %d extents from 0x%p to 0x%p\n",
2164                                         le32_    2117                                         le32_to_cpu(newext->ee_block),
2165                                         ext4_    2118                                         ext4_ext_pblock(newext),
2166                                         ext4_    2119                                         ext4_ext_is_unwritten(newext),
2167                                         ext4_    2120                                         ext4_ext_get_actual_len(newext),
2168                                         len,     2121                                         len, nearex, nearex + 1);
2169                         memmove(nearex + 1, n    2122                         memmove(nearex + 1, nearex,
2170                                 len * sizeof(    2123                                 len * sizeof(struct ext4_extent));
2171                 }                                2124                 }
2172         }                                        2125         }
2173                                                  2126 
2174         le16_add_cpu(&eh->eh_entries, 1);        2127         le16_add_cpu(&eh->eh_entries, 1);
2175         path[depth].p_ext = nearex;              2128         path[depth].p_ext = nearex;
2176         nearex->ee_block = newext->ee_block;     2129         nearex->ee_block = newext->ee_block;
2177         ext4_ext_store_pblock(nearex, ext4_ex    2130         ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
2178         nearex->ee_len = newext->ee_len;         2131         nearex->ee_len = newext->ee_len;
2179                                                  2132 
2180 merge:                                           2133 merge:
2181         /* try to merge extents */               2134         /* try to merge extents */
2182         if (!(gb_flags & EXT4_GET_BLOCKS_PRE_    2135         if (!(gb_flags & EXT4_GET_BLOCKS_PRE_IO))
2183                 ext4_ext_try_to_merge(handle,    2136                 ext4_ext_try_to_merge(handle, inode, path, nearex);
2184                                                  2137 
                                                   >> 2138 
2185         /* time to correct all indexes above     2139         /* time to correct all indexes above */
2186         err = ext4_ext_correct_indexes(handle    2140         err = ext4_ext_correct_indexes(handle, inode, path);
2187         if (err)                                 2141         if (err)
2188                 goto errout;                  !! 2142                 goto cleanup;
2189                                                  2143 
2190         err = ext4_ext_dirty(handle, inode, p    2144         err = ext4_ext_dirty(handle, inode, path + path->p_depth);
2191         if (err)                              << 
2192                 goto errout;                  << 
2193                                               << 
2194         return path;                          << 
2195                                                  2145 
2196 errout:                                       !! 2146 cleanup:
2197         ext4_free_ext_path(path);             !! 2147         ext4_ext_drop_refs(npath);
2198         return ERR_PTR(err);                  !! 2148         kfree(npath);
                                                   >> 2149         return err;
2199 }                                                2150 }
2200                                                  2151 
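Once ext4_ext_insert_extent() above has found (or created) a leaf with free space, the insertion itself is an ordered-array insert: locate the nearest entry, step past it when the new extent starts later, shift the tail one slot to the right with memmove(), and store the new record before bumping eh_entries. A standalone sketch of just that step; the record type and fixed capacity are simplified stand-ins, and the position search is a linear scan rather than the path lookup the kernel already has in hand:

/*
 * Sketch of inserting a new extent record into a leaf that still has
 * room, keeping the entries sorted by starting logical block.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct sext {
	uint32_t lblk;
	uint16_t len;
};

struct leaf {
	struct sext ent[8];
	int nr;			/* entries in use, nr < 8 on entry */
};

static void insert_sorted(struct leaf *leaf, struct sext newext)
{
	int pos = 0;

	/* find the first entry that starts after the new extent */
	while (pos < leaf->nr && leaf->ent[pos].lblk < newext.lblk)
		pos++;

	/* shift the tail one slot to the right to open a hole */
	memmove(&leaf->ent[pos + 1], &leaf->ent[pos],
		(leaf->nr - pos) * sizeof(struct sext));
	leaf->ent[pos] = newext;
	leaf->nr++;
}

int main(void)
{
	struct leaf l = {
		.ent = { { .lblk = 0, .len = 4 }, { .lblk = 100, .len = 8 } },
		.nr = 2,
	};

	insert_sorted(&l, (struct sext){ .lblk = 40, .len = 16 });
	for (int i = 0; i < l.nr; i++)
		printf("entry %d: %u+%d\n", i, (unsigned)l.ent[i].lblk, l.ent[i].len);
	return 0;
}

The diff also shows the newer calling convention: the 6.12 function takes and returns the path (ERR_PTR on failure, freeing it on the errout path) instead of returning an int alongside a struct ext4_ext_path **ppath.
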
2201 static int ext4_fill_es_cache_info(struct ino !! 2152 static int ext4_fill_fiemap_extents(struct inode *inode,
2202                                    ext4_lblk_ !! 2153                                     ext4_lblk_t block, ext4_lblk_t num,
2203                                    struct fie !! 2154                                     struct fiemap_extent_info *fieinfo)
2204 {                                                2155 {
2205         ext4_lblk_t next, end = block + num - !! 2156         struct ext4_ext_path *path = NULL;
                                                   >> 2157         struct ext4_extent *ex;
2206         struct extent_status es;                 2158         struct extent_status es;
                                                   >> 2159         ext4_lblk_t next, next_del, start = 0, end = 0;
                                                   >> 2160         ext4_lblk_t last = block + num;
                                                   >> 2161         int exists, depth = 0, err = 0;
                                                   >> 2162         unsigned int flags = 0;
2207         unsigned char blksize_bits = inode->i    2163         unsigned char blksize_bits = inode->i_sb->s_blocksize_bits;
2208         unsigned int flags;                   << 
2209         int err;                              << 
2210                                                  2164 
2211         while (block <= end) {                !! 2165         while (block < last && block != EXT_MAX_BLOCKS) {
2212                 next = 0;                     !! 2166                 num = last - block;
2213                 flags = 0;                    !! 2167                 /* find extent for this block */
2214                 if (!ext4_es_lookup_extent(in !! 2168                 down_read(&EXT4_I(inode)->i_data_sem);
                                                   >> 2169 
                                                   >> 2170                 path = ext4_find_extent(inode, block, &path, 0);
                                                   >> 2171                 if (IS_ERR(path)) {
                                                   >> 2172                         up_read(&EXT4_I(inode)->i_data_sem);
                                                   >> 2173                         err = PTR_ERR(path);
                                                   >> 2174                         path = NULL;
2215                         break;                   2175                         break;
2216                 if (ext4_es_is_unwritten(&es) !! 2176                 }
2217                         flags |= FIEMAP_EXTEN !! 2177 
2218                 if (ext4_es_is_delayed(&es))  !! 2178                 depth = ext_depth(inode);
                                                   >> 2179                 if (unlikely(path[depth].p_hdr == NULL)) {
                                                   >> 2180                         up_read(&EXT4_I(inode)->i_data_sem);
                                                   >> 2181                         EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
                                                   >> 2182                         err = -EFSCORRUPTED;
                                                   >> 2183                         break;
                                                   >> 2184                 }
                                                   >> 2185                 ex = path[depth].p_ext;
                                                   >> 2186                 next = ext4_ext_next_allocated_block(path);
                                                   >> 2187 
                                                   >> 2188                 flags = 0;
                                                   >> 2189                 exists = 0;
                                                   >> 2190                 if (!ex) {
                                                   >> 2191                         /* there is no extent yet, so try to allocate
                                                   >> 2192                          * all requested space */
                                                   >> 2193                         start = block;
                                                   >> 2194                         end = block + num;
                                                   >> 2195                 } else if (le32_to_cpu(ex->ee_block) > block) {
                                                   >> 2196                         /* need to allocate space before found extent */
                                                   >> 2197                         start = block;
                                                   >> 2198                         end = le32_to_cpu(ex->ee_block);
                                                   >> 2199                         if (block + num < end)
                                                   >> 2200                                 end = block + num;
                                                   >> 2201                 } else if (block >= le32_to_cpu(ex->ee_block)
                                                   >> 2202                                         + ext4_ext_get_actual_len(ex)) {
                                                   >> 2203                         /* need to allocate space after found extent */
                                                   >> 2204                         start = block;
                                                   >> 2205                         end = block + num;
                                                   >> 2206                         if (end >= next)
                                                   >> 2207                                 end = next;
                                                   >> 2208                 } else if (block >= le32_to_cpu(ex->ee_block)) {
                                                   >> 2209                         /*
                                                   >> 2210                          * some part of requested space is covered
                                                   >> 2211                          * by found extent
                                                   >> 2212                          */
                                                   >> 2213                         start = block;
                                                   >> 2214                         end = le32_to_cpu(ex->ee_block)
                                                   >> 2215                                 + ext4_ext_get_actual_len(ex);
                                                   >> 2216                         if (block + num < end)
                                                   >> 2217                                 end = block + num;
                                                   >> 2218                         exists = 1;
                                                   >> 2219                 } else {
                                                   >> 2220                         BUG();
                                                   >> 2221                 }
                                                   >> 2222                 BUG_ON(end <= start);
                                                   >> 2223 
                                                   >> 2224                 if (!exists) {
                                                   >> 2225                         es.es_lblk = start;
                                                   >> 2226                         es.es_len = end - start;
                                                   >> 2227                         es.es_pblk = 0;
                                                   >> 2228                 } else {
                                                   >> 2229                         es.es_lblk = le32_to_cpu(ex->ee_block);
                                                   >> 2230                         es.es_len = ext4_ext_get_actual_len(ex);
                                                   >> 2231                         es.es_pblk = ext4_ext_pblock(ex);
                                                   >> 2232                         if (ext4_ext_is_unwritten(ex))
                                                   >> 2233                                 flags |= FIEMAP_EXTENT_UNWRITTEN;
                                                   >> 2234                 }
                                                   >> 2235 
                                                   >> 2236                 /*
                                                   >> 2237                  * Find delayed extent and update es accordingly. We call
                                                   >> 2238                  * it even in !exists case to find out whether es is the
                                                   >> 2239                  * last existing extent or not.
                                                   >> 2240                  */
                                                   >> 2241                 next_del = ext4_find_delayed_extent(inode, &es);
                                                   >> 2242                 if (!exists && next_del) {
                                                   >> 2243                         exists = 1;
2219                         flags |= (FIEMAP_EXTE    2244                         flags |= (FIEMAP_EXTENT_DELALLOC |
2220                                   FIEMAP_EXTE    2245                                   FIEMAP_EXTENT_UNKNOWN);
2221                 if (ext4_es_is_hole(&es))     !! 2246                 }
2222                         flags |= EXT4_FIEMAP_ !! 2247                 up_read(&EXT4_I(inode)->i_data_sem);
2223                 if (next == 0)                !! 2248 
                                                   >> 2249                 if (unlikely(es.es_len == 0)) {
                                                   >> 2250                         EXT4_ERROR_INODE(inode, "es.es_len == 0");
                                                   >> 2251                         err = -EFSCORRUPTED;
                                                   >> 2252                         break;
                                                   >> 2253                 }
                                                   >> 2254 
                                                   >> 2255                 /*
                                                   >> 2256                  * This is possible iff next == next_del == EXT_MAX_BLOCKS.
                                                   >> 2257                  * We need to check next == EXT_MAX_BLOCKS because an
                                                   >> 2258                  * extent can have both unwritten and delayed status:
                                                   >> 2259                  * when a delayed-allocated extent is later allocated by
                                                   >> 2260                  * fallocate, the extent status tree tracks both states
                                                   >> 2261                  * in one extent.
                                                   >> 2262                  *
                                                   >> 2263                  * So we could return an unwritten and delayed extent
                                                   >> 2264                  * whose block is equal to 'next'.
                                                   >> 2265                  */
                                                   >> 2266                 if (next == next_del && next == EXT_MAX_BLOCKS) {
2224                         flags |= FIEMAP_EXTEN    2267                         flags |= FIEMAP_EXTENT_LAST;
2225                 if (flags & (FIEMAP_EXTENT_DE !! 2268                         if (unlikely(next_del != EXT_MAX_BLOCKS ||
2226                              EXT4_FIEMAP_EXTE !! 2269                                      next != EXT_MAX_BLOCKS)) {
2227                         es.es_pblk = 0;       !! 2270                                 EXT4_ERROR_INODE(inode,
2228                 else                          !! 2271                                                  "next extent == %u, next "
2229                         es.es_pblk = ext4_es_ !! 2272                                                  "delalloc extent = %u",
2230                 err = fiemap_fill_next_extent !! 2273                                                  next, next_del);
                                                   >> 2274                                 err = -EFSCORRUPTED;
                                                   >> 2275                                 break;
                                                   >> 2276                         }
                                                   >> 2277                 }
                                                   >> 2278 
                                                   >> 2279                 if (exists) {
                                                   >> 2280                         err = fiemap_fill_next_extent(fieinfo,
2231                                 (__u64)es.es_    2281                                 (__u64)es.es_lblk << blksize_bits,
2232                                 (__u64)es.es_    2282                                 (__u64)es.es_pblk << blksize_bits,
2233                                 (__u64)es.es_    2283                                 (__u64)es.es_len << blksize_bits,
2234                                 flags);          2284                                 flags);
2235                 if (next == 0)                !! 2285                         if (err < 0)
2236                         break;                !! 2286                                 break;
2237                 block = next;                 !! 2287                         if (err == 1) {
2238                 if (err < 0)                  !! 2288                                 err = 0;
2239                         return err;           !! 2289                                 break;
2240                 if (err == 1)                 !! 2290                         }
2241                         return 0;             !! 2291                 }
                                                   >> 2292 
                                                   >> 2293                 block = es.es_lblk + es.es_len;
2242         }                                        2294         }
2243         return 0;                             << 
2244 }                                             << 
2245                                                  2295 
                                                   >> 2296         ext4_ext_drop_refs(path);
                                                   >> 2297         kfree(path);
                                                   >> 2298         return err;
                                                   >> 2299 }
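The loop above is part of the FIEMAP path for extent-mapped files. As a rough userspace companion (not part of extents.c; it only assumes the standard Linux uapi headers), the sketch below calls the FS_IOC_FIEMAP ioctl on a file and prints the returned extents, including the delalloc/unwritten/last flags that the kernel code sets:

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fiemap.h>
#include <linux/fs.h>

int main(int argc, char **argv)
{
        unsigned int i, count = 32;     /* a real tool would loop to fetch more */
        struct fiemap *fm;
        int fd;

        if (argc != 2) {
                fprintf(stderr, "usage: %s <file>\n", argv[0]);
                return 1;
        }
        fd = open(argv[1], O_RDONLY);
        if (fd < 0) {
                perror("open");
                return 1;
        }
        fm = calloc(1, sizeof(*fm) + count * sizeof(struct fiemap_extent));
        if (!fm)
                return 1;
        fm->fm_start = 0;
        fm->fm_length = ~0ULL;                  /* map the whole file */
        fm->fm_flags = FIEMAP_FLAG_SYNC;        /* sync the file before mapping */
        fm->fm_extent_count = count;
        if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0) {
                perror("FS_IOC_FIEMAP");
                return 1;
        }
        for (i = 0; i < fm->fm_mapped_extents; i++) {
                struct fiemap_extent *fe = &fm->fm_extents[i];

                printf("logical %llu physical %llu length %llu%s%s%s\n",
                       (unsigned long long)fe->fe_logical,
                       (unsigned long long)fe->fe_physical,
                       (unsigned long long)fe->fe_length,
                       (fe->fe_flags & FIEMAP_EXTENT_UNWRITTEN) ? " unwritten" : "",
                       (fe->fe_flags & FIEMAP_EXTENT_DELALLOC) ? " delalloc" : "",
                       (fe->fe_flags & FIEMAP_EXTENT_LAST) ? " last" : "");
        }
        free(fm);
        close(fd);
        return 0;
}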
2246                                                  2300 
2247 /*                                               2301 /*
2248  * ext4_ext_find_hole - find hole around give !! 2302  * ext4_ext_determine_hole - determine hole around given block
2249  * @inode:      inode we lookup in               2303  * @inode:      inode we lookup in
2250  * @path:       path in extent tree to @lblk     2304  * @path:       path in extent tree to @lblk
2251  * @lblk:       pointer to logical block arou    2305  * @lblk:       pointer to logical block around which we want to determine hole
2252  *                                               2306  *
2253  * Determine hole length (and start if easily    2307  * Determine hole length (and start if easily possible) around given logical
2254  * block. We don't try too hard to find the b    2308  * block. We don't try too hard to find the beginning of the hole, but if @path
2255  * actually points to the extent before @lblk    2309  * actually points to the extent before @lblk, we provide it.
2256  *                                               2310  *
2257  * The function returns the length of a hole     2311  * The function returns the length of a hole starting at @lblk. We update @lblk
2258  * to the beginning of the hole if we managed    2312  * to the beginning of the hole if we managed to find it.
2259  */                                              2313  */
2260 static ext4_lblk_t ext4_ext_find_hole(struct  !! 2314 static ext4_lblk_t ext4_ext_determine_hole(struct inode *inode,
2261                                       struct  !! 2315                                            struct ext4_ext_path *path,
2262                                       ext4_lb !! 2316                                            ext4_lblk_t *lblk)
2263 {                                                2317 {
2264         int depth = ext_depth(inode);            2318         int depth = ext_depth(inode);
2265         struct ext4_extent *ex;                  2319         struct ext4_extent *ex;
2266         ext4_lblk_t len;                         2320         ext4_lblk_t len;
2267                                                  2321 
2268         ex = path[depth].p_ext;                  2322         ex = path[depth].p_ext;
2269         if (ex == NULL) {                        2323         if (ex == NULL) {
2270                 /* there is no extent yet, so    2324                 /* there is no extent yet, so gap is [0;-] */
2271                 *lblk = 0;                       2325                 *lblk = 0;
2272                 len = EXT_MAX_BLOCKS;            2326                 len = EXT_MAX_BLOCKS;
2273         } else if (*lblk < le32_to_cpu(ex->ee    2327         } else if (*lblk < le32_to_cpu(ex->ee_block)) {
2274                 len = le32_to_cpu(ex->ee_bloc    2328                 len = le32_to_cpu(ex->ee_block) - *lblk;
2275         } else if (*lblk >= le32_to_cpu(ex->e    2329         } else if (*lblk >= le32_to_cpu(ex->ee_block)
2276                         + ext4_ext_get_actual    2330                         + ext4_ext_get_actual_len(ex)) {
2277                 ext4_lblk_t next;                2331                 ext4_lblk_t next;
2278                                                  2332 
2279                 *lblk = le32_to_cpu(ex->ee_bl    2333                 *lblk = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
2280                 next = ext4_ext_next_allocate    2334                 next = ext4_ext_next_allocated_block(path);
2281                 BUG_ON(next == *lblk);           2335                 BUG_ON(next == *lblk);
2282                 len = next - *lblk;              2336                 len = next - *lblk;
2283         } else {                                 2337         } else {
2284                 BUG();                           2338                 BUG();
2285         }                                        2339         }
2286         return len;                              2340         return len;
2287 }                                                2341 }
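As a rough stand-alone model of the hole calculation above, the sketch below works on a sorted in-memory array instead of the on-disk extent tree; toy_extent and find_hole are illustrative names, not kernel interfaces:

#include <stdio.h>
#include <stdint.h>

#define TOY_MAX_BLOCKS 0xffffffffU      /* stand-in for EXT_MAX_BLOCKS */

struct toy_extent {
        uint32_t lblk;  /* first logical block covered */
        uint32_t len;   /* number of blocks */
};

/*
 * Return the hole length around *lblk and move *lblk back to the hole start
 * when that is cheap to compute.  Where the kernel code BUG()s (the block is
 * inside an extent), this model just reports a zero-length hole.
 */
static uint32_t find_hole(const struct toy_extent *ex, int nr, uint32_t *lblk)
{
        int i;

        if (nr == 0) {                  /* no extents at all: hole is [0, max) */
                *lblk = 0;
                return TOY_MAX_BLOCKS;
        }
        for (i = 0; i < nr; i++) {
                if (*lblk < ex[i].lblk)         /* hole ends at this extent */
                        return ex[i].lblk - *lblk;
                if (*lblk < ex[i].lblk + ex[i].len)
                        return 0;               /* block is allocated */
        }
        /* past the last extent: hole runs from its end onwards */
        *lblk = ex[nr - 1].lblk + ex[nr - 1].len;
        return TOY_MAX_BLOCKS - *lblk;
}

int main(void)
{
        struct toy_extent map[] = { { 0, 4 }, { 10, 2 } };
        uint32_t lblk = 6;
        uint32_t len = find_hole(map, 2, &lblk);

        printf("hole starts at %u, length %u\n", lblk, len);    /* 6, 4 */
        return 0;
}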
2288                                                  2342 
2289 /*                                               2343 /*
                                                   >> 2344  * ext4_ext_put_gap_in_cache:
                                                   >> 2345  * calculate boundaries of the gap that the requested block fits into
                                                   >> 2346  * and cache this gap
                                                   >> 2347  */
                                                   >> 2348 static void
                                                   >> 2349 ext4_ext_put_gap_in_cache(struct inode *inode, ext4_lblk_t hole_start,
                                                   >> 2350                           ext4_lblk_t hole_len)
                                                   >> 2351 {
                                                   >> 2352         struct extent_status es;
                                                   >> 2353 
                                                   >> 2354         ext4_es_find_delayed_extent_range(inode, hole_start,
                                                   >> 2355                                           hole_start + hole_len - 1, &es);
                                                   >> 2356         if (es.es_len) {
                                                   >> 2357                 /* Is there a delayed extent containing lblock? */
                                                   >> 2358                 if (es.es_lblk <= hole_start)
                                                   >> 2359                         return;
                                                   >> 2360                 hole_len = min(es.es_lblk - hole_start, hole_len);
                                                   >> 2361         }
                                                   >> 2362         ext_debug(" -> %u:%u\n", hole_start, hole_len);
                                                   >> 2363         ext4_es_insert_extent(inode, hole_start, hole_len, ~0,
                                                   >> 2364                               EXTENT_STATUS_HOLE);
                                                   >> 2365 }
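The clamping against a delayed extent can be shown with plain arithmetic; trim_hole below is a purely illustrative helper, not a kernel function, mirroring the min() step above:

#include <stdio.h>

/* Trim a hole so the cached range stops at the first delayed block. */
static unsigned int trim_hole(unsigned int hole_start, unsigned int hole_len,
                              unsigned int delayed_lblk, unsigned int delayed_len)
{
        /* a delayed extent already covering hole_start: nothing to cache */
        if (delayed_len && delayed_lblk <= hole_start)
                return 0;
        /* otherwise the cached hole must not reach into the delayed extent */
        if (delayed_len && delayed_lblk - hole_start < hole_len)
                hole_len = delayed_lblk - hole_start;
        return hole_len;
}

int main(void)
{
        /* hole [100, 150) meets a delayed extent starting at block 120 */
        printf("cache %u blocks\n", trim_hole(100, 50, 120, 8));        /* 20 */
        return 0;
}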
                                                   >> 2366 
                                                   >> 2367 /*
2290  * ext4_ext_rm_idx:                              2368  * ext4_ext_rm_idx:
2291  * removes index from the index block.           2369  * removes index from the index block.
2292  */                                              2370  */
2293 static int ext4_ext_rm_idx(handle_t *handle,     2371 static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
2294                         struct ext4_ext_path     2372                         struct ext4_ext_path *path, int depth)
2295 {                                                2373 {
2296         int err;                                 2374         int err;
2297         ext4_fsblk_t leaf;                       2375         ext4_fsblk_t leaf;
2298         int k = depth - 1;                    << 
2299                                                  2376 
2300         /* free index block */                   2377         /* free index block */
2301         leaf = ext4_idx_pblock(path[k].p_idx) !! 2378         depth--;
2302         if (unlikely(path[k].p_hdr->eh_entrie !! 2379         path = path + depth;
2303                 EXT4_ERROR_INODE(inode, "path !! 2380         leaf = ext4_idx_pblock(path->p_idx);
                                                   >> 2381         if (unlikely(path->p_hdr->eh_entries == 0)) {
                                                   >> 2382                 EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
2304                 return -EFSCORRUPTED;            2383                 return -EFSCORRUPTED;
2305         }                                        2384         }
2306         err = ext4_ext_get_access(handle, ino !! 2385         err = ext4_ext_get_access(handle, inode, path);
2307         if (err)                                 2386         if (err)
2308                 return err;                      2387                 return err;
2309                                                  2388 
2310         if (path[k].p_idx != EXT_LAST_INDEX(p !! 2389         if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) {
2311                 int len = EXT_LAST_INDEX(path !! 2390                 int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx;
2312                 len *= sizeof(struct ext4_ext    2391                 len *= sizeof(struct ext4_extent_idx);
2313                 memmove(path[k].p_idx, path[k !! 2392                 memmove(path->p_idx, path->p_idx + 1, len);
2314         }                                        2393         }
2315                                                  2394 
2316         le16_add_cpu(&path[k].p_hdr->eh_entri !! 2395         le16_add_cpu(&path->p_hdr->eh_entries, -1);
2317         err = ext4_ext_dirty(handle, inode, p !! 2396         err = ext4_ext_dirty(handle, inode, path);
2318         if (err)                                 2397         if (err)
2319                 return err;                      2398                 return err;
2320         ext_debug(inode, "index is empty, rem !! 2399         ext_debug("index is empty, remove it, free block %llu\n", leaf);
2321         trace_ext4_ext_rm_idx(inode, leaf);      2400         trace_ext4_ext_rm_idx(inode, leaf);
2322                                                  2401 
2323         ext4_free_blocks(handle, inode, NULL,    2402         ext4_free_blocks(handle, inode, NULL, leaf, 1,
2324                          EXT4_FREE_BLOCKS_MET    2403                          EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
2325                                                  2404 
2326         while (--k >= 0) {                    !! 2405         while (--depth >= 0) {
2327                 if (path[k + 1].p_idx != EXT_ !! 2406                 if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr))
2328                         break;                   2407                         break;
2329                 err = ext4_ext_get_access(han !! 2408                 path--;
                                                   >> 2409                 err = ext4_ext_get_access(handle, inode, path);
2330                 if (err)                         2410                 if (err)
2331                         goto clean;           !! 2411                         break;
2332                 path[k].p_idx->ei_block = pat !! 2412                 path->p_idx->ei_block = (path+1)->p_idx->ei_block;
2333                 err = ext4_ext_dirty(handle,  !! 2413                 err = ext4_ext_dirty(handle, inode, path);
2334                 if (err)                         2414                 if (err)
2335                         goto clean;           !! 2415                         break;
2336         }                                        2416         }
2337         return 0;                             << 
2338                                               << 
2339 clean:                                        << 
2340         /*                                    << 
2341          * The path[k].p_bh is either unmodif << 
2342          * set (see ext4_ext_get_access()). S << 
2343          * of the successfully modified exten << 
2344          * these extents to be checked to avo << 
2345          */                                   << 
2346         while (++k < depth)                   << 
2347                 clear_buffer_verified(path[k] << 
2348                                               << 
2349         return err;                              2417         return err;
2350 }                                                2418 }
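The core of the removal above is an ordinary array deletion: shift the following entries down with memmove() and decrement the entry count. A self-contained model of that pattern (toy_idx and remove_idx are illustrative names only):

#include <stdio.h>
#include <string.h>

struct toy_idx {
        unsigned int ei_block;          /* first logical block of the subtree */
        unsigned long long pblk;        /* physical block of the child node */
};

/* Drop entry 'slot' from a contiguous index array of *nr entries. */
static void remove_idx(struct toy_idx *idx, unsigned short *nr, unsigned int slot)
{
        if (slot + 1 < *nr)             /* shift the tail over the removed slot */
                memmove(&idx[slot], &idx[slot + 1],
                        (*nr - slot - 1) * sizeof(*idx));
        (*nr)--;
}

int main(void)
{
        struct toy_idx idx[] = { { 0, 100 }, { 32, 200 }, { 64, 300 } };
        unsigned short nr = 3;
        unsigned int i;

        remove_idx(idx, &nr, 1);        /* drop the middle entry */
        for (i = 0; i < nr; i++)
                printf("%u -> %llu\n", idx[i].ei_block, idx[i].pblk);
        return 0;
}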
2351                                                  2419 
2352 /*                                               2420 /*
2353  * ext4_ext_calc_credits_for_single_extent:      2421  * ext4_ext_calc_credits_for_single_extent:
2354  * This routine returns the max. credits need    2422  * This routine returns the max. credits needed to insert an extent
2355  * to the extent tree.                           2423  * to the extent tree.
2356  * When passing the actual path, the caller s    2424  * When passing the actual path, the caller should calculate credits
2357  * under i_data_sem.                             2425  * under i_data_sem.
2358  */                                              2426  */
2359 int ext4_ext_calc_credits_for_single_extent(s    2427 int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
2360                                                  2428                                                 struct ext4_ext_path *path)
2361 {                                                2429 {
2362         if (path) {                              2430         if (path) {
2363                 int depth = ext_depth(inode);    2431                 int depth = ext_depth(inode);
2364                 int ret = 0;                     2432                 int ret = 0;
2365                                                  2433 
2366                 /* probably there is space in    2434                 /* probably there is space in leaf? */
2367                 if (le16_to_cpu(path[depth].p    2435                 if (le16_to_cpu(path[depth].p_hdr->eh_entries)
2368                                 < le16_to_cpu    2436                                 < le16_to_cpu(path[depth].p_hdr->eh_max)) {
2369                                                  2437 
2370                         /*                       2438                         /*
2371                          *  There is some spa    2439                          *  There is some space in the leaf, so no
2372                          *  need to account f    2440                          *  need to account for a leaf block credit
2373                          *                       2441                          *
2374                          *  bitmaps and block    2442                          *  bitmaps and block group descriptor blocks
2375                          *  and other metadat    2443                          *  and other metadata blocks still need to be
2376                          *  accounted.           2444                          *  accounted.
2377                          */                      2445                          */
2378                         /* 1 bitmap, 1 block     2446                         /* 1 bitmap, 1 block group descriptor */
2379                         ret = 2 + EXT4_META_T    2447                         ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
2380                         return ret;              2448                         return ret;
2381                 }                                2449                 }
2382         }                                        2450         }
2383                                                  2451 
2384         return ext4_chunk_trans_blocks(inode,    2452         return ext4_chunk_trans_blocks(inode, nrblocks);
2385 }                                                2453 }
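A sketch of the shape of this estimate, with the metadata overhead passed in as a parameter because its exact value depends on the filesystem configuration; the numbers and names below are illustrative only:

#include <stdio.h>

/* Cheap path when the leaf still has free slots, generic per-chunk estimate
 * otherwise.  Values are made up. */
static int credits_for_single_extent(int leaf_has_room, int meta_trans_blocks,
                                     int chunk_trans_blocks)
{
        if (leaf_has_room)
                return 2 + meta_trans_blocks;   /* 1 bitmap + 1 group descriptor + metadata */
        return chunk_trans_blocks;              /* may need to grow the tree */
}

int main(void)
{
        printf("leaf has room: %d credits\n", credits_for_single_extent(1, 8, 20));
        printf("leaf is full : %d credits\n", credits_for_single_extent(0, 8, 20));
        return 0;
}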
2386                                                  2454 
2387 /*                                               2455 /*
2388  * How many index/leaf blocks need to be chan    2456  * How many index/leaf blocks need to be changed/allocated to add @extents extents?
2389  *                                               2457  *
2390  * If we add a single extent, then in the wor    2458  * If we add a single extent, then in the worst case, each tree level
2391  * index/leaf needs to be changed in case of     2459  * index/leaf needs to be changed in case of a tree split.
2392  *                                               2460  *
2393  * If more extents are inserted, they could c    2461  * If more extents are inserted, they could cause the whole tree to split more
2394  * than once, but this is really rare.           2462  * than once, but this is really rare.
2395  */                                              2463  */
2396 int ext4_ext_index_trans_blocks(struct inode     2464 int ext4_ext_index_trans_blocks(struct inode *inode, int extents)
2397 {                                                2465 {
2398         int index;                               2466         int index;
2399         int depth;                               2467         int depth;
2400                                                  2468 
2401         /* If we are converting the inline da    2469         /* If we are converting the inline data, only one is needed here. */
2402         if (ext4_has_inline_data(inode))         2470         if (ext4_has_inline_data(inode))
2403                 return 1;                        2471                 return 1;
2404                                                  2472 
2405         depth = ext_depth(inode);                2473         depth = ext_depth(inode);
2406                                                  2474 
2407         if (extents <= 1)                        2475         if (extents <= 1)
2408                 index = depth * 2;               2476                 index = depth * 2;
2409         else                                     2477         else
2410                 index = depth * 3;               2478                 index = depth * 3;
2411                                                  2479 
2412         return index;                            2480         return index;
2413 }                                                2481 }
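A quick worked example of the rule above (illustrative values; the helper simply restates the depth*2 / depth*3 estimate):

#include <stdio.h>

static int index_trans_blocks(int depth, int extents)
{
        return (extents <= 1) ? depth * 2 : depth * 3;  /* same rule as above */
}

int main(void)
{
        printf("depth 2, 1 extent : %d blocks\n", index_trans_blocks(2, 1)); /* 4 */
        printf("depth 2, 8 extents: %d blocks\n", index_trans_blocks(2, 8)); /* 6 */
        return 0;
}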
2414                                                  2482 
2415 static inline int get_default_free_blocks_fla    2483 static inline int get_default_free_blocks_flags(struct inode *inode)
2416 {                                                2484 {
2417         if (S_ISDIR(inode->i_mode) || S_ISLNK    2485         if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode) ||
2418             ext4_test_inode_flag(inode, EXT4_    2486             ext4_test_inode_flag(inode, EXT4_INODE_EA_INODE))
2419                 return EXT4_FREE_BLOCKS_METAD    2487                 return EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET;
2420         else if (ext4_should_journal_data(ino    2488         else if (ext4_should_journal_data(inode))
2421                 return EXT4_FREE_BLOCKS_FORGE    2489                 return EXT4_FREE_BLOCKS_FORGET;
2422         return 0;                                2490         return 0;
2423 }                                                2491 }
2424                                                  2492 
2425 /*                                            << 
2426  * ext4_rereserve_cluster - increment the res << 
2427  *                          freeing a cluster << 
2428  *                                            << 
2429  * @inode - file containing the cluster       << 
2430  * @lblk - logical block in cluster to be res << 
2431  *                                            << 
2432  * Increments the reserved cluster count and  << 
2433  * file system when freeing a partial cluster << 
2434  * delayed and unwritten block.  A partial cl << 
2435  * requirement will have a pending reservatio << 
2436  * RERESERVE_CLUSTER flag is used when callin << 
2437  * defer reserved and allocated space account << 
2438  * to this function.                          << 
2439  */                                           << 
2440 static void ext4_rereserve_cluster(struct ino << 
2441 {                                             << 
2442         struct ext4_sb_info *sbi = EXT4_SB(in << 
2443         struct ext4_inode_info *ei = EXT4_I(i << 
2444                                               << 
2445         dquot_reclaim_block(inode, EXT4_C2B(s << 
2446                                               << 
2447         spin_lock(&ei->i_block_reservation_lo << 
2448         ei->i_reserved_data_blocks++;         << 
2449         percpu_counter_add(&sbi->s_dirtyclust << 
2450         spin_unlock(&ei->i_block_reservation_ << 
2451                                               << 
2452         percpu_counter_add(&sbi->s_freecluste << 
2453         ext4_remove_pending(inode, lblk);     << 
2454 }                                             << 
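A userspace caricature of the bookkeeping above, with a pthread mutex standing in for the inode's reservation spinlock and plain longs standing in for the per-inode and percpu counters; the names and structure are illustrative only:

#include <pthread.h>
#include <stdio.h>

struct toy_counters {
        pthread_mutex_t lock;           /* stands in for the reservation lock */
        long reserved_data_blocks;      /* per-inode reservation */
        long dirty_clusters;            /* filesystem-wide counter */
};

static void rereserve_cluster(struct toy_counters *c)
{
        pthread_mutex_lock(&c->lock);
        c->reserved_data_blocks++;      /* the partially freed cluster stays reserved */
        c->dirty_clusters++;
        pthread_mutex_unlock(&c->lock);
}

int main(void)
{
        struct toy_counters c = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

        rereserve_cluster(&c);
        printf("reserved=%ld dirty=%ld\n", c.reserved_data_blocks, c.dirty_clusters);
        return 0;
}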
2455                                               << 
2456 static int ext4_remove_blocks(handle_t *handl    2493 static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
2457                               struct ext4_ext    2494                               struct ext4_extent *ex,
2458                               struct partial_ !! 2495                               long long *partial_cluster,
2459                               ext4_lblk_t fro    2496                               ext4_lblk_t from, ext4_lblk_t to)
2460 {                                                2497 {
2461         struct ext4_sb_info *sbi = EXT4_SB(in    2498         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2462         unsigned short ee_len = ext4_ext_get_    2499         unsigned short ee_len = ext4_ext_get_actual_len(ex);
2463         ext4_fsblk_t last_pblk, pblk;         !! 2500         ext4_fsblk_t pblk;
2464         ext4_lblk_t num;                      !! 2501         int flags = get_default_free_blocks_flags(inode);
2465         int flags;                            << 
2466                                               << 
2467         /* only extent tail removal is allowe << 
2468         if (from < le32_to_cpu(ex->ee_block)  << 
2469             to != le32_to_cpu(ex->ee_block) + << 
2470                 ext4_error(sbi->s_sb,         << 
2471                            "strange request:  << 
2472                            from, to, le32_to_ << 
2473                 return 0;                     << 
2474         }                                     << 
2475                                               << 
2476 #ifdef EXTENTS_STATS                          << 
2477         spin_lock(&sbi->s_ext_stats_lock);    << 
2478         sbi->s_ext_blocks += ee_len;          << 
2479         sbi->s_ext_extents++;                 << 
2480         if (ee_len < sbi->s_ext_min)          << 
2481                 sbi->s_ext_min = ee_len;      << 
2482         if (ee_len > sbi->s_ext_max)          << 
2483                 sbi->s_ext_max = ee_len;      << 
2484         if (ext_depth(inode) > sbi->s_depth_m << 
2485                 sbi->s_depth_max = ext_depth( << 
2486         spin_unlock(&sbi->s_ext_stats_lock);  << 
2487 #endif                                        << 
2488                                               << 
2489         trace_ext4_remove_blocks(inode, ex, f << 
2490                                                  2502 
2491         /*                                       2503         /*
2492          * if we have a partial cluster, and  !! 2504          * For bigalloc file systems, we never free a partial cluster
2493          * cluster of the last block in the e !! 2505          * at the beginning of the extent.  Instead, we make a note
                                                   >> 2506          * that we tried freeing the cluster, and check to see if we
                                                   >> 2507          * need to free it on a subsequent call to ext4_remove_blocks,
                                                   >> 2508          * or at the end of ext4_ext_rm_leaf or ext4_ext_remove_space.
2494          */                                      2509          */
2495         last_pblk = ext4_ext_pblock(ex) + ee_ !! 2510         flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;
2496                                               << 
2497         if (partial->state != initial &&      << 
2498             partial->pclu != EXT4_B2C(sbi, la << 
2499                 if (partial->state == tofree) << 
2500                         flags = get_default_f << 
2501                         if (ext4_is_pending(i << 
2502                                 flags |= EXT4 << 
2503                         ext4_free_blocks(hand << 
2504                                          EXT4 << 
2505                                          sbi- << 
2506                         if (flags & EXT4_FREE << 
2507                                 ext4_rereserv << 
2508                 }                             << 
2509                 partial->state = initial;     << 
2510         }                                     << 
2511                                               << 
2512         num = le32_to_cpu(ex->ee_block) + ee_ << 
2513         pblk = ext4_ext_pblock(ex) + ee_len - << 
2514                                                  2511 
                                                   >> 2512         trace_ext4_remove_blocks(inode, ex, from, to, *partial_cluster);
2515         /*                                       2513         /*
2516          * We free the partial cluster at the !! 2514          * If we have a partial cluster, and it's different from the
2517          * unless the cluster is used by anot !! 2515          * cluster of the last block, we need to explicitly free the
2518          * state is nofree).  If a partial cl !! 2516          * partial cluster here.
2519          * shared with the last block in the  !! 2517          */
2520          */                                   !! 2518         pblk = ext4_ext_pblock(ex) + ee_len - 1;
2521         flags = get_default_free_blocks_flags !! 2519         if (*partial_cluster > 0 &&
2522                                               !! 2520             *partial_cluster != (long long) EXT4_B2C(sbi, pblk)) {
2523         /* partial, left end cluster aligned, << 
2524         if ((EXT4_LBLK_COFF(sbi, to) != sbi-> << 
2525             (EXT4_LBLK_CMASK(sbi, to) >= from << 
2526             (partial->state != nofree)) {     << 
2527                 if (ext4_is_pending(inode, to << 
2528                         flags |= EXT4_FREE_BL << 
2529                 ext4_free_blocks(handle, inod    2521                 ext4_free_blocks(handle, inode, NULL,
2530                                  EXT4_PBLK_CM !! 2522                                  EXT4_C2B(sbi, *partial_cluster),
2531                                  sbi->s_clust    2523                                  sbi->s_cluster_ratio, flags);
2532                 if (flags & EXT4_FREE_BLOCKS_ !! 2524                 *partial_cluster = 0;
2533                         ext4_rereserve_cluste << 
2534                 partial->state = initial;     << 
2535                 flags = get_default_free_bloc << 
2536         }                                        2525         }
2537                                                  2526 
2538         flags |= EXT4_FREE_BLOCKS_NOFREE_LAST !! 2527 #ifdef EXTENTS_STATS
2539                                               !! 2528         {
2540         /*                                    !! 2529                 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2541          * For bigalloc file systems, we neve !! 2530                 spin_lock(&sbi->s_ext_stats_lock);
2542          * at the beginning of the extent.  I !! 2531                 sbi->s_ext_blocks += ee_len;
2543          * need to free it on a subsequent ca !! 2532                 sbi->s_ext_extents++;
2544          * or at the end of ext4_ext_rm_leaf  !! 2533                 if (ee_len < sbi->s_ext_min)
2545          */                                   !! 2534                         sbi->s_ext_min = ee_len;
2546         flags |= EXT4_FREE_BLOCKS_NOFREE_FIRS !! 2535                 if (ee_len > sbi->s_ext_max)
2547         ext4_free_blocks(handle, inode, NULL, !! 2536                         sbi->s_ext_max = ee_len;
2548                                               !! 2537                 if (ext_depth(inode) > sbi->s_depth_max)
2549         /* reset the partial cluster if we've !! 2538                         sbi->s_depth_max = ext_depth(inode);
2550         if (partial->state != initial && part !! 2539                 spin_unlock(&sbi->s_ext_stats_lock);
2551                 partial->state = initial;     << 
2552                                               << 
2553         /*                                    << 
2554          * If we've freed the entire extent b << 
2555          * cluster aligned and is not marked  << 
2556          * record the partial cluster at the  << 
2557          * wasn't freed by the preceding ext4 << 
2558          * need to look farther to the left t << 
2559          * (not shared with another extent).  << 
2560          * cluster - we're either  done freei << 
2561          * extent is left cluster aligned.    << 
2562          */                                   << 
2563         if (EXT4_LBLK_COFF(sbi, from) && num  << 
2564                 if (partial->state == initial << 
2565                         partial->pclu = EXT4_ << 
2566                         partial->lblk = from; << 
2567                         partial->state = tofr << 
2568                 }                             << 
2569         } else {                              << 
2570                 partial->state = initial;     << 
2571         }                                        2540         }
                                                   >> 2541 #endif
                                                   >> 2542         if (from >= le32_to_cpu(ex->ee_block)
                                                   >> 2543             && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
                                                   >> 2544                 /* tail removal */
                                                   >> 2545                 ext4_lblk_t num;
                                                   >> 2546                 long long first_cluster;
2572                                                  2547 
                                                   >> 2548                 num = le32_to_cpu(ex->ee_block) + ee_len - from;
                                                   >> 2549                 pblk = ext4_ext_pblock(ex) + ee_len - num;
                                                   >> 2550                 /*
                                                   >> 2551                  * Usually we want to free partial cluster at the end of the
                                                   >> 2552                  * extent, except for the situation when the cluster is still
                                                   >> 2553                  * used by any other extent (partial_cluster is negative).
                                                   >> 2554                  */
                                                   >> 2555                 if (*partial_cluster < 0 &&
                                                   >> 2556                     *partial_cluster == -(long long) EXT4_B2C(sbi, pblk+num-1))
                                                   >> 2557                         flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER;
                                                   >> 2558 
                                                   >> 2559                 ext_debug("free last %u blocks starting %llu partial %lld\n",
                                                   >> 2560                           num, pblk, *partial_cluster);
                                                   >> 2561                 ext4_free_blocks(handle, inode, NULL, pblk, num, flags);
                                                   >> 2562                 /*
                                                   >> 2563                  * If the block range to be freed didn't start at the
                                                   >> 2564                  * beginning of a cluster, and we removed the entire
                                                   >> 2565                  * extent and the cluster is not used by any other extent,
                                                   >> 2566                  * save the partial cluster here, since we might need to
                                                   >> 2567                  * delete it if we determine that the truncate or punch hole
                                                   >> 2568                  * operation has removed all of the blocks in the cluster.
                                                   >> 2569                  * If that cluster is used by another extent, preserve its
                                                   >> 2570                  * negative value so it isn't freed later on.
                                                   >> 2571                  *
                                                   >> 2572                  * If the whole extent wasn't freed, we've reached the
                                                   >> 2573                  * start of the truncated/punched region and have finished
                                                   >> 2574                  * removing blocks.  If there's a partial cluster here it's
                                                   >> 2575                  * shared with the remainder of the extent and is no longer
                                                   >> 2576                  * a candidate for removal.
                                                   >> 2577                  */
                                                   >> 2578                 if (EXT4_PBLK_COFF(sbi, pblk) && ee_len == num) {
                                                   >> 2579                         first_cluster = (long long) EXT4_B2C(sbi, pblk);
                                                   >> 2580                         if (first_cluster != -*partial_cluster)
                                                   >> 2581                                 *partial_cluster = first_cluster;
                                                   >> 2582                 } else {
                                                   >> 2583                         *partial_cluster = 0;
                                                   >> 2584                 }
                                                   >> 2585         } else
                                                   >> 2586                 ext4_error(sbi->s_sb, "strange request: removal(2) "
                                                   >> 2587                            "%u-%u from %u:%u",
                                                   >> 2588                            from, to, le32_to_cpu(ex->ee_block), ee_len);
2573         return 0;                                2589         return 0;
2574 }                                                2590 }
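The partial-cluster decisions in this function reduce to block-to-cluster arithmetic. The sketch below uses toy macros (not the kernel's EXT4_B2C/EXT4_LBLK_COFF, though they follow the same shift-and-mask idea) to show when the edge clusters of a removed range are only partially covered:

#include <stdio.h>
#include <stdint.h>

#define CLUSTER_BITS    2                               /* 4 blocks per cluster here */
#define B2C(b)          ((b) >> CLUSTER_BITS)           /* block -> cluster */
#define COFF(b)         ((b) & ((1u << CLUSTER_BITS) - 1))  /* offset inside cluster */

int main(void)
{
        uint32_t from = 6, to = 13;     /* logical blocks being removed */

        printf("from: cluster %u, offset %u\n", B2C(from), COFF(from));
        printf("to  : cluster %u, offset %u\n", B2C(to), COFF(to));

        /*
         * When the range does not start or end on a cluster boundary, the edge
         * clusters are only partially freed and may still be shared with a
         * neighbouring extent, so they cannot be released unconditionally.
         */
        if (COFF(from) != 0)
                printf("left edge cluster %u is partially freed\n", B2C(from));
        if (COFF(to) != (1u << CLUSTER_BITS) - 1)
                printf("right edge cluster %u is partially freed\n", B2C(to));
        return 0;
}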
2575                                                  2591 
                                                   >> 2592 
2576 /*                                               2593 /*
2577  * ext4_ext_rm_leaf() Removes the extents ass    2594  * ext4_ext_rm_leaf() Removes the extents associated with the
2578  * blocks appearing between "start" and "end"    2595  * blocks appearing between "start" and "end".  Both "start"
2579  * and "end" must appear in the same extent o    2596  * and "end" must appear in the same extent or EIO is returned.
2580  *                                               2597  *
2581  * @handle: The journal handle                   2598  * @handle: The journal handle
2582  * @inode:  The file's inode                     2599  * @inode:  The file's inode
2583  * @path:   The path to the leaf                 2600  * @path:   The path to the leaf
2584  * @partial_cluster: The cluster which we'll     2601  * @partial_cluster: The cluster which we'll have to free if all extents
2585  *                   have been released from     2602  *                   have been released from it.  However, if this value is
2586  *                   negative, it's a cluster    2603  *                   negative, it's a cluster just to the right of the
2587  *                   punched region and it mu    2604  *                   punched region and it must not be freed.
2588  * @start:  The first block to remove            2605  * @start:  The first block to remove
2589  * @end:   The last block to remove              2606  * @end:   The last block to remove
2590  */                                              2607  */
2591 static int                                       2608 static int
2592 ext4_ext_rm_leaf(handle_t *handle, struct ino    2609 ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2593                  struct ext4_ext_path *path,     2610                  struct ext4_ext_path *path,
2594                  struct partial_cluster *part !! 2611                  long long *partial_cluster,
2595                  ext4_lblk_t start, ext4_lblk    2612                  ext4_lblk_t start, ext4_lblk_t end)
2596 {                                                2613 {
2597         struct ext4_sb_info *sbi = EXT4_SB(in    2614         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2598         int err = 0, correct_index = 0;          2615         int err = 0, correct_index = 0;
2599         int depth = ext_depth(inode), credits !! 2616         int depth = ext_depth(inode), credits;
2600         struct ext4_extent_header *eh;           2617         struct ext4_extent_header *eh;
2601         ext4_lblk_t a, b;                        2618         ext4_lblk_t a, b;
2602         unsigned num;                            2619         unsigned num;
2603         ext4_lblk_t ex_ee_block;                 2620         ext4_lblk_t ex_ee_block;
2604         unsigned short ex_ee_len;                2621         unsigned short ex_ee_len;
2605         unsigned unwritten = 0;                  2622         unsigned unwritten = 0;
2606         struct ext4_extent *ex;                  2623         struct ext4_extent *ex;
2607         ext4_fsblk_t pblk;                       2624         ext4_fsblk_t pblk;
2608                                                  2625 
2609         /* the header must be checked already    2626         /* the header must be checked already in ext4_ext_remove_space() */
2610         ext_debug(inode, "truncate since %u i !! 2627         ext_debug("truncate since %u in leaf to %u\n", start, end);
2611         if (!path[depth].p_hdr)                  2628         if (!path[depth].p_hdr)
2612                 path[depth].p_hdr = ext_block    2629                 path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
2613         eh = path[depth].p_hdr;                  2630         eh = path[depth].p_hdr;
2614         if (unlikely(path[depth].p_hdr == NUL    2631         if (unlikely(path[depth].p_hdr == NULL)) {
2615                 EXT4_ERROR_INODE(inode, "path    2632                 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
2616                 return -EFSCORRUPTED;            2633                 return -EFSCORRUPTED;
2617         }                                        2634         }
2618         /* find where to start removing */       2635         /* find where to start removing */
2619         ex = path[depth].p_ext;                  2636         ex = path[depth].p_ext;
2620         if (!ex)                                 2637         if (!ex)
2621                 ex = EXT_LAST_EXTENT(eh);        2638                 ex = EXT_LAST_EXTENT(eh);
2622                                                  2639 
2623         ex_ee_block = le32_to_cpu(ex->ee_bloc    2640         ex_ee_block = le32_to_cpu(ex->ee_block);
2624         ex_ee_len = ext4_ext_get_actual_len(e    2641         ex_ee_len = ext4_ext_get_actual_len(ex);
2625                                                  2642 
2626         trace_ext4_ext_rm_leaf(inode, start,  !! 2643         trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster);
2627                                                  2644 
2628         while (ex >= EXT_FIRST_EXTENT(eh) &&     2645         while (ex >= EXT_FIRST_EXTENT(eh) &&
2629                         ex_ee_block + ex_ee_l    2646                         ex_ee_block + ex_ee_len > start) {
2630                                                  2647 
2631                 if (ext4_ext_is_unwritten(ex)    2648                 if (ext4_ext_is_unwritten(ex))
2632                         unwritten = 1;           2649                         unwritten = 1;
2633                 else                             2650                 else
2634                         unwritten = 0;           2651                         unwritten = 0;
2635                                                  2652 
2636                 ext_debug(inode, "remove ext  !! 2653                 ext_debug("remove ext %u:[%d]%d\n", ex_ee_block,
2637                           unwritten, ex_ee_le    2654                           unwritten, ex_ee_len);
2638                 path[depth].p_ext = ex;          2655                 path[depth].p_ext = ex;
2639                                                  2656 
2640                 a = max(ex_ee_block, start);  !! 2657                 a = ex_ee_block > start ? ex_ee_block : start;
2641                 b = min(ex_ee_block + ex_ee_l !! 2658                 b = ex_ee_block+ex_ee_len - 1 < end ?
                                                   >> 2659                         ex_ee_block+ex_ee_len - 1 : end;
2642                                                  2660 
2643                 ext_debug(inode, "  border %u !! 2661                 ext_debug("  border %u:%u\n", a, b);
2644                                                  2662 
2645                 /* If this extent is beyond t    2663                 /* If this extent is beyond the end of the hole, skip it */
2646                 if (end < ex_ee_block) {         2664                 if (end < ex_ee_block) {
2647                         /*                       2665                         /*
2648                          * We're going to ski    2666                          * We're going to skip this extent and move to another,
2649                          * so note that its f    2667                          * so note that its first cluster is in use to avoid
2650                          * freeing it when re    2668                          * freeing it when removing blocks.  Eventually, the
2651                          * right edge of the     2669                          * right edge of the truncated/punched region will
2652                          * be just to the lef    2670                          * be just to the left.
2653                          */                      2671                          */
2654                         if (sbi->s_cluster_ra    2672                         if (sbi->s_cluster_ratio > 1) {
2655                                 pblk = ext4_e    2673                                 pblk = ext4_ext_pblock(ex);
2656                                 partial->pclu !! 2674                                 *partial_cluster =
2657                                 partial->stat !! 2675                                         -(long long) EXT4_B2C(sbi, pblk);
2658                         }                        2676                         }
2659                         ex--;                    2677                         ex--;
2660                         ex_ee_block = le32_to    2678                         ex_ee_block = le32_to_cpu(ex->ee_block);
2661                         ex_ee_len = ext4_ext_    2679                         ex_ee_len = ext4_ext_get_actual_len(ex);
2662                         continue;                2680                         continue;
2663                 } else if (b != ex_ee_block +    2681                 } else if (b != ex_ee_block + ex_ee_len - 1) {
2664                         EXT4_ERROR_INODE(inod    2682                         EXT4_ERROR_INODE(inode,
2665                                          "can    2683                                          "can not handle truncate %u:%u "
2666                                          "on     2684                                          "on extent %u:%u",
2667                                          star    2685                                          start, end, ex_ee_block,
2668                                          ex_e    2686                                          ex_ee_block + ex_ee_len - 1);
2669                         err = -EFSCORRUPTED;     2687                         err = -EFSCORRUPTED;
2670                         goto out;                2688                         goto out;
2671                 } else if (a != ex_ee_block)     2689                 } else if (a != ex_ee_block) {
2672                         /* remove tail of the    2690                         /* remove tail of the extent */
2673                         num = a - ex_ee_block    2691                         num = a - ex_ee_block;
2674                 } else {                         2692                 } else {
2675                         /* remove whole exten    2693                         /* remove whole extent: excellent! */
2676                         num = 0;                 2694                         num = 0;
2677                 }                                2695                 }
2678                 /*                               2696                 /*
2679                  * 3 for leaf, sb, and inode     2697                  * 3 for leaf, sb, and inode plus 2 (bmap and group
2680                  * descriptor) for each block    2698                  * descriptor) for each block group; assume two block
2681                  * groups plus ex_ee_len/bloc    2699                  * groups plus ex_ee_len/blocks_per_block_group for
2682                  * the worst case                2700                  * the worst case
2683                  */                              2701                  */
2684                 credits = 7 + 2*(ex_ee_len/EX    2702                 credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
2685                 if (ex == EXT_FIRST_EXTENT(eh    2703                 if (ex == EXT_FIRST_EXTENT(eh)) {
2686                         correct_index = 1;       2704                         correct_index = 1;
2687                         credits += (ext_depth    2705                         credits += (ext_depth(inode)) + 1;
2688                 }                                2706                 }
2689                 credits += EXT4_MAXQUOTAS_TRA    2707                 credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
2690                 /*                            !! 2708 
2691                  * We may end up freeing some !! 2709                 err = ext4_ext_truncate_extend_restart(handle, inode, credits);
2692                  * punched range. Note that p !! 2710                 if (err)
2693                  * by ext4_free_data_revoke_c << 
2694                  */                           << 
2695                 revoke_credits =              << 
2696                         ext4_free_metadata_re << 
2697                                               << 
2698                         ext4_free_data_revoke << 
2699                                               << 
2700                 err = ext4_datasem_ensure_cre << 
2701                                               << 
2702                 if (err) {                    << 
2703                         if (err > 0)          << 
2704                                 err = -EAGAIN << 
2705                         goto out;                2711                         goto out;
2706                 }                             << 
2707                                                  2712 
2708                 err = ext4_ext_get_access(han    2713                 err = ext4_ext_get_access(handle, inode, path + depth);
2709                 if (err)                         2714                 if (err)
2710                         goto out;                2715                         goto out;
2711                                                  2716 
2712                 err = ext4_remove_blocks(hand !! 2717                 err = ext4_remove_blocks(handle, inode, ex, partial_cluster,
                                                   >> 2718                                          a, b);
2713                 if (err)                         2719                 if (err)
2714                         goto out;                2720                         goto out;
2715                                                  2721 
2716                 if (num == 0)                    2722                 if (num == 0)
2717                         /* this extent is rem    2723                         /* this extent is removed; mark slot entirely unused */
2718                         ext4_ext_store_pblock    2724                         ext4_ext_store_pblock(ex, 0);
2719                                                  2725 
2720                 ex->ee_len = cpu_to_le16(num)    2726                 ex->ee_len = cpu_to_le16(num);
2721                 /*                               2727                 /*
2722                  * Do not mark unwritten if a    2728                  * Do not mark unwritten if all the blocks in the
2723                  * extent have been removed.     2729                  * extent have been removed.
2724                  */                              2730                  */
2725                 if (unwritten && num)            2731                 if (unwritten && num)
2726                         ext4_ext_mark_unwritt    2732                         ext4_ext_mark_unwritten(ex);
2727                 /*                               2733                 /*
2728                  * If the extent was complete    2734                  * If the extent was completely released,
2729                  * we need to remove it from     2735                  * we need to remove it from the leaf
2730                  */                              2736                  */
2731                 if (num == 0) {                  2737                 if (num == 0) {
2732                         if (end != EXT_MAX_BL    2738                         if (end != EXT_MAX_BLOCKS - 1) {
2733                                 /*               2739                                 /*
2734                                  * For hole p    2740                                  * For hole punching, we need to scoot all the
2735                                  * extents up    2741                                  * extents up when an extent is removed so that
2736                                  * we don't h    2742                                  * we don't have blank extents in the middle
2737                                  */              2743                                  */
2738                                 memmove(ex, e    2744                                 memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) *
2739                                         sizeo    2745                                         sizeof(struct ext4_extent));
2740                                                  2746 
2741                                 /* Now get ri    2747                                 /* Now get rid of the one at the end */
2742                                 memset(EXT_LA    2748                                 memset(EXT_LAST_EXTENT(eh), 0,
2743                                         sizeo    2749                                         sizeof(struct ext4_extent));
2744                         }                        2750                         }
2745                         le16_add_cpu(&eh->eh_    2751                         le16_add_cpu(&eh->eh_entries, -1);
2746                 }                                2752                 }
2747                                                  2753 
2748                 err = ext4_ext_dirty(handle,     2754                 err = ext4_ext_dirty(handle, inode, path + depth);
2749                 if (err)                         2755                 if (err)
2750                         goto out;                2756                         goto out;
2751                                                  2757 
2752                 ext_debug(inode, "new extent: !! 2758                 ext_debug("new extent: %u:%u:%llu\n", ex_ee_block, num,
2753                                 ext4_ext_pblo    2759                                 ext4_ext_pblock(ex));
2754                 ex--;                            2760                 ex--;
2755                 ex_ee_block = le32_to_cpu(ex-    2761                 ex_ee_block = le32_to_cpu(ex->ee_block);
2756                 ex_ee_len = ext4_ext_get_actu    2762                 ex_ee_len = ext4_ext_get_actual_len(ex);
2757         }                                        2763         }
2758                                                  2764 
2759         if (correct_index && eh->eh_entries)     2765         if (correct_index && eh->eh_entries)
2760                 err = ext4_ext_correct_indexe    2766                 err = ext4_ext_correct_indexes(handle, inode, path);
2761                                                  2767 
2762         /*                                       2768         /*
2763          * If there's a partial cluster and a    2769          * If there's a partial cluster and at least one extent remains in
2764          * the leaf, free the partial cluster    2770          * the leaf, free the partial cluster if it isn't shared with the
2765          * current extent.  If it is shared w    2771          * current extent.  If it is shared with the current extent
2766          * we reset the partial cluster becau !! 2772          * we zero partial_cluster because we've reached the start of the
2767          * truncated/punched region and we're    2773          * truncated/punched region and we're done removing blocks.
2768          */                                      2774          */
2769         if (partial->state == tofree && ex >= !! 2775         if (*partial_cluster > 0 && ex >= EXT_FIRST_EXTENT(eh)) {
2770                 pblk = ext4_ext_pblock(ex) +     2776                 pblk = ext4_ext_pblock(ex) + ex_ee_len - 1;
2771                 if (partial->pclu != EXT4_B2C !! 2777                 if (*partial_cluster != (long long) EXT4_B2C(sbi, pblk)) {
2772                         int flags = get_defau << 
2773                                               << 
2774                         if (ext4_is_pending(i << 
2775                                 flags |= EXT4 << 
2776                         ext4_free_blocks(hand    2778                         ext4_free_blocks(handle, inode, NULL,
2777                                          EXT4 !! 2779                                          EXT4_C2B(sbi, *partial_cluster),
2778                                          sbi- !! 2780                                          sbi->s_cluster_ratio,
2779                         if (flags & EXT4_FREE !! 2781                                          get_default_free_blocks_flags(inode));
2780                                 ext4_rereserv << 
2781                 }                                2782                 }
2782                 partial->state = initial;     !! 2783                 *partial_cluster = 0;
2783         }                                        2784         }
2784                                                  2785 
2785         /* if this leaf is free, then we shou    2786         /* if this leaf is free, then we should
2786          * remove it from index block above *    2787          * remove it from index block above */
2787         if (err == 0 && eh->eh_entries == 0 &    2788         if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
2788                 err = ext4_ext_rm_idx(handle,    2789                 err = ext4_ext_rm_idx(handle, inode, path, depth);
2789                                                  2790 
2790 out:                                             2791 out:
2791         return err;                              2792         return err;
2792 }                                                2793 }
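
The partial-cluster handling above only matters on bigalloc filesystems, where several physical blocks share one allocation cluster and a cluster straddling the removal boundary must survive if a remaining extent still maps into it. As a rough aid, the following standalone sketch (plain userspace C, not kernel code; the 16-blocks-per-cluster ratio and the block numbers are assumptions) mimics the arithmetic that the EXT4_B2C()/EXT4_C2B() macros perform and shows why such a shared cluster is kept rather than freed.

/*
 * Illustrative sketch only (not kernel code): models the block/cluster
 * arithmetic behind EXT4_B2C()/EXT4_C2B() for a bigalloc filesystem,
 * assuming a cluster ratio of 16 blocks per cluster.
 */
#include <stdio.h>

#define CLUSTER_BITS 4                      /* assumed: 16 blocks per cluster */
#define B2C(blk)  ((blk) >> CLUSTER_BITS)   /* physical block -> cluster      */
#define C2B(clu)  ((clu) << CLUSTER_BITS)   /* first block of a cluster       */

int main(void)
{
	unsigned long long last_removed = 1000;    /* last pblock of removed extent  */
	unsigned long long prev_extent_end = 998;  /* last pblock of an extent kept  */

	printf("removed blk %llu -> cluster %llu\n", last_removed, B2C(last_removed));
	printf("kept    blk %llu -> cluster %llu\n", prev_extent_end, B2C(prev_extent_end));

	/* Both blocks land in the same cluster, so the cluster is shared
	 * with a surviving extent and must not be freed. */
	if (B2C(last_removed) == B2C(prev_extent_end))
		printf("cluster %llu (blocks %llu..%llu) is shared: keep it\n",
		       B2C(last_removed), C2B(B2C(last_removed)),
		       C2B(B2C(last_removed)) + (1ULL << CLUSTER_BITS) - 1);
	return 0;
}
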
2793                                                  2794 
2794 /*                                               2795 /*
2795  * ext4_ext_more_to_rm:                          2796  * ext4_ext_more_to_rm:
2796  * returns 1 if current index has to be freed    2797  * returns 1 if current index has to be freed (even partial)
2797  */                                              2798  */
2798 static int                                       2799 static int
2799 ext4_ext_more_to_rm(struct ext4_ext_path *pat    2800 ext4_ext_more_to_rm(struct ext4_ext_path *path)
2800 {                                                2801 {
2801         BUG_ON(path->p_idx == NULL);             2802         BUG_ON(path->p_idx == NULL);
2802                                                  2803 
2803         if (path->p_idx < EXT_FIRST_INDEX(pat    2804         if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
2804                 return 0;                        2805                 return 0;
2805                                                  2806 
2806         /*                                       2807         /*
2807          * if truncate on deeper level happen    2808          * if truncate on deeper level happened, it wasn't partial,
2808          * so we have to consider current ind    2809          * so we have to consider current index for truncation
2809          */                                      2810          */
2810         if (le16_to_cpu(path->p_hdr->eh_entri    2811         if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
2811                 return 0;                        2812                 return 0;
2812         return 1;                                2813         return 1;
2813 }                                                2814 }
2814                                                  2815 
2815 int ext4_ext_remove_space(struct inode *inode    2816 int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
2816                           ext4_lblk_t end)       2817                           ext4_lblk_t end)
2817 {                                                2818 {
2818         struct ext4_sb_info *sbi = EXT4_SB(in    2819         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2819         int depth = ext_depth(inode);            2820         int depth = ext_depth(inode);
2820         struct ext4_ext_path *path = NULL;       2821         struct ext4_ext_path *path = NULL;
2821         struct partial_cluster partial;       !! 2822         long long partial_cluster = 0;
2822         handle_t *handle;                        2823         handle_t *handle;
2823         int i = 0, err = 0;                      2824         int i = 0, err = 0;
2824                                                  2825 
2825         partial.pclu = 0;                     !! 2826         ext_debug("truncate since %u to %u\n", start, end);
2826         partial.lblk = 0;                     << 
2827         partial.state = initial;              << 
2828                                               << 
2829         ext_debug(inode, "truncate since %u t << 
2830                                                  2827 
2831         /* probably first extent we're gonna     2828         /* probably first extent we're gonna free will be last in block */
2832         handle = ext4_journal_start_with_revo !! 2829         handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, depth + 1);
2833                         depth + 1,            << 
2834                         ext4_free_metadata_re << 
2835         if (IS_ERR(handle))                      2830         if (IS_ERR(handle))
2836                 return PTR_ERR(handle);          2831                 return PTR_ERR(handle);
2837                                                  2832 
2838 again:                                           2833 again:
2839         trace_ext4_ext_remove_space(inode, st    2834         trace_ext4_ext_remove_space(inode, start, end, depth);
2840                                                  2835 
2841         /*                                       2836         /*
2842          * Check if we are removing extents i    2837          * Check if we are removing extents inside the extent tree. If that
2843          * is the case, we are going to punch    2838          * is the case, we are going to punch a hole inside the extent tree
2844          * so we have to check whether we nee    2839          * so we have to check whether we need to split the extent covering
2845          * the last block to remove so we can    2840          * the last block to remove so we can easily remove the part of it
2846          * in ext4_ext_rm_leaf().                2841          * in ext4_ext_rm_leaf().
2847          */                                      2842          */
2848         if (end < EXT_MAX_BLOCKS - 1) {          2843         if (end < EXT_MAX_BLOCKS - 1) {
2849                 struct ext4_extent *ex;          2844                 struct ext4_extent *ex;
2850                 ext4_lblk_t ee_block, ex_end,    2845                 ext4_lblk_t ee_block, ex_end, lblk;
2851                 ext4_fsblk_t pblk;               2846                 ext4_fsblk_t pblk;
2852                                                  2847 
2853                 /* find extent for or closest    2848                 /* find extent for or closest extent to this block */
2854                 path = ext4_find_extent(inode !! 2849                 path = ext4_find_extent(inode, end, NULL, EXT4_EX_NOCACHE);
2855                                         EXT4_ << 
2856                 if (IS_ERR(path)) {              2850                 if (IS_ERR(path)) {
2857                         ext4_journal_stop(han    2851                         ext4_journal_stop(handle);
2858                         return PTR_ERR(path);    2852                         return PTR_ERR(path);
2859                 }                                2853                 }
2860                 depth = ext_depth(inode);        2854                 depth = ext_depth(inode);
2861                 /* Leaf may not exist only if     2855                 /* Leaf may not exist only if inode has no blocks at all */
2862                 ex = path[depth].p_ext;          2856                 ex = path[depth].p_ext;
2863                 if (!ex) {                       2857                 if (!ex) {
2864                         if (depth) {             2858                         if (depth) {
2865                                 EXT4_ERROR_IN    2859                                 EXT4_ERROR_INODE(inode,
2866                                                  2860                                                  "path[%d].p_hdr == NULL",
2867                                                  2861                                                  depth);
2868                                 err = -EFSCOR    2862                                 err = -EFSCORRUPTED;
2869                         }                        2863                         }
2870                         goto out;                2864                         goto out;
2871                 }                                2865                 }
2872                                                  2866 
2873                 ee_block = le32_to_cpu(ex->ee    2867                 ee_block = le32_to_cpu(ex->ee_block);
2874                 ex_end = ee_block + ext4_ext_    2868                 ex_end = ee_block + ext4_ext_get_actual_len(ex) - 1;
2875                                                  2869 
2876                 /*                               2870                 /*
2877                  * See if the last block is i    2871                  * See if the last block is inside the extent, if so split
2878                  * the extent at 'end' block     2872                  * the extent at 'end' block so we can easily remove the
2879                  * tail of the first part of     2873                  * tail of the first part of the split extent in
2880                  * ext4_ext_rm_leaf().           2874                  * ext4_ext_rm_leaf().
2881                  */                              2875                  */
2882                 if (end >= ee_block && end <     2876                 if (end >= ee_block && end < ex_end) {
2883                                                  2877 
2884                         /*                       2878                         /*
2885                          * If we're going to     2879                          * If we're going to split the extent, note that
2886                          * the cluster contai    2880                          * the cluster containing the block after 'end' is
2887                          * in use to avoid fr    2881                          * in use to avoid freeing it when removing blocks.
2888                          */                      2882                          */
2889                         if (sbi->s_cluster_ra    2883                         if (sbi->s_cluster_ratio > 1) {
2890                                 pblk = ext4_e !! 2884                                 pblk = ext4_ext_pblock(ex) + end - ee_block + 2;
2891                                 partial.pclu  !! 2885                                 partial_cluster =
2892                                 partial.state !! 2886                                         -(long long) EXT4_B2C(sbi, pblk);
2893                         }                        2887                         }
2894                                                  2888 
2895                         /*                       2889                         /*
2896                          * Split the extent i    2890                          * Split the extent in two so that 'end' is the last
2897                          * block in the first    2891                          * block in the first new extent. Also we should not
2898                          * fail removing spac    2892                          * fail removing space due to ENOSPC so try to use
2899                          * reserved block if     2893                          * reserved block if that happens.
2900                          */                      2894                          */
2901                         path = ext4_force_spl !! 2895                         err = ext4_force_split_extent_at(handle, inode, &path,
2902                                               !! 2896                                                          end + 1, 1);
2903                         if (IS_ERR(path)) {   !! 2897                         if (err < 0)
2904                                 err = PTR_ERR << 
2905                                 goto out;        2898                                 goto out;
2906                         }                     !! 2899 
2907                 } else if (sbi->s_cluster_rat !! 2900                 } else if (sbi->s_cluster_ratio > 1 && end >= ex_end) {
2908                            partial.state == i << 
2909                         /*                       2901                         /*
2910                          * If we're punching, !! 2902                          * If there's an extent to the right its first cluster
2911                          * If the partial clu !! 2903                          * contains the immediate right boundary of the
2912                          * that extent's firs !! 2904                          * truncated/punched region.  Set partial_cluster to
2913                          * so it won't be fre !! 2905                          * its negative value so it won't be freed if shared
2914                          * removed. If it's a !! 2906                          * with the current extent.  The end < ee_block case
2915                          * retrying and keep  !! 2907                          * is handled in ext4_ext_rm_leaf().
2916                          * so a cluster marke << 
2917                          * extent removal is  << 
2918                          */                      2908                          */
2919                         lblk = ex_end + 1;       2909                         lblk = ex_end + 1;
2920                         err = ext4_ext_search    2910                         err = ext4_ext_search_right(inode, path, &lblk, &pblk,
2921                                               !! 2911                                                     &ex);
2922                         if (err < 0)          !! 2912                         if (err)
2923                                 goto out;        2913                                 goto out;
2924                         if (pblk) {           !! 2914                         if (pblk)
2925                                 partial.pclu  !! 2915                                 partial_cluster =
2926                                 partial.state !! 2916                                         -(long long) EXT4_B2C(sbi, pblk);
2927                         }                     << 
2928                 }                                2917                 }
2929         }                                        2918         }
2930         /*                                       2919         /*
2931          * We start scanning from right side,    2920          * We start scanning from right side, freeing all the blocks
2932          * after i_size and walking into the     2921          * after i_size and walking into the tree depth-wise.
2933          */                                      2922          */
2934         depth = ext_depth(inode);                2923         depth = ext_depth(inode);
2935         if (path) {                              2924         if (path) {
2936                 int k = i = depth;               2925                 int k = i = depth;
2937                 while (--k > 0)                  2926                 while (--k > 0)
2938                         path[k].p_block =        2927                         path[k].p_block =
2939                                 le16_to_cpu(p    2928                                 le16_to_cpu(path[k].p_hdr->eh_entries)+1;
2940         } else {                                 2929         } else {
2941                 path = kcalloc(depth + 1, siz    2930                 path = kcalloc(depth + 1, sizeof(struct ext4_ext_path),
2942                                GFP_NOFS | __G !! 2931                                GFP_NOFS);
2943                 if (path == NULL) {              2932                 if (path == NULL) {
2944                         ext4_journal_stop(han    2933                         ext4_journal_stop(handle);
2945                         return -ENOMEM;          2934                         return -ENOMEM;
2946                 }                                2935                 }
2947                 path[0].p_maxdepth = path[0].    2936                 path[0].p_maxdepth = path[0].p_depth = depth;
2948                 path[0].p_hdr = ext_inode_hdr    2937                 path[0].p_hdr = ext_inode_hdr(inode);
2949                 i = 0;                           2938                 i = 0;
2950                                                  2939 
2951                 if (ext4_ext_check(inode, pat    2940                 if (ext4_ext_check(inode, path[0].p_hdr, depth, 0)) {
2952                         err = -EFSCORRUPTED;     2941                         err = -EFSCORRUPTED;
2953                         goto out;                2942                         goto out;
2954                 }                                2943                 }
2955         }                                        2944         }
2956         err = 0;                                 2945         err = 0;
2957                                                  2946 
2958         while (i >= 0 && err == 0) {             2947         while (i >= 0 && err == 0) {
2959                 if (i == depth) {                2948                 if (i == depth) {
2960                         /* this is leaf block    2949                         /* this is leaf block */
2961                         err = ext4_ext_rm_lea    2950                         err = ext4_ext_rm_leaf(handle, inode, path,
2962                                               !! 2951                                                &partial_cluster, start,
                                                   >> 2952                                                end);
2963                         /* root level has p_b    2953                         /* root level has p_bh == NULL, brelse() eats this */
2964                         ext4_ext_path_brelse( !! 2954                         brelse(path[i].p_bh);
                                                   >> 2955                         path[i].p_bh = NULL;
2965                         i--;                     2956                         i--;
2966                         continue;                2957                         continue;
2967                 }                                2958                 }
2968                                                  2959 
2969                 /* this is index block */        2960                 /* this is index block */
2970                 if (!path[i].p_hdr) {            2961                 if (!path[i].p_hdr) {
2971                         ext_debug(inode, "ini !! 2962                         ext_debug("initialize header\n");
2972                         path[i].p_hdr = ext_b    2963                         path[i].p_hdr = ext_block_hdr(path[i].p_bh);
2973                 }                                2964                 }
2974                                                  2965 
2975                 if (!path[i].p_idx) {            2966                 if (!path[i].p_idx) {
2976                         /* this level hasn't     2967                         /* this level hasn't been touched yet */
2977                         path[i].p_idx = EXT_L    2968                         path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
2978                         path[i].p_block = le1    2969                         path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
2979                         ext_debug(inode, "ini !! 2970                         ext_debug("init index ptr: hdr 0x%p, num %d\n",
2980                                   path[i].p_h    2971                                   path[i].p_hdr,
2981                                   le16_to_cpu    2972                                   le16_to_cpu(path[i].p_hdr->eh_entries));
2982                 } else {                         2973                 } else {
2983                         /* we were already he    2974                         /* we were already here, see at next index */
2984                         path[i].p_idx--;         2975                         path[i].p_idx--;
2985                 }                                2976                 }
2986                                                  2977 
2987                 ext_debug(inode, "level %d -  !! 2978                 ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
2988                                 i, EXT_FIRST_    2979                                 i, EXT_FIRST_INDEX(path[i].p_hdr),
2989                                 path[i].p_idx    2980                                 path[i].p_idx);
2990                 if (ext4_ext_more_to_rm(path     2981                 if (ext4_ext_more_to_rm(path + i)) {
2991                         struct buffer_head *b    2982                         struct buffer_head *bh;
2992                         /* go to the next lev    2983                         /* go to the next level */
2993                         ext_debug(inode, "mov !! 2984                         ext_debug("move to level %d (block %llu)\n",
2994                                   i + 1, ext4    2985                                   i + 1, ext4_idx_pblock(path[i].p_idx));
2995                         memset(path + i + 1,     2986                         memset(path + i + 1, 0, sizeof(*path));
2996                         bh = read_extent_tree !! 2987                         bh = read_extent_tree_block(inode,
2997                                               !! 2988                                 ext4_idx_pblock(path[i].p_idx), depth - i - 1,
2998                                               !! 2989                                 EXT4_EX_NOCACHE);
2999                         if (IS_ERR(bh)) {        2990                         if (IS_ERR(bh)) {
3000                                 /* should we     2991                                 /* should we reset i_size? */
3001                                 err = PTR_ERR    2992                                 err = PTR_ERR(bh);
3002                                 break;           2993                                 break;
3003                         }                        2994                         }
3004                         /* Yield here to deal    2995                         /* Yield here to deal with large extent trees.
3005                          * Should be a no-op     2996                          * Should be a no-op if we did IO above. */
3006                         cond_resched();          2997                         cond_resched();
3007                         if (WARN_ON(i + 1 > d    2998                         if (WARN_ON(i + 1 > depth)) {
3008                                 err = -EFSCOR    2999                                 err = -EFSCORRUPTED;
3009                                 break;           3000                                 break;
3010                         }                        3001                         }
3011                         path[i + 1].p_bh = bh    3002                         path[i + 1].p_bh = bh;
3012                                                  3003 
3013                         /* save actual number    3004                         /* save actual number of indexes since this
3014                          * number is changed     3005                          * number is changed at the next iteration */
3015                         path[i].p_block = le1    3006                         path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
3016                         i++;                     3007                         i++;
3017                 } else {                         3008                 } else {
3018                         /* we finished proces    3009                         /* we finished processing this index, go up */
3019                         if (path[i].p_hdr->eh    3010                         if (path[i].p_hdr->eh_entries == 0 && i > 0) {
3020                                 /* index is e    3011                                 /* index is empty, remove it;
3021                                  * handle mus    3012                                  * handle must be already prepared by the
3022                                  * truncatei_    3013                                  * truncatei_leaf() */
3023                                 err = ext4_ex    3014                                 err = ext4_ext_rm_idx(handle, inode, path, i);
3024                         }                        3015                         }
3025                         /* root level has p_b    3016                         /* root level has p_bh == NULL, brelse() eats this */
3026                         ext4_ext_path_brelse( !! 3017                         brelse(path[i].p_bh);
                                                   >> 3018                         path[i].p_bh = NULL;
3027                         i--;                     3019                         i--;
3028                         ext_debug(inode, "ret !! 3020                         ext_debug("return to level %d\n", i);
3029                 }                                3021                 }
3030         }                                        3022         }
3031                                                  3023 
3032         trace_ext4_ext_remove_space_done(inod !! 3024         trace_ext4_ext_remove_space_done(inode, start, end, depth,
3033                                          path !! 3025                         partial_cluster, path->p_hdr->eh_entries);
3034                                                  3026 
3035         /*                                       3027         /*
3036          * if there's a partial cluster and w !! 3028          * If we still have something in the partial cluster and we have removed
3037          * in the file, then we also free the !! 3029          * even the first extent, then we should free the blocks in the partial
                                                   >> 3030          * cluster as well.  (This code will only run when there are no leaves
                                                   >> 3031          * to the immediate left of the truncated/punched region.)
3038          */                                      3032          */
3039         if (partial.state == tofree && err == !! 3033         if (partial_cluster > 0 && err == 0) {
3040                 int flags = get_default_free_ !! 3034                 /* don't zero partial_cluster since it's not used afterwards */
3041                                               << 
3042                 if (ext4_is_pending(inode, pa << 
3043                         flags |= EXT4_FREE_BL << 
3044                 ext4_free_blocks(handle, inod    3035                 ext4_free_blocks(handle, inode, NULL,
3045                                  EXT4_C2B(sbi !! 3036                                  EXT4_C2B(sbi, partial_cluster),
3046                                  sbi->s_clust !! 3037                                  sbi->s_cluster_ratio,
3047                 if (flags & EXT4_FREE_BLOCKS_ !! 3038                                  get_default_free_blocks_flags(inode));
3048                         ext4_rereserve_cluste << 
3049                 partial.state = initial;      << 
3050         }                                        3039         }
3051                                                  3040 
3052         /* TODO: flexible tree reduction shou    3041         /* TODO: flexible tree reduction should be here */
3053         if (path->p_hdr->eh_entries == 0) {      3042         if (path->p_hdr->eh_entries == 0) {
3054                 /*                               3043                 /*
3055                  * truncate to zero freed all    3044                  * truncate to zero freed all the tree,
3056                  * so we need to correct eh_d    3045                  * so we need to correct eh_depth
3057                  */                              3046                  */
3058                 err = ext4_ext_get_access(han    3047                 err = ext4_ext_get_access(handle, inode, path);
3059                 if (err == 0) {                  3048                 if (err == 0) {
3060                         ext_inode_hdr(inode)-    3049                         ext_inode_hdr(inode)->eh_depth = 0;
3061                         ext_inode_hdr(inode)-    3050                         ext_inode_hdr(inode)->eh_max =
3062                                 cpu_to_le16(e    3051                                 cpu_to_le16(ext4_ext_space_root(inode, 0));
3063                         err = ext4_ext_dirty(    3052                         err = ext4_ext_dirty(handle, inode, path);
3064                 }                                3053                 }
3065         }                                        3054         }
3066 out:                                             3055 out:
3067         ext4_free_ext_path(path);             !! 3056         ext4_ext_drop_refs(path);
                                                   >> 3057         kfree(path);
3068         path = NULL;                             3058         path = NULL;
3069         if (err == -EAGAIN)                      3059         if (err == -EAGAIN)
3070                 goto again;                      3060                 goto again;
3071         ext4_journal_stop(handle);               3061         ext4_journal_stop(handle);
3072                                                  3062 
3073         return err;                              3063         return err;
3074 }                                                3064 }
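
For context, ext4_ext_remove_space() is handed a logical block range to drop; a truncate-style caller first rounds the new size up to the next block boundary so that any block still holding valid data is kept. Below is a minimal userspace sketch of that rounding (the size and block-size values are assumptions chosen for illustration; this is not the kernel's truncate path itself).

/*
 * Illustrative sketch only: the round-up a truncate-style caller performs
 * to find the first logical block past the new i_size before asking
 * ext4_ext_remove_space() to drop everything from that block through
 * EXT_MAX_BLOCKS - 1.  All values below are assumptions for the example.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long new_size = 10000;   /* assumed new i_size in bytes */
	unsigned int blkbits = 12;             /* assumed 4 KiB block size    */
	unsigned long long blocksize = 1ULL << blkbits;

	/* First logical block that lies entirely beyond new_size. */
	unsigned long long last_block = (new_size + blocksize - 1) >> blkbits;

	printf("truncate to %llu bytes -> remove from logical block %llu onward\n",
	       new_size, last_block);
	return 0;
}
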
3075                                                  3065 
3076 /*                                               3066 /*
3077  * called at mount time                          3067  * called at mount time
3078  */                                              3068  */
3079 void ext4_ext_init(struct super_block *sb)       3069 void ext4_ext_init(struct super_block *sb)
3080 {                                                3070 {
3081         /*                                       3071         /*
3082          * possible initialization would be h    3072          * possible initialization would be here
3083          */                                      3073          */
3084                                                  3074 
3085         if (ext4_has_feature_extents(sb)) {      3075         if (ext4_has_feature_extents(sb)) {
3086 #if defined(AGGRESSIVE_TEST) || defined(CHECK    3076 #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
3087                 printk(KERN_INFO "EXT4-fs: fi    3077                 printk(KERN_INFO "EXT4-fs: file extents enabled"
3088 #ifdef AGGRESSIVE_TEST                           3078 #ifdef AGGRESSIVE_TEST
3089                        ", aggressive tests"      3079                        ", aggressive tests"
3090 #endif                                           3080 #endif
3091 #ifdef CHECK_BINSEARCH                           3081 #ifdef CHECK_BINSEARCH
3092                        ", check binsearch"       3082                        ", check binsearch"
3093 #endif                                           3083 #endif
3094 #ifdef EXTENTS_STATS                             3084 #ifdef EXTENTS_STATS
3095                        ", stats"                 3085                        ", stats"
3096 #endif                                           3086 #endif
3097                        "\n");                    3087                        "\n");
3098 #endif                                           3088 #endif
3099 #ifdef EXTENTS_STATS                             3089 #ifdef EXTENTS_STATS
3100                 spin_lock_init(&EXT4_SB(sb)->    3090                 spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
3101                 EXT4_SB(sb)->s_ext_min = 1 <<    3091                 EXT4_SB(sb)->s_ext_min = 1 << 30;
3102                 EXT4_SB(sb)->s_ext_max = 0;      3092                 EXT4_SB(sb)->s_ext_max = 0;
3103 #endif                                           3093 #endif
3104         }                                        3094         }
3105 }                                                3095 }
3106                                                  3096 
3107 /*                                               3097 /*
3108  * called at umount time                         3098  * called at umount time
3109  */                                              3099  */
3110 void ext4_ext_release(struct super_block *sb)    3100 void ext4_ext_release(struct super_block *sb)
3111 {                                                3101 {
3112         if (!ext4_has_feature_extents(sb))       3102         if (!ext4_has_feature_extents(sb))
3113                 return;                          3103                 return;
3114                                                  3104 
3115 #ifdef EXTENTS_STATS                             3105 #ifdef EXTENTS_STATS
3116         if (EXT4_SB(sb)->s_ext_blocks && EXT4    3106         if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
3117                 struct ext4_sb_info *sbi = EX    3107                 struct ext4_sb_info *sbi = EXT4_SB(sb);
3118                 printk(KERN_ERR "EXT4-fs: %lu    3108                 printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
3119                         sbi->s_ext_blocks, sb    3109                         sbi->s_ext_blocks, sbi->s_ext_extents,
3120                         sbi->s_ext_blocks / s    3110                         sbi->s_ext_blocks / sbi->s_ext_extents);
3121                 printk(KERN_ERR "EXT4-fs: ext    3111                 printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
3122                         sbi->s_ext_min, sbi->    3112                         sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
3123         }                                        3113         }
3124 #endif                                           3114 #endif
3125 }                                                3115 }
3126                                                  3116 
3127 static void ext4_zeroout_es(struct inode *ino !! 3117 static int ext4_zeroout_es(struct inode *inode, struct ext4_extent *ex)
3128 {                                                3118 {
3129         ext4_lblk_t  ee_block;                   3119         ext4_lblk_t  ee_block;
3130         ext4_fsblk_t ee_pblock;                  3120         ext4_fsblk_t ee_pblock;
3131         unsigned int ee_len;                     3121         unsigned int ee_len;
3132                                                  3122 
3133         ee_block  = le32_to_cpu(ex->ee_block)    3123         ee_block  = le32_to_cpu(ex->ee_block);
3134         ee_len    = ext4_ext_get_actual_len(e    3124         ee_len    = ext4_ext_get_actual_len(ex);
3135         ee_pblock = ext4_ext_pblock(ex);         3125         ee_pblock = ext4_ext_pblock(ex);
3136                                                  3126 
3137         if (ee_len == 0)                         3127         if (ee_len == 0)
3138                 return;                       !! 3128                 return 0;
3139                                                  3129 
3140         ext4_es_insert_extent(inode, ee_block !! 3130         return ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock,
3141                               EXTENT_STATUS_W !! 3131                                      EXTENT_STATUS_WRITTEN);
3142 }                                                3132 }
3143                                                  3133 
3144 /* FIXME!! we need to try to merge to left or    3134 /* FIXME!! we need to try to merge to left or right after zero-out  */
3145 static int ext4_ext_zeroout(struct inode *ino    3135 static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
3146 {                                                3136 {
3147         ext4_fsblk_t ee_pblock;                  3137         ext4_fsblk_t ee_pblock;
3148         unsigned int ee_len;                     3138         unsigned int ee_len;
3149                                                  3139 
3150         ee_len    = ext4_ext_get_actual_len(e    3140         ee_len    = ext4_ext_get_actual_len(ex);
3151         ee_pblock = ext4_ext_pblock(ex);         3141         ee_pblock = ext4_ext_pblock(ex);
3152         return ext4_issue_zeroout(inode, le32    3142         return ext4_issue_zeroout(inode, le32_to_cpu(ex->ee_block), ee_pblock,
3153                                   ee_len);       3143                                   ee_len);
3154 }                                                3144 }
3155                                                  3145 
3156 /*                                               3146 /*
3157  * ext4_split_extent_at() splits an extent at    3147  * ext4_split_extent_at() splits an extent at given block.
3158  *                                               3148  *
3159  * @handle: the journal handle                   3149  * @handle: the journal handle
3160  * @inode: the file inode                        3150  * @inode: the file inode
3161  * @path: the path to the extent                 3151  * @path: the path to the extent
3162  * @split: the logical block where the extent    3152  * @split: the logical block where the extent is split.
3163  * @split_flags: indicates if the extent coul    3153  * @split_flags: indicates if the extent could be zeroout if split fails, and
3164  *               the states(init or unwritten    3154  *               the states(init or unwritten) of new extents.
3165  * @flags: flags used to insert new extent to    3155  * @flags: flags used to insert new extent to extent tree.
3166  *                                               3156  *
3167  *                                               3157  *
3168  * Splits extent [a, b] into two extents [a,     3158  * Splits extent [a, b] into two extents [a, @split) and [@split, b], states
3169  * of which are determined by split_flag.     !! 3159  * of which are deterimined by split_flag.
3170  *                                               3160  *
3171  * There are two cases:                          3161  * There are two cases:
3172  *  a> the extent is split into two extents.     3162  *  a> the extent is split into two extents.
3173  *  b> split is not needed, and just mark the    3163  *  b> split is not needed, and just mark the extent.
3174  *                                               3164  *
3175  * Return an extent path pointer on success,  !! 3165  * return 0 on success.
3176  */                                              3166  */
3177 static struct ext4_ext_path *ext4_split_exten !! 3167 static int ext4_split_extent_at(handle_t *handle,
3178                                               !! 3168                              struct inode *inode,
3179                                               !! 3169                              struct ext4_ext_path **ppath,
3180                                               !! 3170                              ext4_lblk_t split,
3181                                               !! 3171                              int split_flag,
                                                   >> 3172                              int flags)
3182 {                                                3173 {
                                                   >> 3174         struct ext4_ext_path *path = *ppath;
3183         ext4_fsblk_t newblock;                   3175         ext4_fsblk_t newblock;
3184         ext4_lblk_t ee_block;                    3176         ext4_lblk_t ee_block;
3185         struct ext4_extent *ex, newex, orig_e    3177         struct ext4_extent *ex, newex, orig_ex, zero_ex;
3186         struct ext4_extent *ex2 = NULL;          3178         struct ext4_extent *ex2 = NULL;
3187         unsigned int ee_len, depth;              3179         unsigned int ee_len, depth;
3188         int err = 0;                             3180         int err = 0;
3189                                                  3181 
3190         BUG_ON((split_flag & (EXT4_EXT_DATA_V    3182         BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) ==
3191                (EXT4_EXT_DATA_VALID1 | EXT4_E    3183                (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2));
3192                                                  3184 
3193         ext_debug(inode, "logical block %llu\ !! 3185         ext_debug("ext4_split_extents_at: inode %lu, logical"
                                                   >> 3186                 "block %llu\n", inode->i_ino, (unsigned long long)split);
3194                                                  3187 
3195         ext4_ext_show_leaf(inode, path);         3188         ext4_ext_show_leaf(inode, path);
3196                                                  3189 
3197         depth = ext_depth(inode);                3190         depth = ext_depth(inode);
3198         ex = path[depth].p_ext;                  3191         ex = path[depth].p_ext;
3199         ee_block = le32_to_cpu(ex->ee_block);    3192         ee_block = le32_to_cpu(ex->ee_block);
3200         ee_len = ext4_ext_get_actual_len(ex);    3193         ee_len = ext4_ext_get_actual_len(ex);
3201         newblock = split - ee_block + ext4_ex    3194         newblock = split - ee_block + ext4_ext_pblock(ex);
3202                                                  3195 
3203         BUG_ON(split < ee_block || split >= (    3196         BUG_ON(split < ee_block || split >= (ee_block + ee_len));
3204         BUG_ON(!ext4_ext_is_unwritten(ex) &&     3197         BUG_ON(!ext4_ext_is_unwritten(ex) &&
3205                split_flag & (EXT4_EXT_MAY_ZER    3198                split_flag & (EXT4_EXT_MAY_ZEROOUT |
3206                              EXT4_EXT_MARK_UN    3199                              EXT4_EXT_MARK_UNWRIT1 |
3207                              EXT4_EXT_MARK_UN    3200                              EXT4_EXT_MARK_UNWRIT2));
3208                                                  3201 
3209         err = ext4_ext_get_access(handle, ino    3202         err = ext4_ext_get_access(handle, inode, path + depth);
3210         if (err)                                 3203         if (err)
3211                 goto out;                        3204                 goto out;
3212                                                  3205 
3213         if (split == ee_block) {                 3206         if (split == ee_block) {
3214                 /*                               3207                 /*
3215                  * case b: block @split is th    3208                  * case b: block @split is the block that the extent begins with
3216                  * then we just change the st    3209                  * then we just change the state of the extent, and splitting
3217                  * is not needed.                3210                  * is not needed.
3218                  */                              3211                  */
3219                 if (split_flag & EXT4_EXT_MAR    3212                 if (split_flag & EXT4_EXT_MARK_UNWRIT2)
3220                         ext4_ext_mark_unwritt    3213                         ext4_ext_mark_unwritten(ex);
3221                 else                             3214                 else
3222                         ext4_ext_mark_initial    3215                         ext4_ext_mark_initialized(ex);
3223                                                  3216 
3224                 if (!(flags & EXT4_GET_BLOCKS    3217                 if (!(flags & EXT4_GET_BLOCKS_PRE_IO))
3225                         ext4_ext_try_to_merge    3218                         ext4_ext_try_to_merge(handle, inode, path, ex);
3226                                                  3219 
3227                 err = ext4_ext_dirty(handle,     3220                 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3228                 goto out;                        3221                 goto out;
3229         }                                        3222         }
3230                                                  3223 
3231         /* case a */                             3224         /* case a */
3232         memcpy(&orig_ex, ex, sizeof(orig_ex))    3225         memcpy(&orig_ex, ex, sizeof(orig_ex));
3233         ex->ee_len = cpu_to_le16(split - ee_b    3226         ex->ee_len = cpu_to_le16(split - ee_block);
3234         if (split_flag & EXT4_EXT_MARK_UNWRIT    3227         if (split_flag & EXT4_EXT_MARK_UNWRIT1)
3235                 ext4_ext_mark_unwritten(ex);     3228                 ext4_ext_mark_unwritten(ex);
3236                                                  3229 
3237         /*                                       3230         /*
3238          * path may lead to new leaf, not to     3231          * path may lead to new leaf, not to original leaf any more
3239          * after ext4_ext_insert_extent() ret    3232          * after ext4_ext_insert_extent() returns,
3240          */                                      3233          */
3241         err = ext4_ext_dirty(handle, inode, p    3234         err = ext4_ext_dirty(handle, inode, path + depth);
3242         if (err)                                 3235         if (err)
3243                 goto fix_extent_len;             3236                 goto fix_extent_len;
3244                                                  3237 
3245         ex2 = &newex;                            3238         ex2 = &newex;
3246         ex2->ee_block = cpu_to_le32(split);      3239         ex2->ee_block = cpu_to_le32(split);
3247         ex2->ee_len   = cpu_to_le16(ee_len -     3240         ex2->ee_len   = cpu_to_le16(ee_len - (split - ee_block));
3248         ext4_ext_store_pblock(ex2, newblock);    3241         ext4_ext_store_pblock(ex2, newblock);
3249         if (split_flag & EXT4_EXT_MARK_UNWRIT    3242         if (split_flag & EXT4_EXT_MARK_UNWRIT2)
3250                 ext4_ext_mark_unwritten(ex2);    3243                 ext4_ext_mark_unwritten(ex2);
3251                                                  3244 
3252         path = ext4_ext_insert_extent(handle, !! 3245         err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags);
3253         if (!IS_ERR(path))                    !! 3246         if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
3254                 goto out;                     << 
3255                                               << 
3256         err = PTR_ERR(path);                  << 
3257         if (err != -ENOSPC && err != -EDQUOT  << 
3258                 return path;                  << 
3259                                               << 
3260         /*                                    << 
3261          * Get a new path to try to zeroout o << 
3262          * Using EXT4_EX_NOFAIL guarantees th << 
3263          * will not return -ENOMEM, otherwise << 
3264          * retry in do_writepages(), and a WA << 
3265          * in ext4_da_update_reserve_space()  << 
3266          * ee_len causing the i_reserved_data << 
3267          */                                   << 
3268         path = ext4_find_extent(inode, ee_blo << 
3269         if (IS_ERR(path)) {                   << 
3270                 EXT4_ERROR_INODE(inode, "Fail << 
3271                                  split, PTR_E << 
3272                 return path;                  << 
3273         }                                     << 
3274         depth = ext_depth(inode);             << 
3275         ex = path[depth].p_ext;               << 
3276                                               << 
3277         if (EXT4_EXT_MAY_ZEROOUT & split_flag << 
3278                 if (split_flag & (EXT4_EXT_DA    3247                 if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
3279                         if (split_flag & EXT4    3248                         if (split_flag & EXT4_EXT_DATA_VALID1) {
3280                                 err = ext4_ex    3249                                 err = ext4_ext_zeroout(inode, ex2);
3281                                 zero_ex.ee_bl    3250                                 zero_ex.ee_block = ex2->ee_block;
3282                                 zero_ex.ee_le    3251                                 zero_ex.ee_len = cpu_to_le16(
3283                                                  3252                                                 ext4_ext_get_actual_len(ex2));
3284                                 ext4_ext_stor    3253                                 ext4_ext_store_pblock(&zero_ex,
3285                                                  3254                                                       ext4_ext_pblock(ex2));
3286                         } else {                 3255                         } else {
3287                                 err = ext4_ex    3256                                 err = ext4_ext_zeroout(inode, ex);
3288                                 zero_ex.ee_bl    3257                                 zero_ex.ee_block = ex->ee_block;
3289                                 zero_ex.ee_le    3258                                 zero_ex.ee_len = cpu_to_le16(
3290                                                  3259                                                 ext4_ext_get_actual_len(ex));
3291                                 ext4_ext_stor    3260                                 ext4_ext_store_pblock(&zero_ex,
3292                                                  3261                                                       ext4_ext_pblock(ex));
3293                         }                        3262                         }
3294                 } else {                         3263                 } else {
3295                         err = ext4_ext_zeroou    3264                         err = ext4_ext_zeroout(inode, &orig_ex);
3296                         zero_ex.ee_block = or    3265                         zero_ex.ee_block = orig_ex.ee_block;
3297                         zero_ex.ee_len = cpu_    3266                         zero_ex.ee_len = cpu_to_le16(
3298                                                  3267                                                 ext4_ext_get_actual_len(&orig_ex));
3299                         ext4_ext_store_pblock    3268                         ext4_ext_store_pblock(&zero_ex,
3300                                                  3269                                               ext4_ext_pblock(&orig_ex));
3301                 }                                3270                 }
3302                                                  3271 
3303                 if (!err) {                   !! 3272                 if (err)
3304                         /* update the extent  !! 3273                         goto fix_extent_len;
3305                         ex->ee_len = cpu_to_l !! 3274                 /* update the extent length and mark as initialized */
3306                         ext4_ext_try_to_merge !! 3275                 ex->ee_len = cpu_to_le16(ee_len);
3307                         err = ext4_ext_dirty( !! 3276                 ext4_ext_try_to_merge(handle, inode, path, ex);
3308                         if (!err)             !! 3277                 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3309                                 /* update ext !! 3278                 if (err)
3310                                 ext4_zeroout_ !! 3279                         goto fix_extent_len;
3311                         /* If we failed at th !! 3280 
3312                          * state the extent t !! 3281                 /* update extent status tree */
3313                          * length of the orig !! 3282                 err = ext4_zeroout_es(inode, &zero_ex);
3314                          * damage.            !! 3283 
3315                          */                   !! 3284                 goto out;
3316                         goto out;             !! 3285         } else if (err)
3317                 }                             !! 3286                 goto fix_extent_len;
3318         }                                     !! 3287 
                                                   >> 3288 out:
                                                   >> 3289         ext4_ext_show_leaf(inode, path);
                                                   >> 3290         return err;
3319                                                  3291 
3320 fix_extent_len:                                  3292 fix_extent_len:
3321         ex->ee_len = orig_ex.ee_len;             3293         ex->ee_len = orig_ex.ee_len;
3322         /*                                    << 
3323          * Ignore ext4_ext_dirty return value << 
3324          * and err is a non-zero error code.  << 
3325          */                                   << 
3326         ext4_ext_dirty(handle, inode, path +     3294         ext4_ext_dirty(handle, inode, path + path->p_depth);
3327 out:                                          !! 3295         return err;
3328         if (err) {                            << 
3329                 ext4_free_ext_path(path);     << 
3330                 path = ERR_PTR(err);          << 
3331         }                                     << 
3332         ext4_ext_show_leaf(inode, path);      << 
3333         return path;                          << 
3334 }                                                3296 }
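
To make the "case a" arithmetic above concrete, here is a small self-contained sketch (plain userspace C with a toy extent struct, not the real struct ext4_extent) of how splitting an extent at a logical block shortens the original in place and derives the second half's starting physical block, mirroring how newblock is computed from split - ee_block.

/*
 * Illustrative sketch only: models the logical/physical arithmetic of
 * splitting an extent [lblk, lblk+len) mapped at pblk into two pieces
 * at 'split'.  The head keeps its physical start; the tail begins at
 * the matching physical offset.
 */
#include <stdio.h>

struct toy_extent {
	unsigned int lblk;              /* first logical block  */
	unsigned int len;               /* number of blocks     */
	unsigned long long pblk;        /* first physical block */
};

static struct toy_extent split_tail(struct toy_extent *ex, unsigned int split)
{
	struct toy_extent tail = {
		.lblk = split,
		.len  = ex->len - (split - ex->lblk),
		.pblk = ex->pblk + (split - ex->lblk),
	};
	ex->len = split - ex->lblk;     /* shrink the original in place */
	return tail;
}

int main(void)
{
	struct toy_extent ex = { .lblk = 100, .len = 50, .pblk = 5000 };
	struct toy_extent tail = split_tail(&ex, 120);

	printf("head: [%u..%u) -> pblk %llu\n", ex.lblk, ex.lblk + ex.len, ex.pblk);
	printf("tail: [%u..%u) -> pblk %llu\n", tail.lblk, tail.lblk + tail.len, tail.pblk);
	return 0;
}
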
3335                                                  3297 
3336 /*                                               3298 /*
3337  * ext4_split_extent() splits an extent and m !! 3299  * ext4_split_extents() splits an extent and mark extent which is covered
3338  * by @map as split_flags indicates              3300  * by @map as split_flags indicates
3339  *                                               3301  *
3340  * It may result in splitting the extent into    3302  * It may result in splitting the extent into multiple extents (up to three)
3341  * There are three possibilities:                3303  * There are three possibilities:
3342  *   a> There is no split required               3304  *   a> There is no split required
3343  *   b> Splits in two extents: Split is happe    3305  *   b> Splits in two extents: Split is happening at either end of the extent
3344  *   c> Splits in three extents: Someone is s    3306  *   c> Splits in three extents: Someone is splitting in the middle of the extent
3345  *                                               3307  *
3346  */                                              3308  */
3347 static struct ext4_ext_path *ext4_split_exten !! 3309 static int ext4_split_extent(handle_t *handle,
3348                                               !! 3310                               struct inode *inode,
3349                                               !! 3311                               struct ext4_ext_path **ppath,
3350                                               !! 3312                               struct ext4_map_blocks *map,
3351                                               !! 3313                               int split_flag,
3352                                               !! 3314                               int flags)
3353 {                                                3315 {
                                                   >> 3316         struct ext4_ext_path *path = *ppath;
3354         ext4_lblk_t ee_block;                    3317         ext4_lblk_t ee_block;
3355         struct ext4_extent *ex;                  3318         struct ext4_extent *ex;
3356         unsigned int ee_len, depth;              3319         unsigned int ee_len, depth;
                                                   >> 3320         int err = 0;
3357         int unwritten;                           3321         int unwritten;
3358         int split_flag1, flags1;                 3322         int split_flag1, flags1;
                                                   >> 3323         int allocated = map->m_len;
3359                                                  3324 
3360         depth = ext_depth(inode);                3325         depth = ext_depth(inode);
3361         ex = path[depth].p_ext;                  3326         ex = path[depth].p_ext;
3362         ee_block = le32_to_cpu(ex->ee_block);    3327         ee_block = le32_to_cpu(ex->ee_block);
3363         ee_len = ext4_ext_get_actual_len(ex);    3328         ee_len = ext4_ext_get_actual_len(ex);
3364         unwritten = ext4_ext_is_unwritten(ex)    3329         unwritten = ext4_ext_is_unwritten(ex);
3365                                                  3330 
3366         if (map->m_lblk + map->m_len < ee_blo    3331         if (map->m_lblk + map->m_len < ee_block + ee_len) {
3367                 split_flag1 = split_flag & EX    3332                 split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT;
3368                 flags1 = flags | EXT4_GET_BLO    3333                 flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
3369                 if (unwritten)                   3334                 if (unwritten)
3370                         split_flag1 |= EXT4_E    3335                         split_flag1 |= EXT4_EXT_MARK_UNWRIT1 |
3371                                        EXT4_E    3336                                        EXT4_EXT_MARK_UNWRIT2;
3372                 if (split_flag & EXT4_EXT_DAT    3337                 if (split_flag & EXT4_EXT_DATA_VALID2)
3373                         split_flag1 |= EXT4_E    3338                         split_flag1 |= EXT4_EXT_DATA_VALID1;
3374                 path = ext4_split_extent_at(h !! 3339                 err = ext4_split_extent_at(handle, inode, ppath,
3375                                 map->m_lblk +    3340                                 map->m_lblk + map->m_len, split_flag1, flags1);
3376                 if (IS_ERR(path))             !! 3341                 if (err)
3377                         return path;          !! 3342                         goto out;
3378                 /*                            !! 3343         } else {
3379                  * Update path is required be !! 3344                 allocated = ee_len - (map->m_lblk - ee_block);
3380                  * may result in split of ori !! 3345         }
3381                  */                           !! 3346         /*
3382                 path = ext4_find_extent(inode !! 3347          * Update path is required because previous ext4_split_extent_at() may
3383                 if (IS_ERR(path))             !! 3348          * result in split of original leaf or extent zeroout.
3384                         return path;          !! 3349          */
3385                 depth = ext_depth(inode);     !! 3350         path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
3386                 ex = path[depth].p_ext;       !! 3351         if (IS_ERR(path))
3387                 if (!ex) {                    !! 3352                 return PTR_ERR(path);
3388                         EXT4_ERROR_INODE(inod !! 3353         depth = ext_depth(inode);
3389                                         (unsi !! 3354         ex = path[depth].p_ext;
3390                         ext4_free_ext_path(pa !! 3355         if (!ex) {
3391                         return ERR_PTR(-EFSCO !! 3356                 EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
3392                 }                             !! 3357                                  (unsigned long) map->m_lblk);
3393                 unwritten = ext4_ext_is_unwri !! 3358                 return -EFSCORRUPTED;
3394         }                                        3359         }
                                                   >> 3360         unwritten = ext4_ext_is_unwritten(ex);
                                                   >> 3361         split_flag1 = 0;
3395                                                  3362 
3396         if (map->m_lblk >= ee_block) {           3363         if (map->m_lblk >= ee_block) {
3397                 split_flag1 = split_flag & EX    3364                 split_flag1 = split_flag & EXT4_EXT_DATA_VALID2;
3398                 if (unwritten) {                 3365                 if (unwritten) {
3399                         split_flag1 |= EXT4_E    3366                         split_flag1 |= EXT4_EXT_MARK_UNWRIT1;
3400                         split_flag1 |= split_    3367                         split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT |
3401                                                  3368                                                      EXT4_EXT_MARK_UNWRIT2);
3402                 }                                3369                 }
3403                 path = ext4_split_extent_at(h !! 3370                 err = ext4_split_extent_at(handle, inode, ppath,
3404                                 map->m_lblk,     3371                                 map->m_lblk, split_flag1, flags);
3405                 if (IS_ERR(path))             !! 3372                 if (err)
3406                         return path;          !! 3373                         goto out;
3407         }                                        3374         }
3408                                                  3375 
3409         if (allocated) {                      << 
3410                 if (map->m_lblk + map->m_len  << 
3411                         *allocated = ee_len - << 
3412                 else                          << 
3413                         *allocated = map->m_l << 
3414         }                                     << 
3415         ext4_ext_show_leaf(inode, path);         3376         ext4_ext_show_leaf(inode, path);
3416         return path;                          !! 3377 out:
                                                   >> 3378         return err ? err : allocated;
3417 }                                                3379 }
3418                                                  3380 
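As a rough illustration of the case analysis documented above ext4_split_extent(): whether a split produces one, two or three extents depends only on whether an unmapped head and/or tail of the original extent remains. The sketch below is plain user-space C with illustrative names, not kernel code.

        #include <stdio.h>

        /* Count the pieces a split would produce: the mapped middle part plus
         * an optional unmapped head and an optional unmapped tail. */
        static int split_pieces(unsigned int ee_block, unsigned int ee_len,
                                unsigned int m_lblk, unsigned int m_len)
        {
                int pieces = 1;                                 /* the mapped part itself */

                if (m_lblk > ee_block)                          /* head left over on the left  */
                        pieces++;
                if (m_lblk + m_len < ee_block + ee_len)         /* tail left over on the right */
                        pieces++;
                return pieces;
        }

        int main(void)
        {
                printf("%d\n", split_pieces(100, 50, 100, 50)); /* a> no split            -> 1 */
                printf("%d\n", split_pieces(100, 50, 100, 10)); /* b> split at one end    -> 2 */
                printf("%d\n", split_pieces(100, 50, 120, 10)); /* c> split in the middle -> 3 */
                return 0;
        }
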
3419 /*                                               3381 /*
3420  * This function is called by ext4_ext_map_bl    3382  * This function is called by ext4_ext_map_blocks() if someone tries to write
3421  * to an unwritten extent. It may result in s    3383  * to an unwritten extent. It may result in splitting the unwritten
3422  * extent into multiple extents (up to three     3384  * extent into multiple extents (up to three - one initialized and two
3423  * unwritten).                                   3385  * unwritten).
3424  * There are three possibilities:                3386  * There are three possibilities:
3425  *   a> There is no split required: Entire ex    3387  *   a> There is no split required: Entire extent should be initialized
3426  *   b> Splits in two extents: Write is happe    3388  *   b> Splits in two extents: Write is happening at either end of the extent
3427  *   c> Splits in three extents: Someone is w    3389  *   c> Splits in three extents: Someone is writing in the middle of the extent
3428  *                                               3390  *
3429  * Pre-conditions:                               3391  * Pre-conditions:
3430  *  - The extent pointed to by 'path' is unwr    3392  *  - The extent pointed to by 'path' is unwritten.
3431  *  - The extent pointed to by 'path' contain    3393  *  - The extent pointed to by 'path' contains a superset
3432  *    of the logical span [map->m_lblk, map->    3394  *    of the logical span [map->m_lblk, map->m_lblk + map->m_len).
3433  *                                               3395  *
3434  * Post-conditions on success:                   3396  * Post-conditions on success:
3435  *  - the returned value is the number of blo    3397  *  - the returned value is the number of blocks beyond map->m_lblk
3436  *    that are allocated and initialized.        3398  *    that are allocated and initialized.
3437  *    It is guaranteed to be >= map->m_len.      3399  *    It is guaranteed to be >= map->m_len.
3438  */                                              3400  */
3439 static struct ext4_ext_path *                 !! 3401 static int ext4_ext_convert_to_initialized(handle_t *handle,
3440 ext4_ext_convert_to_initialized(handle_t *han !! 3402                                            struct inode *inode,
3441                         struct ext4_map_block !! 3403                                            struct ext4_map_blocks *map,
3442                         int flags, unsigned i !! 3404                                            struct ext4_ext_path **ppath,
                                                   >> 3405                                            int flags)
3443 {                                                3406 {
                                                   >> 3407         struct ext4_ext_path *path = *ppath;
3444         struct ext4_sb_info *sbi;                3408         struct ext4_sb_info *sbi;
3445         struct ext4_extent_header *eh;           3409         struct ext4_extent_header *eh;
3446         struct ext4_map_blocks split_map;        3410         struct ext4_map_blocks split_map;
3447         struct ext4_extent zero_ex1, zero_ex2    3411         struct ext4_extent zero_ex1, zero_ex2;
3448         struct ext4_extent *ex, *abut_ex;        3412         struct ext4_extent *ex, *abut_ex;
3449         ext4_lblk_t ee_block, eof_block;         3413         ext4_lblk_t ee_block, eof_block;
3450         unsigned int ee_len, depth, map_len =    3414         unsigned int ee_len, depth, map_len = map->m_len;
                                                   >> 3415         int allocated = 0, max_zeroout = 0;
3451         int err = 0;                             3416         int err = 0;
3452         int split_flag = EXT4_EXT_DATA_VALID2    3417         int split_flag = EXT4_EXT_DATA_VALID2;
3453         unsigned int max_zeroout = 0;         << 
3454                                                  3418 
3455         ext_debug(inode, "logical block %llu, !! 3419         ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical"
3456                   (unsigned long long)map->m_ !! 3420                 "block %llu, max_blocks %u\n", inode->i_ino,
                                                   >> 3421                 (unsigned long long)map->m_lblk, map_len);
3457                                                  3422 
3458         sbi = EXT4_SB(inode->i_sb);              3423         sbi = EXT4_SB(inode->i_sb);
3459         eof_block = (EXT4_I(inode)->i_disksiz !! 3424         eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
3460                         >> inode->i_sb->s_blo !! 3425                 inode->i_sb->s_blocksize_bits;
3461         if (eof_block < map->m_lblk + map_len    3426         if (eof_block < map->m_lblk + map_len)
3462                 eof_block = map->m_lblk + map    3427                 eof_block = map->m_lblk + map_len;
3463                                                  3428 
3464         depth = ext_depth(inode);                3429         depth = ext_depth(inode);
3465         eh = path[depth].p_hdr;                  3430         eh = path[depth].p_hdr;
3466         ex = path[depth].p_ext;                  3431         ex = path[depth].p_ext;
3467         ee_block = le32_to_cpu(ex->ee_block);    3432         ee_block = le32_to_cpu(ex->ee_block);
3468         ee_len = ext4_ext_get_actual_len(ex);    3433         ee_len = ext4_ext_get_actual_len(ex);
3469         zero_ex1.ee_len = 0;                     3434         zero_ex1.ee_len = 0;
3470         zero_ex2.ee_len = 0;                     3435         zero_ex2.ee_len = 0;
3471                                                  3436 
3472         trace_ext4_ext_convert_to_initialized    3437         trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
3473                                                  3438 
3474         /* Pre-conditions */                     3439         /* Pre-conditions */
3475         BUG_ON(!ext4_ext_is_unwritten(ex));      3440         BUG_ON(!ext4_ext_is_unwritten(ex));
3476         BUG_ON(!in_range(map->m_lblk, ee_bloc    3441         BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));
3477                                                  3442 
3478         /*                                       3443         /*
3479          * Attempt to transfer newly initiali    3444          * Attempt to transfer newly initialized blocks from the currently
3480          * unwritten extent to its neighbor.     3445          * unwritten extent to its neighbor. This is much cheaper
3481          * than an insertion followed by a me    3446          * than an insertion followed by a merge as those involve costly
3482          * memmove() calls. Transferring to t    3447          * memmove() calls. Transferring to the left is the common case in
3483          * steady state for workloads doing f    3448          * steady state for workloads doing fallocate(FALLOC_FL_KEEP_SIZE)
3484          * followed by append writes.            3449          * followed by append writes.
3485          *                                       3450          *
3486          * Limitations of the current logic:     3451          * Limitations of the current logic:
3487          *  - L1: we do not deal with writes     3452          *  - L1: we do not deal with writes covering the whole extent.
3488          *    This would require removing the    3453          *    This would require removing the extent if the transfer
3489          *    is possible.                       3454          *    is possible.
3490          *  - L2: we only attempt to merge wi    3455          *  - L2: we only attempt to merge with an extent stored in the
3491          *    same extent tree node.             3456          *    same extent tree node.
3492          */                                      3457          */
3493         *allocated = 0;                       << 
3494         if ((map->m_lblk == ee_block) &&         3458         if ((map->m_lblk == ee_block) &&
3495                 /* See if we can merge left *    3459                 /* See if we can merge left */
3496                 (map_len < ee_len) &&            3460                 (map_len < ee_len) &&           /*L1*/
3497                 (ex > EXT_FIRST_EXTENT(eh)))     3461                 (ex > EXT_FIRST_EXTENT(eh))) {  /*L2*/
3498                 ext4_lblk_t prev_lblk;           3462                 ext4_lblk_t prev_lblk;
3499                 ext4_fsblk_t prev_pblk, ee_pb    3463                 ext4_fsblk_t prev_pblk, ee_pblk;
3500                 unsigned int prev_len;           3464                 unsigned int prev_len;
3501                                                  3465 
3502                 abut_ex = ex - 1;                3466                 abut_ex = ex - 1;
3503                 prev_lblk = le32_to_cpu(abut_    3467                 prev_lblk = le32_to_cpu(abut_ex->ee_block);
3504                 prev_len = ext4_ext_get_actua    3468                 prev_len = ext4_ext_get_actual_len(abut_ex);
3505                 prev_pblk = ext4_ext_pblock(a    3469                 prev_pblk = ext4_ext_pblock(abut_ex);
3506                 ee_pblk = ext4_ext_pblock(ex)    3470                 ee_pblk = ext4_ext_pblock(ex);
3507                                                  3471 
3508                 /*                               3472                 /*
3509                  * A transfer of blocks from     3473                  * A transfer of blocks from 'ex' to 'abut_ex' is allowed
3510                  * upon those conditions:        3474                  * upon those conditions:
3511                  * - C1: abut_ex is initializ    3475                  * - C1: abut_ex is initialized,
3512                  * - C2: abut_ex is logically    3476                  * - C2: abut_ex is logically abutting ex,
3513                  * - C3: abut_ex is physicall    3477                  * - C3: abut_ex is physically abutting ex,
3514                  * - C4: abut_ex can receive     3478                  * - C4: abut_ex can receive the additional blocks without
3515                  *   overflowing the (initial    3479                  *   overflowing the (initialized) length limit.
3516                  */                              3480                  */
3517                 if ((!ext4_ext_is_unwritten(a    3481                 if ((!ext4_ext_is_unwritten(abut_ex)) &&                /*C1*/
3518                         ((prev_lblk + prev_le    3482                         ((prev_lblk + prev_len) == ee_block) &&         /*C2*/
3519                         ((prev_pblk + prev_le    3483                         ((prev_pblk + prev_len) == ee_pblk) &&          /*C3*/
3520                         (prev_len < (EXT_INIT    3484                         (prev_len < (EXT_INIT_MAX_LEN - map_len))) {    /*C4*/
3521                         err = ext4_ext_get_ac    3485                         err = ext4_ext_get_access(handle, inode, path + depth);
3522                         if (err)                 3486                         if (err)
3523                                 goto errout;  !! 3487                                 goto out;
3524                                                  3488 
3525                         trace_ext4_ext_conver    3489                         trace_ext4_ext_convert_to_initialized_fastpath(inode,
3526                                 map, ex, abut    3490                                 map, ex, abut_ex);
3527                                                  3491 
3528                         /* Shift the start of    3492                         /* Shift the start of ex by 'map_len' blocks */
3529                         ex->ee_block = cpu_to    3493                         ex->ee_block = cpu_to_le32(ee_block + map_len);
3530                         ext4_ext_store_pblock    3494                         ext4_ext_store_pblock(ex, ee_pblk + map_len);
3531                         ex->ee_len = cpu_to_l    3495                         ex->ee_len = cpu_to_le16(ee_len - map_len);
3532                         ext4_ext_mark_unwritt    3496                         ext4_ext_mark_unwritten(ex); /* Restore the flag */
3533                                                  3497 
3534                         /* Extend abut_ex by     3498                         /* Extend abut_ex by 'map_len' blocks */
3535                         abut_ex->ee_len = cpu    3499                         abut_ex->ee_len = cpu_to_le16(prev_len + map_len);
3536                                                  3500 
3537                         /* Result: number of     3501                         /* Result: number of initialized blocks past m_lblk */
3538                         *allocated = map_len; !! 3502                         allocated = map_len;
3539                 }                                3503                 }
3540         } else if (((map->m_lblk + map_len) =    3504         } else if (((map->m_lblk + map_len) == (ee_block + ee_len)) &&
3541                    (map_len < ee_len) &&         3505                    (map_len < ee_len) &&        /*L1*/
3542                    ex < EXT_LAST_EXTENT(eh))     3506                    ex < EXT_LAST_EXTENT(eh)) {  /*L2*/
3543                 /* See if we can merge right     3507                 /* See if we can merge right */
3544                 ext4_lblk_t next_lblk;           3508                 ext4_lblk_t next_lblk;
3545                 ext4_fsblk_t next_pblk, ee_pb    3509                 ext4_fsblk_t next_pblk, ee_pblk;
3546                 unsigned int next_len;           3510                 unsigned int next_len;
3547                                                  3511 
3548                 abut_ex = ex + 1;                3512                 abut_ex = ex + 1;
3549                 next_lblk = le32_to_cpu(abut_    3513                 next_lblk = le32_to_cpu(abut_ex->ee_block);
3550                 next_len = ext4_ext_get_actua    3514                 next_len = ext4_ext_get_actual_len(abut_ex);
3551                 next_pblk = ext4_ext_pblock(a    3515                 next_pblk = ext4_ext_pblock(abut_ex);
3552                 ee_pblk = ext4_ext_pblock(ex)    3516                 ee_pblk = ext4_ext_pblock(ex);
3553                                                  3517 
3554                 /*                               3518                 /*
3555                  * A transfer of blocks from     3519                  * A transfer of blocks from 'ex' to 'abut_ex' is allowed
3556                  * upon those conditions:        3520                  * upon those conditions:
3557                  * - C1: abut_ex is initializ    3521                  * - C1: abut_ex is initialized,
3558                  * - C2: abut_ex is logically    3522                  * - C2: abut_ex is logically abutting ex,
3559                  * - C3: abut_ex is physicall    3523                  * - C3: abut_ex is physically abutting ex,
3560                  * - C4: abut_ex can receive     3524                  * - C4: abut_ex can receive the additional blocks without
3561                  *   overflowing the (initial    3525                  *   overflowing the (initialized) length limit.
3562                  */                              3526                  */
3563                 if ((!ext4_ext_is_unwritten(a    3527                 if ((!ext4_ext_is_unwritten(abut_ex)) &&                /*C1*/
3564                     ((map->m_lblk + map_len)     3528                     ((map->m_lblk + map_len) == next_lblk) &&           /*C2*/
3565                     ((ee_pblk + ee_len) == ne    3529                     ((ee_pblk + ee_len) == next_pblk) &&                /*C3*/
3566                     (next_len < (EXT_INIT_MAX    3530                     (next_len < (EXT_INIT_MAX_LEN - map_len))) {        /*C4*/
3567                         err = ext4_ext_get_ac    3531                         err = ext4_ext_get_access(handle, inode, path + depth);
3568                         if (err)                 3532                         if (err)
3569                                 goto errout;  !! 3533                                 goto out;
3570                                                  3534 
3571                         trace_ext4_ext_conver    3535                         trace_ext4_ext_convert_to_initialized_fastpath(inode,
3572                                 map, ex, abut    3536                                 map, ex, abut_ex);
3573                                                  3537 
3574                         /* Shift the start of    3538                         /* Shift the start of abut_ex by 'map_len' blocks */
3575                         abut_ex->ee_block = c    3539                         abut_ex->ee_block = cpu_to_le32(next_lblk - map_len);
3576                         ext4_ext_store_pblock    3540                         ext4_ext_store_pblock(abut_ex, next_pblk - map_len);
3577                         ex->ee_len = cpu_to_l    3541                         ex->ee_len = cpu_to_le16(ee_len - map_len);
3578                         ext4_ext_mark_unwritt    3542                         ext4_ext_mark_unwritten(ex); /* Restore the flag */
3579                                                  3543 
3580                         /* Extend abut_ex by     3544                         /* Extend abut_ex by 'map_len' blocks */
3581                         abut_ex->ee_len = cpu    3545                         abut_ex->ee_len = cpu_to_le16(next_len + map_len);
3582                                                  3546 
3583                         /* Result: number of     3547                         /* Result: number of initialized blocks past m_lblk */
3584                         *allocated = map_len; !! 3548                         allocated = map_len;
3585                 }                                3549                 }
3586         }                                        3550         }
3587         if (*allocated) {                     !! 3551         if (allocated) {
3588                 /* Mark the block containing     3552                 /* Mark the block containing both extents as dirty */
3589                 err = ext4_ext_dirty(handle,  !! 3553                 ext4_ext_dirty(handle, inode, path + depth);
3590                                                  3554 
3591                 /* Update path to point to th    3555                 /* Update path to point to the right extent */
3592                 path[depth].p_ext = abut_ex;     3556                 path[depth].p_ext = abut_ex;
3593                 if (err)                      << 
3594                         goto errout;          << 
3595                 goto out;                        3557                 goto out;
3596         } else                                   3558         } else
3597                 *allocated = ee_len - (map->m !! 3559                 allocated = ee_len - (map->m_lblk - ee_block);
3598                                                  3560 
3599         WARN_ON(map->m_lblk < ee_block);         3561         WARN_ON(map->m_lblk < ee_block);
3600         /*                                       3562         /*
3601          * It is safe to convert extent to in    3563          * It is safe to convert extent to initialized via explicit
3602          * zeroout only if extent is fully in    3564          * zeroout only if extent is fully inside i_size or new_size.
3603          */                                      3565          */
3604         split_flag |= ee_block + ee_len <= eo    3566         split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
3605                                                  3567 
3606         if (EXT4_EXT_MAY_ZEROOUT & split_flag    3568         if (EXT4_EXT_MAY_ZEROOUT & split_flag)
3607                 max_zeroout = sbi->s_extent_m    3569                 max_zeroout = sbi->s_extent_max_zeroout_kb >>
3608                         (inode->i_sb->s_block    3570                         (inode->i_sb->s_blocksize_bits - 10);
3609                                                  3571 
                                                   >> 3572         if (ext4_encrypted_inode(inode))
                                                   >> 3573                 max_zeroout = 0;
                                                   >> 3574 
3610         /*                                       3575         /*
3611          * five cases:                           3576          * five cases:
3612          * 1. split the extent into three ext    3577          * 1. split the extent into three extents.
3613          * 2. split the extent into two exten    3578          * 2. split the extent into two extents, zeroout the head of the first
3614          *    extent.                            3579          *    extent.
3615          * 3. split the extent into two exten    3580          * 3. split the extent into two extents, zeroout the tail of the second
3616          *    extent.                            3581          *    extent.
3617          * 4. split the extent into two exten    3582          * 4. split the extent into two extents without zeroout.
3618          * 5. no splitting needed, just possi    3583          * 5. no splitting needed, just possibly zeroout the head and / or the
3619          *    tail of the extent.                3584          *    tail of the extent.
3620          */                                      3585          */
3621         split_map.m_lblk = map->m_lblk;          3586         split_map.m_lblk = map->m_lblk;
3622         split_map.m_len = map->m_len;            3587         split_map.m_len = map->m_len;
3623                                                  3588 
3624         if (max_zeroout && (*allocated > spli !! 3589         if (max_zeroout && (allocated > split_map.m_len)) {
3625                 if (*allocated <= max_zeroout !! 3590                 if (allocated <= max_zeroout) {
3626                         /* case 3 or 5 */        3591                         /* case 3 or 5 */
3627                         zero_ex1.ee_block =      3592                         zero_ex1.ee_block =
3628                                  cpu_to_le32(    3593                                  cpu_to_le32(split_map.m_lblk +
3629                                                  3594                                              split_map.m_len);
3630                         zero_ex1.ee_len =        3595                         zero_ex1.ee_len =
3631                                 cpu_to_le16(* !! 3596                                 cpu_to_le16(allocated - split_map.m_len);
3632                         ext4_ext_store_pblock    3597                         ext4_ext_store_pblock(&zero_ex1,
3633                                 ext4_ext_pblo    3598                                 ext4_ext_pblock(ex) + split_map.m_lblk +
3634                                 split_map.m_l    3599                                 split_map.m_len - ee_block);
3635                         err = ext4_ext_zeroou    3600                         err = ext4_ext_zeroout(inode, &zero_ex1);
3636                         if (err)                 3601                         if (err)
3637                                 goto fallback !! 3602                                 goto out;
3638                         split_map.m_len = *al !! 3603                         split_map.m_len = allocated;
3639                 }                                3604                 }
3640                 if (split_map.m_lblk - ee_blo    3605                 if (split_map.m_lblk - ee_block + split_map.m_len <
3641                                                  3606                                                                 max_zeroout) {
3642                         /* case 2 or 5 */        3607                         /* case 2 or 5 */
3643                         if (split_map.m_lblk     3608                         if (split_map.m_lblk != ee_block) {
3644                                 zero_ex2.ee_b    3609                                 zero_ex2.ee_block = ex->ee_block;
3645                                 zero_ex2.ee_l    3610                                 zero_ex2.ee_len = cpu_to_le16(split_map.m_lblk -
3646                                                  3611                                                         ee_block);
3647                                 ext4_ext_stor    3612                                 ext4_ext_store_pblock(&zero_ex2,
3648                                                  3613                                                       ext4_ext_pblock(ex));
3649                                 err = ext4_ex    3614                                 err = ext4_ext_zeroout(inode, &zero_ex2);
3650                                 if (err)         3615                                 if (err)
3651                                         goto  !! 3616                                         goto out;
3652                         }                        3617                         }
3653                                                  3618 
3654                         split_map.m_len += sp    3619                         split_map.m_len += split_map.m_lblk - ee_block;
3655                         split_map.m_lblk = ee    3620                         split_map.m_lblk = ee_block;
3656                         *allocated = map->m_l !! 3621                         allocated = map->m_len;
3657                 }                                3622                 }
3658         }                                        3623         }
3659                                                  3624 
3660 fallback:                                     !! 3625         err = ext4_split_extent(handle, inode, ppath, &split_map, split_flag,
3661         path = ext4_split_extent(handle, inod !! 3626                                 flags);
3662                                  flags, NULL) !! 3627         if (err > 0)
3663         if (IS_ERR(path))                     !! 3628                 err = 0;
3664                 return path;                  << 
3665 out:                                             3629 out:
3666         /* If we have gotten a failure, don't    3630         /* If we have gotten a failure, don't zero out status tree */
3667         ext4_zeroout_es(inode, &zero_ex1);    !! 3631         if (!err) {
3668         ext4_zeroout_es(inode, &zero_ex2);    !! 3632                 err = ext4_zeroout_es(inode, &zero_ex1);
3669         return path;                          !! 3633                 if (!err)
3670                                               !! 3634                         err = ext4_zeroout_es(inode, &zero_ex2);
3671 errout:                                       !! 3635         }
3672         ext4_free_ext_path(path);             !! 3636         return err ? err : allocated;
3673         return ERR_PTR(err);                  << 
3674 }                                                3637 }
3675                                                  3638 
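The max_zeroout limit used in ext4_ext_convert_to_initialized() is configured in KiB (s_extent_max_zeroout_kb) and converted to filesystem blocks by shifting with (s_blocksize_bits - 10), since a KiB is 1 << 10 bytes. A minimal stand-alone sketch of that conversion (illustrative user-space C, not kernel code):

        #include <stdio.h>

        /* Convert a limit expressed in KiB into filesystem blocks: one KiB is
         * 1 << 10 bytes and one block is 1 << blkbits bytes, so the ratio is
         * a right shift by (blkbits - 10). */
        static unsigned int kb_to_blocks(unsigned int limit_kb, unsigned int blkbits)
        {
                return limit_kb >> (blkbits - 10);
        }

        int main(void)
        {
                /* e.g. a 32 KiB zeroout limit on a 4 KiB-block filesystem -> 8 blocks */
                printf("%u\n", kb_to_blocks(32, 12));
                return 0;
        }
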
3676 /*                                               3639 /*
3677  * This function is called by ext4_ext_map_bl    3640  * This function is called by ext4_ext_map_blocks() from
3678  * ext4_get_blocks_dio_write() when doing a D    3641  * ext4_get_blocks_dio_write() when doing a DIO write
3679  * to an unwritten extent.                       3642  * to an unwritten extent.
3680  *                                               3643  *
3681  * Writing to an unwritten extent may result     3644  * Writing to an unwritten extent may result in splitting the unwritten
3682  * extent into multiple initialized/unwritten    3645  * extent into multiple initialized/unwritten extents (up to three)
3683  * There are three possibilities:                3646  * There are three possibilities:
3684  *   a> There is no split required: Entire ex    3647  *   a> There is no split required: Entire extent should be unwritten
3685  *   b> Splits in two extents: Write is happe    3648  *   b> Splits in two extents: Write is happening at either end of the extent
3686  *   c> Splits in three extents: Someone is w    3649  *   c> Splits in three extents: Someone is writing in the middle of the extent
3687  *                                               3650  *
3688  * This works the same way in the case of ini    3651  * This works the same way in the case of initialized -> unwritten conversion.
3689  *                                               3652  *
3690  * One or more index blocks may be needed if    3653  * One or more index blocks may be needed if the extent tree grows after
3691  * the unwritten extent is split. To prevent     3654  * the unwritten extent is split. To prevent ENOSPC from occurring at IO
3692  * completion, we need to split the unwritten    3655  * completion, we need to split the unwritten extent before DIO submits
3693  * the IO. The unwritten extent handled at th    3656  * the IO. The unwritten extent handled at this time will be split
3694  * into three unwritten extents (at most). Af    3657  * into three unwritten extents (at most). After the IO completes, the part
3695  * being filled will be converted to initiali    3658  * being filled will be converted to initialized by the end_io callback
3696  * via ext4_convert_unwritten_extents().         3659  * via ext4_convert_unwritten_extents().
3697  *                                               3660  *
3698  * The size of unwritten extent to be written !! 3661  * Returns the size of unwritten extent to be written on success.
3699  * allocated pointer. Return an extent path p << 
3700  * pointer on failure.                        << 
3701  */                                              3662  */
3702 static struct ext4_ext_path *ext4_split_conve !! 3663 static int ext4_split_convert_extents(handle_t *handle,
3703                                         struc    3664                                         struct inode *inode,
3704                                         struc    3665                                         struct ext4_map_blocks *map,
3705                                         struc !! 3666                                         struct ext4_ext_path **ppath,
3706                                         int f !! 3667                                         int flags)
3707 {                                                3668 {
                                                   >> 3669         struct ext4_ext_path *path = *ppath;
3708         ext4_lblk_t eof_block;                   3670         ext4_lblk_t eof_block;
3709         ext4_lblk_t ee_block;                    3671         ext4_lblk_t ee_block;
3710         struct ext4_extent *ex;                  3672         struct ext4_extent *ex;
3711         unsigned int ee_len;                     3673         unsigned int ee_len;
3712         int split_flag = 0, depth;               3674         int split_flag = 0, depth;
3713                                                  3675 
3714         ext_debug(inode, "logical block %llu, !! 3676         ext_debug("%s: inode %lu, logical block %llu, max_blocks %u\n",
                                                   >> 3677                   __func__, inode->i_ino,
3715                   (unsigned long long)map->m_    3678                   (unsigned long long)map->m_lblk, map->m_len);
3716                                                  3679 
3717         eof_block = (EXT4_I(inode)->i_disksiz !! 3680         eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
3718                         >> inode->i_sb->s_blo !! 3681                 inode->i_sb->s_blocksize_bits;
3719         if (eof_block < map->m_lblk + map->m_    3682         if (eof_block < map->m_lblk + map->m_len)
3720                 eof_block = map->m_lblk + map    3683                 eof_block = map->m_lblk + map->m_len;
3721         /*                                       3684         /*
3722          * It is safe to convert extent to in    3685          * It is safe to convert extent to initialized via explicit
3723          * zeroout only if extent is fully in !! 3686          * zeroout only if extent is fully insde i_size or new_size.
3724          */                                      3687          */
3725         depth = ext_depth(inode);                3688         depth = ext_depth(inode);
3726         ex = path[depth].p_ext;                  3689         ex = path[depth].p_ext;
3727         ee_block = le32_to_cpu(ex->ee_block);    3690         ee_block = le32_to_cpu(ex->ee_block);
3728         ee_len = ext4_ext_get_actual_len(ex);    3691         ee_len = ext4_ext_get_actual_len(ex);
3729                                                  3692 
3730         /* Convert to unwritten */               3693         /* Convert to unwritten */
3731         if (flags & EXT4_GET_BLOCKS_CONVERT_U    3694         if (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN) {
3732                 split_flag |= EXT4_EXT_DATA_V    3695                 split_flag |= EXT4_EXT_DATA_VALID1;
3733         /* Convert to initialized */             3696         /* Convert to initialized */
3734         } else if (flags & EXT4_GET_BLOCKS_CO    3697         } else if (flags & EXT4_GET_BLOCKS_CONVERT) {
3735                 split_flag |= ee_block + ee_l    3698                 split_flag |= ee_block + ee_len <= eof_block ?
3736                               EXT4_EXT_MAY_ZE    3699                               EXT4_EXT_MAY_ZEROOUT : 0;
3737                 split_flag |= (EXT4_EXT_MARK_    3700                 split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2);
3738         }                                        3701         }
3739         flags |= EXT4_GET_BLOCKS_PRE_IO;         3702         flags |= EXT4_GET_BLOCKS_PRE_IO;
3740         return ext4_split_extent(handle, inod !! 3703         return ext4_split_extent(handle, inode, ppath, map, split_flag, flags);
3741                                  allocated);  << 
3742 }                                                3704 }
3743                                                  3705 
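Both ext4_ext_convert_to_initialized() and ext4_split_convert_extents() compute eof_block by rounding the inode size up to whole blocks; the arithmetic is just ceil(size / blocksize) expressed with a shift. A small self-contained sketch, with illustrative names only:

        #include <stdio.h>

        /* First block index entirely beyond `size` bytes, i.e. ceil(size / blocksize)
         * computed with a shift, mirroring the eof_block expression above. */
        static unsigned long long first_block_beyond(unsigned long long size,
                                                     unsigned int blkbits)
        {
                unsigned long long blocksize = 1ULL << blkbits;

                return (size + blocksize - 1) >> blkbits;
        }

        int main(void)
        {
                /* a 10000-byte file with 4 KiB blocks occupies blocks 0..2, so eof_block is 3 */
                printf("%llu\n", first_block_beyond(10000, 12));
                return 0;
        }
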
3744 static struct ext4_ext_path *                 !! 3706 static int ext4_convert_unwritten_extents_endio(handle_t *handle,
3745 ext4_convert_unwritten_extents_endio(handle_t !! 3707                                                 struct inode *inode,
3746                                      struct e !! 3708                                                 struct ext4_map_blocks *map,
3747                                      struct e !! 3709                                                 struct ext4_ext_path **ppath)
3748 {                                                3710 {
                                                   >> 3711         struct ext4_ext_path *path = *ppath;
3749         struct ext4_extent *ex;                  3712         struct ext4_extent *ex;
3750         ext4_lblk_t ee_block;                    3713         ext4_lblk_t ee_block;
3751         unsigned int ee_len;                     3714         unsigned int ee_len;
3752         int depth;                               3715         int depth;
3753         int err = 0;                             3716         int err = 0;
3754                                                  3717 
3755         depth = ext_depth(inode);                3718         depth = ext_depth(inode);
3756         ex = path[depth].p_ext;                  3719         ex = path[depth].p_ext;
3757         ee_block = le32_to_cpu(ex->ee_block);    3720         ee_block = le32_to_cpu(ex->ee_block);
3758         ee_len = ext4_ext_get_actual_len(ex);    3721         ee_len = ext4_ext_get_actual_len(ex);
3759                                                  3722 
3760         ext_debug(inode, "logical block %llu, !! 3723         ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical"
                                                   >> 3724                 "block %llu, max_blocks %u\n", inode->i_ino,
3761                   (unsigned long long)ee_bloc    3725                   (unsigned long long)ee_block, ee_len);
3762                                                  3726 
3763         /* If extent is larger than requested    3727         /* If extent is larger than requested it is a clear sign that we still
3764          * have some extent state machine iss    3728          * have some extent state machine issues left. So extent_split is still
3765          * required.                             3729          * required.
3766          * TODO: Once all related issues are     3730          * TODO: Once all related issues are fixed, this situation should be
3767          * illegal.                              3731          * illegal.
3768          */                                      3732          */
3769         if (ee_block != map->m_lblk || ee_len    3733         if (ee_block != map->m_lblk || ee_len > map->m_len) {
3770 #ifdef CONFIG_EXT4_DEBUG                      !! 3734 #ifdef EXT4_DEBUG
3771                 ext4_warning(inode->i_sb, "In !! 3735                 ext4_warning("Inode (%ld) finished: extent logical block %llu,"
3772                              " len %u; IO log    3736                              " len %u; IO logical block %llu, len %u",
3773                              inode->i_ino, (u    3737                              inode->i_ino, (unsigned long long)ee_block, ee_len,
3774                              (unsigned long l    3738                              (unsigned long long)map->m_lblk, map->m_len);
3775 #endif                                           3739 #endif
3776                 path = ext4_split_convert_ext !! 3740                 err = ext4_split_convert_extents(handle, inode, map, ppath,
3777                                               !! 3741                                                  EXT4_GET_BLOCKS_CONVERT);
3778                 if (IS_ERR(path))             !! 3742                 if (err < 0)
3779                         return path;          !! 3743                         return err;
3780                                               !! 3744                 path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
3781                 path = ext4_find_extent(inode << 
3782                 if (IS_ERR(path))                3745                 if (IS_ERR(path))
3783                         return path;          !! 3746                         return PTR_ERR(path);
3784                 depth = ext_depth(inode);        3747                 depth = ext_depth(inode);
3785                 ex = path[depth].p_ext;          3748                 ex = path[depth].p_ext;
3786         }                                        3749         }
3787                                                  3750 
3788         err = ext4_ext_get_access(handle, ino    3751         err = ext4_ext_get_access(handle, inode, path + depth);
3789         if (err)                                 3752         if (err)
3790                 goto errout;                  !! 3753                 goto out;
3791         /* first mark the extent as initializ    3754         /* first mark the extent as initialized */
3792         ext4_ext_mark_initialized(ex);           3755         ext4_ext_mark_initialized(ex);
3793                                                  3756 
3794         /* note: ext4_ext_correct_indexes() i    3757         /* note: ext4_ext_correct_indexes() isn't needed here because
3795          * borders are not changed               3758          * borders are not changed
3796          */                                      3759          */
3797         ext4_ext_try_to_merge(handle, inode,     3760         ext4_ext_try_to_merge(handle, inode, path, ex);
3798                                                  3761 
3799         /* Mark modified extent as dirty */      3762         /* Mark modified extent as dirty */
3800         err = ext4_ext_dirty(handle, inode, p    3763         err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3801         if (err)                              !! 3764 out:
3802                 goto errout;                  << 
3803                                               << 
3804         ext4_ext_show_leaf(inode, path);         3765         ext4_ext_show_leaf(inode, path);
3805         return path;                          !! 3766         return err;
                                                   >> 3767 }
3806                                                  3768 
3807 errout:                                       !! 3769 /*
3808         ext4_free_ext_path(path);             !! 3770  * Handle EOFBLOCKS_FL flag, clearing it if necessary
3809         return ERR_PTR(err);                  !! 3771  */
                                                   >> 3772 static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
                                                   >> 3773                               ext4_lblk_t lblk,
                                                   >> 3774                               struct ext4_ext_path *path,
                                                   >> 3775                               unsigned int len)
                                                   >> 3776 {
                                                   >> 3777         int i, depth;
                                                   >> 3778         struct ext4_extent_header *eh;
                                                   >> 3779         struct ext4_extent *last_ex;
                                                   >> 3780 
                                                   >> 3781         if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))
                                                   >> 3782                 return 0;
                                                   >> 3783 
                                                   >> 3784         depth = ext_depth(inode);
                                                   >> 3785         eh = path[depth].p_hdr;
                                                   >> 3786 
                                                   >> 3787         /*
                                                   >> 3788          * We're going to remove EOFBLOCKS_FL entirely in future so we
                                                   >> 3789          * do not care for this case anymore. Simply remove the flag
                                                   >> 3790          * if there are no extents.
                                                   >> 3791          */
                                                   >> 3792         if (unlikely(!eh->eh_entries))
                                                   >> 3793                 goto out;
                                                   >> 3794         last_ex = EXT_LAST_EXTENT(eh);
                                                   >> 3795         /*
                                                   >> 3796          * We should clear the EOFBLOCKS_FL flag if we are writing the
                                                   >> 3797          * last block in the last extent in the file.  We test this by
                                                   >> 3798          * first checking to see if the caller to
                                                   >> 3799          * ext4_ext_get_blocks() was interested in the last block (or
                                                   >> 3800          * a block beyond the last block) in the current extent.  If
                                                   >> 3801          * this turns out to be false, we can bail out from this
                                                   >> 3802          * function immediately.
                                                   >> 3803          */
                                                   >> 3804         if (lblk + len < le32_to_cpu(last_ex->ee_block) +
                                                   >> 3805             ext4_ext_get_actual_len(last_ex))
                                                   >> 3806                 return 0;
                                                   >> 3807         /*
                                                   >> 3808          * If the caller does appear to be planning to write at or
                                                   >> 3809          * beyond the end of the current extent, we then test to see
                                                   >> 3810          * if the current extent is the last extent in the file, by
                                                   >> 3811          * checking to make sure it was reached via the rightmost node
                                                   >> 3812          * at each level of the tree.
                                                   >> 3813          */
                                                   >> 3814         for (i = depth-1; i >= 0; i--)
                                                   >> 3815                 if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr))
                                                   >> 3816                         return 0;
                                                   >> 3817 out:
                                                   >> 3818         ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
                                                   >> 3819         return ext4_mark_inode_dirty(handle, inode);
                                                   >> 3820 }
                                                   >> 3821 
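The early return in check_eofblocks_fl() fires unless the write range reaches at least the last block of the last extent. A minimal model of that comparison (plain user-space C, names are illustrative):

        #include <stdio.h>

        /* The flag is only worth clearing when [lblk, lblk + len) reaches the last
         * block of the last extent [last_block, last_block + last_len). */
        static int reaches_last_block(unsigned int lblk, unsigned int len,
                                      unsigned int last_block, unsigned int last_len)
        {
                return lblk + len >= last_block + last_len;
        }

        int main(void)
        {
                printf("%d\n", reaches_last_block(90, 20, 100, 10));    /* covers block 109 -> 1 */
                printf("%d\n", reaches_last_block(90, 10, 100, 10));    /* stops at block 99 -> 0 */
                return 0;
        }
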
                                                   >> 3822 /**
                                                   >> 3823  * ext4_find_delalloc_range: find delayed allocated block in the given range.
                                                   >> 3824  *
                                                   >> 3825  * Return 1 if there is a delalloc block in the range, otherwise 0.
                                                   >> 3826  */
                                                   >> 3827 int ext4_find_delalloc_range(struct inode *inode,
                                                   >> 3828                              ext4_lblk_t lblk_start,
                                                   >> 3829                              ext4_lblk_t lblk_end)
                                                   >> 3830 {
                                                   >> 3831         struct extent_status es;
                                                   >> 3832 
                                                   >> 3833         ext4_es_find_delayed_extent_range(inode, lblk_start, lblk_end, &es);
                                                   >> 3834         if (es.es_len == 0)
                                                   >> 3835                 return 0; /* there is no delay extent in this tree */
                                                   >> 3836         else if (es.es_lblk <= lblk_start &&
                                                   >> 3837                  lblk_start < es.es_lblk + es.es_len)
                                                   >> 3838                 return 1;
                                                   >> 3839         else if (lblk_start <= es.es_lblk && es.es_lblk <= lblk_end)
                                                   >> 3840                 return 1;
                                                   >> 3841         else
                                                   >> 3842                 return 0;
                                                   >> 3843 }
                                                   >> 3844 
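ext4_find_delalloc_range() reduces to an interval-overlap test between the extent reported by the extent status tree and the queried range. The same check in isolation, as a stand-alone sketch (illustrative names, not the kernel API):

        #include <stdio.h>

        /* Does the cached extent [es_lblk, es_lblk + es_len) intersect the queried
         * range [lblk_start, lblk_end] (inclusive end, as in the function above)? */
        static int delalloc_overlaps(unsigned int es_lblk, unsigned int es_len,
                                     unsigned int lblk_start, unsigned int lblk_end)
        {
                if (es_len == 0)
                        return 0;                       /* nothing cached in the range */
                return (es_lblk <= lblk_start && lblk_start < es_lblk + es_len) ||
                       (lblk_start <= es_lblk && es_lblk <= lblk_end);
        }

        int main(void)
        {
                printf("%d\n", delalloc_overlaps(10, 5, 12, 20));       /* overlap over 12..14 -> 1 */
                printf("%d\n", delalloc_overlaps(30, 5, 12, 20));       /* disjoint            -> 0 */
                return 0;
        }
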
                                                   >> 3845 int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk)
                                                   >> 3846 {
                                                   >> 3847         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
                                                   >> 3848         ext4_lblk_t lblk_start, lblk_end;
                                                   >> 3849         lblk_start = EXT4_LBLK_CMASK(sbi, lblk);
                                                   >> 3850         lblk_end = lblk_start + sbi->s_cluster_ratio - 1;
                                                   >> 3851 
                                                   >> 3852         return ext4_find_delalloc_range(inode, lblk_start, lblk_end);
                                                   >> 3853 }
                                                   >> 3854 
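ext4_find_delalloc_cluster() rounds the block down to the start of its bigalloc cluster before checking the whole cluster; with a power-of-two cluster ratio that rounding is a simple mask, which EXT4_LBLK_CMASK performs in the kernel. A stand-alone sketch of the same arithmetic (illustrative names):

        #include <stdio.h>

        /* Block range covered by the cluster containing lblk, assuming the cluster
         * ratio (blocks per cluster) is a power of two as in bigalloc filesystems. */
        static void cluster_range(unsigned int lblk, unsigned int cluster_ratio,
                                  unsigned int *start, unsigned int *end)
        {
                *start = lblk & ~(cluster_ratio - 1);   /* EXT4_LBLK_CMASK-style rounding */
                *end = *start + cluster_ratio - 1;
        }

        int main(void)
        {
                unsigned int start, end;

                cluster_range(1000, 16, &start, &end);  /* 16 blocks per cluster */
                printf("%u..%u\n", start, end);         /* prints 992..1007 */
                return 0;
        }
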
                                                   >> 3855 /**
                                                   >> 3856  * Determines how many complete clusters (out of those specified by the 'map')
                                                   >> 3857  * are under delalloc and have had quota reserved for them.
                                                   >> 3858  * This function is called when we are writing out the blocks that were
                                                   >> 3859  * originally written with their allocation delayed, but then the space was
                                                   >> 3860  * allocated using fallocate() before the delayed allocation could be resolved.
                                                   >> 3861  * The cases to look for are:
                                                   >> 3862  * ('=' indicates delayed allocated blocks
                                                   >> 3863  *  '-' indicates non-delayed allocated blocks)
                                                   >> 3864  * (a) partial clusters towards beginning and/or end outside of allocated range
                                                   >> 3865  *     are not delalloc'ed.
                                                   >> 3866  *      Ex:
                                                   >> 3867  *      |----c---=|====c====|====c====|===-c----|
                                                   >> 3868  *               |++++++ allocated ++++++|
                                                   >> 3869  *      ==> 4 complete clusters in above example
                                                   >> 3870  *
                                                   >> 3871  * (b) partial cluster (outside of allocated range) towards either end is
                                                   >> 3872  *     marked for delayed allocation. In this case, we will exclude that
                                                   >> 3873  *     cluster.
                                                   >> 3874  *      Ex:
                                                   >> 3875  *      |----====c========|========c========|
                                                   >> 3876  *           |++++++ allocated ++++++|
                                                   >> 3877  *      ==> 1 complete cluster in above example
                                                   >> 3878  *
                                                   >> 3879  *      Ex:
                                                   >> 3880  *      |================c================|
                                                   >> 3881  *            |++++++ allocated ++++++|
                                                   >> 3882  *      ==> 0 complete clusters in above example
                                                   >> 3883  *
                                                   >> 3884  * The ext4_da_update_reserve_space will be called only if we
                                                   >> 3885  * determine here that there were some "entire" clusters that span
                                                   >> 3886  * this 'allocated' range.
                                                   >> 3887  * In the non-bigalloc case, this function will just end up returning num_blks
                                                   >> 3888  * without ever calling ext4_find_delalloc_range.
                                                   >> 3889  */
                                                   >> 3890 static unsigned int
                                                   >> 3891 get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
                                                   >> 3892                            unsigned int num_blks)
                                                   >> 3893 {
                                                   >> 3894         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
                                                   >> 3895         ext4_lblk_t alloc_cluster_start, alloc_cluster_end;
                                                   >> 3896         ext4_lblk_t lblk_from, lblk_to, c_offset;
                                                   >> 3897         unsigned int allocated_clusters = 0;
                                                   >> 3898 
                                                   >> 3899         alloc_cluster_start = EXT4_B2C(sbi, lblk_start);
                                                   >> 3900         alloc_cluster_end = EXT4_B2C(sbi, lblk_start + num_blks - 1);
                                                   >> 3901 
                                                   >> 3902         /* max possible clusters for this allocation */
                                                   >> 3903         allocated_clusters = alloc_cluster_end - alloc_cluster_start + 1;
                                                   >> 3904 
                                                   >> 3905         trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks);
                                                   >> 3906 
                                                   >> 3907         /* Check towards left side */
                                                   >> 3908         c_offset = EXT4_LBLK_COFF(sbi, lblk_start);
                                                   >> 3909         if (c_offset) {
                                                   >> 3910                 lblk_from = EXT4_LBLK_CMASK(sbi, lblk_start);
                                                   >> 3911                 lblk_to = lblk_from + c_offset - 1;
                                                   >> 3912 
                                                   >> 3913                 if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
                                                   >> 3914                         allocated_clusters--;
                                                   >> 3915         }
                                                   >> 3916 
                                                   >> 3917         /* Now check towards right. */
                                                   >> 3918         c_offset = EXT4_LBLK_COFF(sbi, lblk_start + num_blks);
                                                   >> 3919         if (allocated_clusters && c_offset) {
                                                   >> 3920                 lblk_from = lblk_start + num_blks;
                                                   >> 3921                 lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1;
                                                   >> 3922 
                                                   >> 3923                 if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
                                                   >> 3924                         allocated_clusters--;
                                                   >> 3925         }
                                                   >> 3926 
                                                   >> 3927         return allocated_clusters;
3810 }                                                3928 }
3811                                                  3929 
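/*
 * Editor's note -- illustrative sketch, not part of the kernel source: a
 * userspace rendering of the arithmetic above, assuming 8 blocks per
 * cluster.  b2c()/coff()/cmask() are simplified stand-ins for
 * EXT4_B2C()/EXT4_LBLK_COFF()/EXT4_LBLK_CMASK(), and is_delalloc() is a
 * placeholder for ext4_find_delalloc_range().
 */
#include <stdio.h>

#define CLUSTER_RATIO 8u

static unsigned int b2c(unsigned int blk)   { return blk / CLUSTER_RATIO; }
static unsigned int coff(unsigned int blk)  { return blk % CLUSTER_RATIO; }
static unsigned int cmask(unsigned int blk) { return blk - coff(blk); }

static int is_delalloc(unsigned int from, unsigned int to)
{
        (void)to;               /* the stub only looks at the range start */
        return from < 16;       /* pretend blocks below 16 are still delayed */
}

int main(void)
{
        unsigned int lblk_start = 13, num_blks = 20;    /* blocks 13..32 */
        unsigned int clusters =
                b2c(lblk_start + num_blks - 1) - b2c(lblk_start) + 1;   /* 4 */

        /* partial cluster on the left (blocks 8..12) is delalloc: exclude it */
        if (coff(lblk_start) &&
            is_delalloc(cmask(lblk_start), lblk_start - 1))
                clusters--;
        /* partial cluster on the right (blocks 33..39) is not delalloc */
        if (clusters && coff(lblk_start + num_blks) &&
            is_delalloc(lblk_start + num_blks,
                        cmask(lblk_start + num_blks) + CLUSTER_RATIO - 1))
                clusters--;

        printf("complete delalloc clusters: %u\n", clusters);   /* prints 3 */
        return 0;
}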
3812 static struct ext4_ext_path *                 !! 3930 static int
3813 convert_initialized_extent(handle_t *handle,     3931 convert_initialized_extent(handle_t *handle, struct inode *inode,
3814                            struct ext4_map_bl    3932                            struct ext4_map_blocks *map,
3815                            struct ext4_ext_pa !! 3933                            struct ext4_ext_path **ppath,
3816                            unsigned int *allo !! 3934                            unsigned int allocated)
3817 {                                                3935 {
                                                   >> 3936         struct ext4_ext_path *path = *ppath;
3818         struct ext4_extent *ex;                  3937         struct ext4_extent *ex;
3819         ext4_lblk_t ee_block;                    3938         ext4_lblk_t ee_block;
3820         unsigned int ee_len;                     3939         unsigned int ee_len;
3821         int depth;                               3940         int depth;
3822         int err = 0;                             3941         int err = 0;
3823                                                  3942 
3824         /*                                       3943         /*
3825          * Make sure that the extent is no bi    3944          * Make sure that the extent is no bigger than we support with
3826          * unwritten extent                      3945          * unwritten extent
3827          */                                      3946          */
3828         if (map->m_len > EXT_UNWRITTEN_MAX_LE    3947         if (map->m_len > EXT_UNWRITTEN_MAX_LEN)
3829                 map->m_len = EXT_UNWRITTEN_MA    3948                 map->m_len = EXT_UNWRITTEN_MAX_LEN / 2;
3830                                                  3949 
3831         depth = ext_depth(inode);                3950         depth = ext_depth(inode);
3832         ex = path[depth].p_ext;                  3951         ex = path[depth].p_ext;
3833         ee_block = le32_to_cpu(ex->ee_block);    3952         ee_block = le32_to_cpu(ex->ee_block);
3834         ee_len = ext4_ext_get_actual_len(ex);    3953         ee_len = ext4_ext_get_actual_len(ex);
3835                                                  3954 
3836         ext_debug(inode, "logical block %llu, !! 3955         ext_debug("%s: inode %lu, logical"
                                                   >> 3956                 "block %llu, max_blocks %u\n", __func__, inode->i_ino,
3837                   (unsigned long long)ee_bloc    3957                   (unsigned long long)ee_block, ee_len);
3838                                                  3958 
3839         if (ee_block != map->m_lblk || ee_len    3959         if (ee_block != map->m_lblk || ee_len > map->m_len) {
3840                 path = ext4_split_convert_ext !! 3960                 err = ext4_split_convert_extents(handle, inode, map, ppath,
3841                                 EXT4_GET_BLOC !! 3961                                 EXT4_GET_BLOCKS_CONVERT_UNWRITTEN);
3842                 if (IS_ERR(path))             !! 3962                 if (err < 0)
3843                         return path;          !! 3963                         return err;
3844                                               !! 3964                 path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
3845                 path = ext4_find_extent(inode << 
3846                 if (IS_ERR(path))                3965                 if (IS_ERR(path))
3847                         return path;          !! 3966                         return PTR_ERR(path);
3848                 depth = ext_depth(inode);        3967                 depth = ext_depth(inode);
3849                 ex = path[depth].p_ext;          3968                 ex = path[depth].p_ext;
3850                 if (!ex) {                       3969                 if (!ex) {
3851                         EXT4_ERROR_INODE(inod    3970                         EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
3852                                          (uns    3971                                          (unsigned long) map->m_lblk);
3853                         err = -EFSCORRUPTED;  !! 3972                         return -EFSCORRUPTED;
3854                         goto errout;          << 
3855                 }                                3973                 }
3856         }                                        3974         }
3857                                                  3975 
3858         err = ext4_ext_get_access(handle, ino    3976         err = ext4_ext_get_access(handle, inode, path + depth);
3859         if (err)                                 3977         if (err)
3860                 goto errout;                  !! 3978                 return err;
3861         /* first mark the extent as unwritten    3979         /* first mark the extent as unwritten */
3862         ext4_ext_mark_unwritten(ex);             3980         ext4_ext_mark_unwritten(ex);
3863                                                  3981 
3864         /* note: ext4_ext_correct_indexes() i    3982         /* note: ext4_ext_correct_indexes() isn't needed here because
3865          * borders are not changed               3983          * borders are not changed
3866          */                                      3984          */
3867         ext4_ext_try_to_merge(handle, inode,     3985         ext4_ext_try_to_merge(handle, inode, path, ex);
3868                                                  3986 
3869         /* Mark modified extent as dirty */      3987         /* Mark modified extent as dirty */
3870         err = ext4_ext_dirty(handle, inode, p    3988         err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3871         if (err)                                 3989         if (err)
3872                 goto errout;                  !! 3990                 return err;
3873         ext4_ext_show_leaf(inode, path);         3991         ext4_ext_show_leaf(inode, path);
3874                                                  3992 
3875         ext4_update_inode_fsync_trans(handle,    3993         ext4_update_inode_fsync_trans(handle, inode, 1);
3876                                               !! 3994         err = check_eofblocks_fl(handle, inode, map->m_lblk, path, map->m_len);
                                                   >> 3995         if (err)
                                                   >> 3996                 return err;
3877         map->m_flags |= EXT4_MAP_UNWRITTEN;      3997         map->m_flags |= EXT4_MAP_UNWRITTEN;
3878         if (*allocated > map->m_len)          !! 3998         if (allocated > map->m_len)
3879                 *allocated = map->m_len;      !! 3999                 allocated = map->m_len;
3880         map->m_len = *allocated;              !! 4000         map->m_len = allocated;
3881         return path;                          !! 4001         return allocated;
3882                                               << 
3883 errout:                                       << 
3884         ext4_free_ext_path(path);             << 
3885         return ERR_PTR(err);                  << 
3886 }                                                4002 }
3887                                                  4003 
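/*
 * Editor's note -- illustrative example, not part of the kernel source:
 * with an initialized extent covering logical blocks 100..199 and a
 * conversion request of map->m_lblk = 120, map->m_len = 20, ee_block (100)
 * differs from m_lblk, so the extent is first split around 120..139; only
 * that middle piece is then marked unwritten, and the reported allocation
 * is clamped to map->m_len = 20.
 */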
3888 static struct ext4_ext_path *                 !! 4004 static int
3889 ext4_ext_handle_unwritten_extents(handle_t *h    4005 ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
3890                         struct ext4_map_block    4006                         struct ext4_map_blocks *map,
3891                         struct ext4_ext_path  !! 4007                         struct ext4_ext_path **ppath, int flags,
3892                         unsigned int *allocat !! 4008                         unsigned int allocated, ext4_fsblk_t newblock)
3893 {                                                4009 {
                                                   >> 4010         struct ext4_ext_path *path = *ppath;
                                                   >> 4011         int ret = 0;
3894         int err = 0;                             4012         int err = 0;
3895                                                  4013 
3896         ext_debug(inode, "logical block %llu, !! 4014         ext_debug("ext4_ext_handle_unwritten_extents: inode %lu, logical "
3897                   (unsigned long long)map->m_ !! 4015                   "block %llu, max_blocks %u, flags %x, allocated %u\n",
3898                   *allocated);                !! 4016                   inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
                                                   >> 4017                   flags, allocated);
3899         ext4_ext_show_leaf(inode, path);         4018         ext4_ext_show_leaf(inode, path);
3900                                                  4019 
3901         /*                                       4020         /*
3902          * When writing into unwritten space,    4021          * When writing into unwritten space, we should not fail to
3903          * allocate metadata blocks for the n    4022          * allocate metadata blocks for the new extent block if needed.
3904          */                                      4023          */
3905         flags |= EXT4_GET_BLOCKS_METADATA_NOF    4024         flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL;
3906                                                  4025 
3907         trace_ext4_ext_handle_unwritten_exten    4026         trace_ext4_ext_handle_unwritten_extents(inode, map, flags,
3908                                               !! 4027                                                     allocated, newblock);
3909                                                  4028 
3910         /* get_block() before submitting IO,  !! 4029         /* get_block() before submit the IO, split the extent */
3911         if (flags & EXT4_GET_BLOCKS_PRE_IO) {    4030         if (flags & EXT4_GET_BLOCKS_PRE_IO) {
3912                 path = ext4_split_convert_ext !! 4031                 ret = ext4_split_convert_extents(handle, inode, map, ppath,
3913                                 flags | EXT4_ !! 4032                                          flags | EXT4_GET_BLOCKS_CONVERT);
3914                 if (IS_ERR(path))             !! 4033                 if (ret <= 0)
3915                         return path;          !! 4034                         goto out;
3916                 /*                            << 
3917                  * shouldn't get a 0 allocate << 
3918                  * m_len is 0 (bug) or extent << 
3919                  */                           << 
3920                 if (unlikely(*allocated == 0) << 
3921                         EXT4_ERROR_INODE(inod << 
3922                                          "une << 
3923                                          map- << 
3924                         err = -EFSCORRUPTED;  << 
3925                         goto errout;          << 
3926                 }                             << 
3927                 map->m_flags |= EXT4_MAP_UNWR    4035                 map->m_flags |= EXT4_MAP_UNWRITTEN;
3928                 goto out;                        4036                 goto out;
3929         }                                        4037         }
3930         /* IO end_io complete, convert the fi    4038         /* IO end_io complete, convert the filled extent to written */
3931         if (flags & EXT4_GET_BLOCKS_CONVERT)     4039         if (flags & EXT4_GET_BLOCKS_CONVERT) {
3932                 path = ext4_convert_unwritten !! 4040                 if (flags & EXT4_GET_BLOCKS_ZERO) {
3933                                               !! 4041                         if (allocated > map->m_len)
3934                 if (IS_ERR(path))             !! 4042                                 allocated = map->m_len;
3935                         return path;          !! 4043                         err = ext4_issue_zeroout(inode, map->m_lblk, newblock,
3936                 ext4_update_inode_fsync_trans !! 4044                                                  allocated);
3937                 goto map_out;                 !! 4045                         if (err < 0)
                                                   >> 4046                                 goto out2;
                                                   >> 4047                 }
                                                   >> 4048                 ret = ext4_convert_unwritten_extents_endio(handle, inode, map,
                                                   >> 4049                                                            ppath);
                                                   >> 4050                 if (ret >= 0) {
                                                   >> 4051                         ext4_update_inode_fsync_trans(handle, inode, 1);
                                                   >> 4052                         err = check_eofblocks_fl(handle, inode, map->m_lblk,
                                                   >> 4053                                                  path, map->m_len);
                                                   >> 4054                 } else
                                                   >> 4055                         err = ret;
                                                   >> 4056                 map->m_flags |= EXT4_MAP_MAPPED;
                                                   >> 4057                 map->m_pblk = newblock;
                                                   >> 4058                 if (allocated > map->m_len)
                                                   >> 4059                         allocated = map->m_len;
                                                   >> 4060                 map->m_len = allocated;
                                                   >> 4061                 goto out2;
3938         }                                        4062         }
3939         /* buffered IO cases */               !! 4063         /* buffered IO case */
3940         /*                                       4064         /*
3941          * repeat fallocate creation request     4065          * repeat fallocate creation request
3942          * we already have an unwritten exten    4066          * we already have an unwritten extent
3943          */                                      4067          */
3944         if (flags & EXT4_GET_BLOCKS_UNWRIT_EX    4068         if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) {
3945                 map->m_flags |= EXT4_MAP_UNWR    4069                 map->m_flags |= EXT4_MAP_UNWRITTEN;
3946                 goto map_out;                    4070                 goto map_out;
3947         }                                        4071         }
3948                                                  4072 
3949         /* buffered READ or buffered write_be    4073         /* buffered READ or buffered write_begin() lookup */
3950         if ((flags & EXT4_GET_BLOCKS_CREATE)     4074         if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
3951                 /*                               4075                 /*
3952                  * We have blocks reserved al    4076                  * We have blocks reserved already.  We
3953                  * return allocated blocks so    4077                  * return allocated blocks so that delalloc
3954                  * won't do block reservation    4078                  * won't do block reservation for us.  But
3955                  * the buffer head will be un    4079                  * the buffer head will be unmapped so that
3956                  * a read from the block retu    4080                  * a read from the block returns 0s.
3957                  */                              4081                  */
3958                 map->m_flags |= EXT4_MAP_UNWR    4082                 map->m_flags |= EXT4_MAP_UNWRITTEN;
3959                 goto out1;                       4083                 goto out1;
3960         }                                        4084         }
3961                                                  4085 
                                                   >> 4086         /* buffered write, writepage time, convert */
                                                   >> 4087         ret = ext4_ext_convert_to_initialized(handle, inode, map, ppath, flags);
                                                   >> 4088         if (ret >= 0)
                                                   >> 4089                 ext4_update_inode_fsync_trans(handle, inode, 1);
                                                   >> 4090 out:
                                                   >> 4091         if (ret <= 0) {
                                                   >> 4092                 err = ret;
                                                   >> 4093                 goto out2;
                                                   >> 4094         } else
                                                   >> 4095                 allocated = ret;
                                                   >> 4096         map->m_flags |= EXT4_MAP_NEW;
3962         /*                                       4097         /*
3963          * Default case when (flags & EXT4_GE !! 4098          * if we allocated more blocks than requested
3964          * For buffered writes, at writepage  !! 4099          * we need to make sure we unmap the extra block
3965          * discovered unwritten extent to wri !! 4100          * allocated. The actual needed block will get
3966          */                                   !! 4101          * unmapped later when we find the buffer_head marked
3967         path = ext4_ext_convert_to_initialize !! 4102          * new.
3968                                               !! 4103          */
3969         if (IS_ERR(path))                     !! 4104         if (allocated > map->m_len) {
3970                 return path;                  !! 4105                 clean_bdev_aliases(inode->i_sb->s_bdev, newblock + map->m_len,
3971         ext4_update_inode_fsync_trans(handle, !! 4106                                    allocated - map->m_len);
                                                   >> 4107                 allocated = map->m_len;
                                                   >> 4108         }
                                                   >> 4109         map->m_len = allocated;
                                                   >> 4110 
3972         /*                                       4111         /*
3973          * shouldn't get a 0 allocated when c !! 4112          * If we have done fallocate with the offset that is already
3974          * unless m_len is 0 (bug) or extent  !! 4113          * delayed allocated, we would have block reservation
3975          */                                   !! 4114          * and quota reservation done in the delayed write path.
3976         if (unlikely(*allocated == 0)) {      !! 4115          * But fallocate would have already updated quota and block
3977                 EXT4_ERROR_INODE(inode, "unex !! 4116          * count for this offset. So cancel these reservation
3978                                  map->m_len); !! 4117          */
3979                 err = -EFSCORRUPTED;          !! 4118         if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
3980                 goto errout;                  !! 4119                 unsigned int reserved_clusters;
                                                   >> 4120                 reserved_clusters = get_reserved_cluster_alloc(inode,
                                                   >> 4121                                 map->m_lblk, map->m_len);
                                                   >> 4122                 if (reserved_clusters)
                                                   >> 4123                         ext4_da_update_reserve_space(inode,
                                                   >> 4124                                                      reserved_clusters,
                                                   >> 4125                                                      0);
3981         }                                        4126         }
3982                                                  4127 
3983 out:                                          << 
3984         map->m_flags |= EXT4_MAP_NEW;         << 
3985 map_out:                                         4128 map_out:
3986         map->m_flags |= EXT4_MAP_MAPPED;         4129         map->m_flags |= EXT4_MAP_MAPPED;
                                                   >> 4130         if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) {
                                                   >> 4131                 err = check_eofblocks_fl(handle, inode, map->m_lblk, path,
                                                   >> 4132                                          map->m_len);
                                                   >> 4133                 if (err < 0)
                                                   >> 4134                         goto out2;
                                                   >> 4135         }
3987 out1:                                            4136 out1:
3988         map->m_pblk = newblock;               !! 4137         if (allocated > map->m_len)
3989         if (*allocated > map->m_len)          !! 4138                 allocated = map->m_len;
3990                 *allocated = map->m_len;      << 
3991         map->m_len = *allocated;              << 
3992         ext4_ext_show_leaf(inode, path);         4139         ext4_ext_show_leaf(inode, path);
3993         return path;                          !! 4140         map->m_pblk = newblock;
3994                                               !! 4141         map->m_len = allocated;
3995 errout:                                       !! 4142 out2:
3996         ext4_free_ext_path(path);             !! 4143         return err ? err : allocated;
3997         return ERR_PTR(err);                  << 
3998 }                                                4144 }
3999                                                  4145 
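/*
 * Editor's note -- illustrative summary, not part of the kernel source:
 * the dispatch above handles an already-unwritten extent according to the
 * get_blocks flags used in this file:
 *
 *   EXT4_GET_BLOCKS_PRE_IO        split the extent before the IO is submitted
 *   EXT4_GET_BLOCKS_CONVERT       IO has completed, convert it to written
 *   EXT4_GET_BLOCKS_UNWRIT_EXT    repeated fallocate, keep it unwritten
 *   EXT4_GET_BLOCKS_CREATE unset  lookup only, report an unwritten mapping
 *   otherwise                     buffered write: convert it to initialized
 */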
4000 /*                                               4146 /*
4001  * get_implied_cluster_alloc - check to see i    4147  * get_implied_cluster_alloc - check to see if the requested
4002  * allocation (in the map structure) overlaps    4148  * allocation (in the map structure) overlaps with a cluster already
4003  * allocated in an extent.                       4149  * allocated in an extent.
4004  *      @sb     The filesystem superblock str    4150  *      @sb     The filesystem superblock structure
4005  *      @map    The requested lblk->pblk mapp    4151  *      @map    The requested lblk->pblk mapping
4006  *      @ex     The extent structure which mi    4152  *      @ex     The extent structure which might contain an implied
4007  *                      cluster allocation       4153  *                      cluster allocation
4008  *                                               4154  *
4009  * This function is called by ext4_ext_map_bl    4155  * This function is called by ext4_ext_map_blocks() after we failed to
4010  * find blocks that were already in the inode    4156  * find blocks that were already in the inode's extent tree.  Hence,
4011  * we know that the beginning of the requeste    4157  * we know that the beginning of the requested region cannot overlap
4012  * the extent from the inode's extent tree.      4158  * the extent from the inode's extent tree.  There are three cases we
4013  * want to catch.  The first is this case:       4159  * want to catch.  The first is this case:
4014  *                                               4160  *
4015  *               |--- cluster # N--|             4161  *               |--- cluster # N--|
4016  *    |--- extent ---|  |---- requested regio    4162  *    |--- extent ---|  |---- requested region ---|
4017  *                      |==========|             4163  *                      |==========|
4018  *                                               4164  *
4019  * The second case that we need to test for i    4165  * The second case that we need to test for is this one:
4020  *                                               4166  *
4021  *   |--------- cluster # N ----------------|    4167  *   |--------- cluster # N ----------------|
4022  *         |--- requested region --|   |-----    4168  *         |--- requested region --|   |------- extent ----|
4023  *         |=======================|             4169  *         |=======================|
4024  *                                               4170  *
4025  * The third case is when the requested regio    4171  * The third case is when the requested region lies between two extents
4026  * within the same cluster:                      4172  * within the same cluster:
4027  *          |------------- cluster # N-------    4173  *          |------------- cluster # N-------------|
4028  * |----- ex -----|                  |---- ex    4174  * |----- ex -----|                  |---- ex_right ----|
4029  *                  |------ requested region     4175  *                  |------ requested region ------|
4030  *                  |================|           4176  *                  |================|
4031  *                                               4177  *
4032  * In each of the above cases, we need to set    4178  * In each of the above cases, we need to set the map->m_pblk and
 4033  * map->m_len so they correspond to the exte    4179  * map->m_len so they correspond to the extent labelled as
4034  * "|====|" from cluster #N, since it is alre    4180  * "|====|" from cluster #N, since it is already in use for data in
4035  * cluster EXT4_B2C(sbi, map->m_lblk).  We wi    4181  * cluster EXT4_B2C(sbi, map->m_lblk).  We will then return 1 to
4036  * signal to ext4_ext_map_blocks() that map->    4182  * signal to ext4_ext_map_blocks() that map->m_pblk should be treated
4037  * as a new "allocated" block region.  Otherw    4183  * as a new "allocated" block region.  Otherwise, we will return 0 and
4038  * ext4_ext_map_blocks() will then allocate o    4184  * ext4_ext_map_blocks() will then allocate one or more new clusters
4039  * by calling ext4_mb_new_blocks().              4185  * by calling ext4_mb_new_blocks().
4040  */                                              4186  */
4041 static int get_implied_cluster_alloc(struct s    4187 static int get_implied_cluster_alloc(struct super_block *sb,
4042                                      struct e    4188                                      struct ext4_map_blocks *map,
4043                                      struct e    4189                                      struct ext4_extent *ex,
4044                                      struct e    4190                                      struct ext4_ext_path *path)
4045 {                                                4191 {
4046         struct ext4_sb_info *sbi = EXT4_SB(sb    4192         struct ext4_sb_info *sbi = EXT4_SB(sb);
4047         ext4_lblk_t c_offset = EXT4_LBLK_COFF    4193         ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
4048         ext4_lblk_t ex_cluster_start, ex_clus    4194         ext4_lblk_t ex_cluster_start, ex_cluster_end;
4049         ext4_lblk_t rr_cluster_start;            4195         ext4_lblk_t rr_cluster_start;
4050         ext4_lblk_t ee_block = le32_to_cpu(ex    4196         ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
4051         ext4_fsblk_t ee_start = ext4_ext_pblo    4197         ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
4052         unsigned short ee_len = ext4_ext_get_    4198         unsigned short ee_len = ext4_ext_get_actual_len(ex);
4053                                                  4199 
4054         /* The extent passed in that we are t    4200         /* The extent passed in that we are trying to match */
4055         ex_cluster_start = EXT4_B2C(sbi, ee_b    4201         ex_cluster_start = EXT4_B2C(sbi, ee_block);
4056         ex_cluster_end = EXT4_B2C(sbi, ee_blo    4202         ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1);
4057                                                  4203 
4058         /* The requested region passed into e    4204         /* The requested region passed into ext4_map_blocks() */
4059         rr_cluster_start = EXT4_B2C(sbi, map-    4205         rr_cluster_start = EXT4_B2C(sbi, map->m_lblk);
4060                                                  4206 
4061         if ((rr_cluster_start == ex_cluster_e    4207         if ((rr_cluster_start == ex_cluster_end) ||
4062             (rr_cluster_start == ex_cluster_s    4208             (rr_cluster_start == ex_cluster_start)) {
4063                 if (rr_cluster_start == ex_cl    4209                 if (rr_cluster_start == ex_cluster_end)
4064                         ee_start += ee_len -     4210                         ee_start += ee_len - 1;
4065                 map->m_pblk = EXT4_PBLK_CMASK    4211                 map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset;
4066                 map->m_len = min(map->m_len,     4212                 map->m_len = min(map->m_len,
4067                                  (unsigned) s    4213                                  (unsigned) sbi->s_cluster_ratio - c_offset);
4068                 /*                               4214                 /*
4069                  * Check for and handle this     4215                  * Check for and handle this case:
4070                  *                               4216                  *
4071                  *   |--------- cluster # N--    4217                  *   |--------- cluster # N-------------|
4072                  *                     |-----    4218                  *                     |------- extent ----|
4073                  *         |--- requested reg    4219                  *         |--- requested region ---|
4074                  *         |===========|         4220                  *         |===========|
4075                  */                              4221                  */
4076                                                  4222 
4077                 if (map->m_lblk < ee_block)      4223                 if (map->m_lblk < ee_block)
4078                         map->m_len = min(map-    4224                         map->m_len = min(map->m_len, ee_block - map->m_lblk);
4079                                                  4225 
4080                 /*                               4226                 /*
4081                  * Check for the case where t    4227                  * Check for the case where there is already another allocated
4082                  * block to the right of 'ex'    4228                  * block to the right of 'ex' but before the end of the cluster.
4083                  *                               4229                  *
4084                  *          |------------- cl    4230                  *          |------------- cluster # N-------------|
4085                  * |----- ex -----|              4231                  * |----- ex -----|                  |---- ex_right ----|
4086                  *                  |------ r    4232                  *                  |------ requested region ------|
4087                  *                  |========    4233                  *                  |================|
4088                  */                              4234                  */
4089                 if (map->m_lblk > ee_block) {    4235                 if (map->m_lblk > ee_block) {
4090                         ext4_lblk_t next = ex    4236                         ext4_lblk_t next = ext4_ext_next_allocated_block(path);
4091                         map->m_len = min(map-    4237                         map->m_len = min(map->m_len, next - map->m_lblk);
4092                 }                                4238                 }
4093                                                  4239 
4094                 trace_ext4_get_implied_cluste    4240                 trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1);
4095                 return 1;                        4241                 return 1;
4096         }                                        4242         }
4097                                                  4243 
4098         trace_ext4_get_implied_cluster_alloc_    4244         trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0);
4099         return 0;                                4245         return 0;
4100 }                                                4246 }
4101                                                  4247 
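/*
 * Editor's note -- illustrative sketch, not part of the kernel source:
 * a userspace rendering of the first case above, assuming 16 blocks per
 * cluster.  b2c()/coff()/cmask() are simplified stand-ins for
 * EXT4_B2C()/EXT4_LBLK_COFF()/EXT4_PBLK_CMASK().
 */
#include <stdio.h>

#define RATIO 16u

static unsigned int b2c(unsigned long long b)  { return b / RATIO; }
static unsigned int coff(unsigned long long b) { return b % RATIO; }
static unsigned long long cmask(unsigned long long b) { return b - coff(b); }

int main(void)
{
        unsigned int ee_block = 64, ee_len = 4;         /* extent: lblk 64..67 */
        unsigned long long ee_start = 1024;             /* ...at pblk 1024..1027 */
        unsigned int m_lblk = 70, m_len = 32;           /* requested region */

        /* requested region starts in the cluster the extent ends in */
        if (b2c(m_lblk) == b2c(ee_block + ee_len - 1)) {
                unsigned long long pblk =
                        cmask(ee_start + ee_len - 1) + coff(m_lblk);
                unsigned int len = m_len < RATIO - coff(m_lblk) ?
                                   m_len : RATIO - coff(m_lblk);
                /* prints "implied: lblk 70 -> pblk 1030, len 10" */
                printf("implied: lblk %u -> pblk %llu, len %u\n",
                       m_lblk, pblk, len);
        }
        return 0;
}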
4102 /*                                            << 
4103  * Determine hole length around the given log << 
4104  * locate and expand the hole from the given  << 
4105  * if it's partially or completely converted  << 
4106  * it into the extent cache tree if it's inde << 
4107  * the length of the determined extent.       << 
4108  */                                           << 
4109 static ext4_lblk_t ext4_ext_determine_insert_ << 
4110                                               << 
4111                                               << 
4112 {                                             << 
4113         ext4_lblk_t hole_start, len;          << 
4114         struct extent_status es;              << 
4115                                               << 
4116         hole_start = lblk;                    << 
4117         len = ext4_ext_find_hole(inode, path, << 
4118 again:                                        << 
4119         ext4_es_find_extent_range(inode, &ext << 
4120                                   hole_start  << 
4121         if (!es.es_len)                       << 
4122                 goto insert_hole;             << 
4123                                               << 
4124         /*                                    << 
4125          * There's a delalloc extent in the h << 
4126          * extent is in front of, behind and  << 
4127          */                                   << 
4128         if (lblk >= es.es_lblk + es.es_len) { << 
4129                 /*                            << 
4130                  * The delalloc extent is in  << 
4131                  * find again from the querie << 
4132                  */                           << 
4133                 len -= lblk - hole_start;     << 
4134                 hole_start = lblk;            << 
4135                 goto again;                   << 
4136         } else if (in_range(lblk, es.es_lblk, << 
4137                 /*                            << 
4138                  * The delalloc extent contai << 
4139                  * added after ext4_map_block << 
4140                  * tree so we are not holding << 
4141                  * only stabilized by i_data_ << 
4142                  * soon. Don't modify the ext << 
4143                  * extent as a hole, just adj << 
4144                  * extent's after lblk.       << 
4145                  */                           << 
4146                 len = es.es_lblk + es.es_len  << 
4147                 return len;                   << 
4148         } else {                              << 
4149                 /*                            << 
4150                  * The delalloc extent is par << 
4151                  * the queried range, update  << 
4152                  * beginning of the delalloc  << 
4153                  */                           << 
4154                 len = min(es.es_lblk - hole_s << 
4155         }                                     << 
4156                                               << 
4157 insert_hole:                                  << 
4158         /* Put just found gap into cache to s << 
4159         ext_debug(inode, " -> %u:%u\n", hole_ << 
4160         ext4_es_insert_extent(inode, hole_sta << 
4161                               EXTENT_STATUS_H << 
4162                                               << 
4163         /* Update hole_len to reflect hole si << 
4164         if (hole_start != lblk)               << 
4165                 len -= lblk - hole_start;     << 
4166                                               << 
4167         return len;                           << 
4168 }                                             << 
4169                                                  4248 
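/*
 * Editor's note -- illustrative example, not part of the kernel source:
 * suppose the extent tree has a hole spanning logical blocks 100..199 and
 * the query is for lblk = 130.  The three cases above then behave as
 * follows:
 *
 *   delalloc at 140..149 (behind lblk):  hole 100..139 is cached, and the
 *                                        part at/after lblk (130..139) is
 *                                        returned, i.e. 10
 *   delalloc at 120..139 (covers lblk):  nothing is cached,
 *                                        return 140 - 130 = 10
 *   delalloc at 100..119 (before lblk):  the lookup is retried from 130;
 *                                        if nothing else is delayed there,
 *                                        hole 130..199 is cached, return 70
 */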
4170 /*                                               4249 /*
4171  * Block allocation/map/preallocation routine    4250  * Block allocation/map/preallocation routine for extents based files
4172  *                                               4251  *
4173  *                                               4252  *
4174  * Need to be called with                        4253  * Need to be called with
4175  * down_read(&EXT4_I(inode)->i_data_sem) if n    4254  * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
4176  * (ie, flags is zero). Otherwise down_write( !! 4255  * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
4177  *                                               4256  *
 4178  * return > 0, number of blocks already mappe !! 4257  * return > 0, number of blocks already mapped/allocated
4179  *          if flags doesn't contain EXT4_GET !! 4258  *          if create == 0 and these are pre-allocated blocks
4180  *              buffer head is unmapped          4259  *              buffer head is unmapped
4181  *          otherwise blocks are mapped          4260  *          otherwise blocks are mapped
4182  *                                               4261  *
4183  * return = 0, if plain look up failed (block    4262  * return = 0, if plain look up failed (blocks have not been allocated)
4184  *          buffer head is unmapped              4263  *          buffer head is unmapped
4185  *                                               4264  *
4186  * return < 0, error case.                       4265  * return < 0, error case.
4187  */                                              4266  */
4188 int ext4_ext_map_blocks(handle_t *handle, str    4267 int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
4189                         struct ext4_map_block    4268                         struct ext4_map_blocks *map, int flags)
4190 {                                                4269 {
4191         struct ext4_ext_path *path = NULL;       4270         struct ext4_ext_path *path = NULL;
4192         struct ext4_extent newex, *ex, ex2;   !! 4271         struct ext4_extent newex, *ex, *ex2;
4193         struct ext4_sb_info *sbi = EXT4_SB(in    4272         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4194         ext4_fsblk_t newblock = 0, pblk;      !! 4273         ext4_fsblk_t newblock = 0;
4195         int err = 0, depth;                   !! 4274         int free_on_err = 0, err = 0, depth, ret;
4196         unsigned int allocated = 0, offset =     4275         unsigned int allocated = 0, offset = 0;
4197         unsigned int allocated_clusters = 0;     4276         unsigned int allocated_clusters = 0;
4198         struct ext4_allocation_request ar;       4277         struct ext4_allocation_request ar;
4199         ext4_lblk_t cluster_offset;              4278         ext4_lblk_t cluster_offset;
                                                   >> 4279         bool map_from_cluster = false;
4200                                                  4280 
4201         ext_debug(inode, "blocks %u/%u reques !! 4281         ext_debug("blocks %u/%u requested for inode %lu\n",
                                                   >> 4282                   map->m_lblk, map->m_len, inode->i_ino);
4202         trace_ext4_ext_map_blocks_enter(inode    4283         trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
4203                                                  4284 
4204         /* find extent for this block */         4285         /* find extent for this block */
4205         path = ext4_find_extent(inode, map->m    4286         path = ext4_find_extent(inode, map->m_lblk, NULL, 0);
4206         if (IS_ERR(path)) {                      4287         if (IS_ERR(path)) {
4207                 err = PTR_ERR(path);             4288                 err = PTR_ERR(path);
4208                 goto out;                     !! 4289                 path = NULL;
                                                   >> 4290                 goto out2;
4209         }                                        4291         }
4210                                                  4292 
4211         depth = ext_depth(inode);                4293         depth = ext_depth(inode);
4212                                                  4294 
4213         /*                                       4295         /*
4214          * consistent leaf must not be empty;    4296          * consistent leaf must not be empty;
4215          * this situation is possible, though    4297          * this situation is possible, though, _during_ tree modification;
4216          * this is why assert can't be put in    4298          * this is why assert can't be put in ext4_find_extent()
4217          */                                      4299          */
4218         if (unlikely(path[depth].p_ext == NUL    4300         if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
4219                 EXT4_ERROR_INODE(inode, "bad     4301                 EXT4_ERROR_INODE(inode, "bad extent address "
4220                                  "lblock: %lu    4302                                  "lblock: %lu, depth: %d pblock %lld",
4221                                  (unsigned lo    4303                                  (unsigned long) map->m_lblk, depth,
4222                                  path[depth].    4304                                  path[depth].p_block);
4223                 err = -EFSCORRUPTED;             4305                 err = -EFSCORRUPTED;
4224                 goto out;                     !! 4306                 goto out2;
4225         }                                        4307         }
4226                                                  4308 
4227         ex = path[depth].p_ext;                  4309         ex = path[depth].p_ext;
4228         if (ex) {                                4310         if (ex) {
4229                 ext4_lblk_t ee_block = le32_t    4311                 ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
4230                 ext4_fsblk_t ee_start = ext4_    4312                 ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
4231                 unsigned short ee_len;           4313                 unsigned short ee_len;
4232                                                  4314 
4233                                                  4315 
4234                 /*                               4316                 /*
4235                  * unwritten extents are trea    4317                  * unwritten extents are treated as holes, except that
4236                  * we split out initialized p    4318                  * we split out initialized portions during a write.
4237                  */                              4319                  */
4238                 ee_len = ext4_ext_get_actual_    4320                 ee_len = ext4_ext_get_actual_len(ex);
4239                                                  4321 
4240                 trace_ext4_ext_show_extent(in    4322                 trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len);
4241                                                  4323 
4242                 /* if found extent covers blo    4324                 /* if found extent covers block, simply return it */
4243                 if (in_range(map->m_lblk, ee_    4325                 if (in_range(map->m_lblk, ee_block, ee_len)) {
4244                         newblock = map->m_lbl    4326                         newblock = map->m_lblk - ee_block + ee_start;
4245                         /* number of remainin    4327                         /* number of remaining blocks in the extent */
4246                         allocated = ee_len -     4328                         allocated = ee_len - (map->m_lblk - ee_block);
4247                         ext_debug(inode, "%u  !! 4329                         ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
4248                                   map->m_lblk !! 4330                                   ee_block, ee_len, newblock);
4249                                                  4331 
4250                         /*                       4332                         /*
4251                          * If the extent is i    4333                          * If the extent is initialized check whether the
4252                          * caller wants to co    4334                          * caller wants to convert it to unwritten.
4253                          */                      4335                          */
4254                         if ((!ext4_ext_is_unw    4336                         if ((!ext4_ext_is_unwritten(ex)) &&
4255                             (flags & EXT4_GET    4337                             (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) {
4256                                 path = conver !! 4338                                 allocated = convert_initialized_extent(
4257                                         inode !! 4339                                                 handle, inode, map, &path,
4258                                 if (IS_ERR(pa !! 4340                                                 allocated);
4259                                         err = !! 4341                                 goto out2;
                                                   >> 4342                         } else if (!ext4_ext_is_unwritten(ex))
4260                                 goto out;        4343                                 goto out;
4261                         } else if (!ext4_ext_ << 
4262                                 map->m_flags  << 
4263                                 map->m_pblk = << 
4264                                 if (allocated << 
4265                                         alloc << 
4266                                 map->m_len =  << 
4267                                 ext4_ext_show << 
4268                                 goto out;     << 
4269                         }                     << 
4270                                                  4344 
4271                         path = ext4_ext_handl !! 4345                         ret = ext4_ext_handle_unwritten_extents(
4272                                 handle, inode !! 4346                                 handle, inode, map, &path, flags,
4273                                 &allocated, n !! 4347                                 allocated, newblock);
4274                         if (IS_ERR(path))     !! 4348                         if (ret < 0)
4275                                 err = PTR_ERR !! 4349                                 err = ret;
4276                         goto out;             !! 4350                         else
                                                   >> 4351                                 allocated = ret;
                                                   >> 4352                         goto out2;
4277                 }                                4353                 }
4278         }                                        4354         }
4279                                                  4355 
4280         /*                                       4356         /*
4281          * requested block isn't allocated ye    4357          * requested block isn't allocated yet;
4282          * we couldn't try to create block if !! 4358          * we couldn't try to create block if create flag is zero
4283          */                                      4359          */
4284         if ((flags & EXT4_GET_BLOCKS_CREATE)     4360         if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
4285                 ext4_lblk_t len;              !! 4361                 ext4_lblk_t hole_start, hole_len;
4286                                                  4362 
4287                 len = ext4_ext_determine_inse !! 4363                 hole_start = map->m_lblk;
                                                   >> 4364                 hole_len = ext4_ext_determine_hole(inode, path, &hole_start);
                                                   >> 4365                 /*
                                                   >> 4366                  * put just found gap into cache to speed up
                                                   >> 4367                  * subsequent requests
                                                   >> 4368                  */
                                                   >> 4369                 ext4_ext_put_gap_in_cache(inode, hole_start, hole_len);
4288                                                  4370 
                                                   >> 4371                 /* Update hole_len to reflect hole size after map->m_lblk */
                                                   >> 4372                 if (hole_start != map->m_lblk)
                                                   >> 4373                         hole_len -= map->m_lblk - hole_start;
4289                 map->m_pblk = 0;                 4374                 map->m_pblk = 0;
4290                 map->m_len = min_t(unsigned i !! 4375                 map->m_len = min_t(unsigned int, map->m_len, hole_len);
4291                 goto out;                     !! 4376 
                                                   >> 4377                 goto out2;
4292         }                                        4378         }
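
/*
 * Editor's illustrative sketch (standalone user-space C, not part of
 * extents.c): how the lookup-only path above trims the reported hole when
 * EXT4_GET_BLOCKS_CREATE is not set. ext4_ext_determine_hole() may report a
 * hole that begins before the block that was asked for, so the length is
 * first reduced to the part that starts at map->m_lblk and then clamped to
 * the caller's requested length. The struct and the sample hole are
 * assumptions for demonstration only.
 */
#include <stdio.h>

struct demo_map { unsigned int m_lblk, m_len, m_pblk; };

static unsigned int min_u(unsigned int a, unsigned int b)
{
        return a < b ? a : b;
}

int main(void)
{
        struct demo_map map = { .m_lblk = 110, .m_len = 50 };
        /* pretend the extent tree says the hole covers logical blocks 100..199 */
        unsigned int hole_start = 100, hole_len = 100;

        if (hole_start != map.m_lblk)                   /* hole began before m_lblk */
                hole_len -= map.m_lblk - hole_start;    /* 100 - 10 = 90 */

        map.m_pblk = 0;                                 /* nothing is mapped */
        map.m_len = min_u(map.m_len, hole_len);         /* min(50, 90) = 50 */

        printf("unmapped region at %u, length %u\n", map.m_lblk, map.m_len);
        return 0;
}
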
4293                                                  4379 
4294         /*                                       4380         /*
4295          * Okay, we need to do block allocati    4381          * Okay, we need to do block allocation.
4296          */                                      4382          */
4297         newex.ee_block = cpu_to_le32(map->m_l    4383         newex.ee_block = cpu_to_le32(map->m_lblk);
4298         cluster_offset = EXT4_LBLK_COFF(sbi,     4384         cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
4299                                                  4385 
4300         /*                                       4386         /*
4301          * If we are doing bigalloc, check to    4387          * If we are doing bigalloc, check to see if the extent returned
4302          * by ext4_find_extent() implies a cl    4388          * by ext4_find_extent() implies a cluster we can use.
4303          */                                      4389          */
4304         if (cluster_offset && ex &&              4390         if (cluster_offset && ex &&
4305             get_implied_cluster_alloc(inode->    4391             get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
4306                 ar.len = allocated = map->m_l    4392                 ar.len = allocated = map->m_len;
4307                 newblock = map->m_pblk;          4393                 newblock = map->m_pblk;
                                                   >> 4394                 map_from_cluster = true;
4308                 goto got_allocated_blocks;       4395                 goto got_allocated_blocks;
4309         }                                        4396         }
4310                                                  4397 
4311         /* find neighbour allocated blocks */    4398         /* find neighbour allocated blocks */
4312         ar.lleft = map->m_lblk;                  4399         ar.lleft = map->m_lblk;
4313         err = ext4_ext_search_left(inode, pat    4400         err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
4314         if (err)                                 4401         if (err)
4315                 goto out;                     !! 4402                 goto out2;
4316         ar.lright = map->m_lblk;                 4403         ar.lright = map->m_lblk;
                                                   >> 4404         ex2 = NULL;
4317         err = ext4_ext_search_right(inode, pa    4405         err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2);
4318         if (err < 0)                          !! 4406         if (err)
4319                 goto out;                     !! 4407                 goto out2;
4320                                                  4408 
4321         /* Check if the extent after searchin    4409         /* Check if the extent after searching to the right implies a
4322          * cluster we can use. */                4410          * cluster we can use. */
4323         if ((sbi->s_cluster_ratio > 1) && err !! 4411         if ((sbi->s_cluster_ratio > 1) && ex2 &&
4324             get_implied_cluster_alloc(inode-> !! 4412             get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) {
4325                 ar.len = allocated = map->m_l    4413                 ar.len = allocated = map->m_len;
4326                 newblock = map->m_pblk;          4414                 newblock = map->m_pblk;
4327                 err = 0;                      !! 4415                 map_from_cluster = true;
4328                 goto got_allocated_blocks;       4416                 goto got_allocated_blocks;
4329         }                                        4417         }
4330                                                  4418 
4331         /*                                       4419         /*
4332          * See if request is beyond maximum n    4420          * See if request is beyond maximum number of blocks we can have in
4333          * a single extent. For an initialize    4421          * a single extent. For an initialized extent this limit is
4334          * EXT_INIT_MAX_LEN and for an unwrit    4422          * EXT_INIT_MAX_LEN and for an unwritten extent this limit is
4335          * EXT_UNWRITTEN_MAX_LEN.                4423          * EXT_UNWRITTEN_MAX_LEN.
4336          */                                      4424          */
4337         if (map->m_len > EXT_INIT_MAX_LEN &&     4425         if (map->m_len > EXT_INIT_MAX_LEN &&
4338             !(flags & EXT4_GET_BLOCKS_UNWRIT_    4426             !(flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
4339                 map->m_len = EXT_INIT_MAX_LEN    4427                 map->m_len = EXT_INIT_MAX_LEN;
4340         else if (map->m_len > EXT_UNWRITTEN_M    4428         else if (map->m_len > EXT_UNWRITTEN_MAX_LEN &&
4341                  (flags & EXT4_GET_BLOCKS_UNW    4429                  (flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
4342                 map->m_len = EXT_UNWRITTEN_MA    4430                 map->m_len = EXT_UNWRITTEN_MAX_LEN;
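
/*
 * Editor's illustrative sketch (standalone user-space C, not part of
 * extents.c): the clamp performed just above. The 16-bit on-disk length
 * field also carries the "unwritten" marker, so an initialized extent can
 * cover at most EXT_INIT_MAX_LEN blocks and an unwritten one
 * EXT_UNWRITTEN_MAX_LEN (one block less). The numeric values below restate
 * the usual definitions and are repeated here only for the demonstration.
 */
#include <stdio.h>

#define DEMO_EXT_INIT_MAX_LEN           32768u
#define DEMO_EXT_UNWRITTEN_MAX_LEN      (DEMO_EXT_INIT_MAX_LEN - 1)

static unsigned int clamp_request(unsigned int len, int unwritten)
{
        if (!unwritten && len > DEMO_EXT_INIT_MAX_LEN)
                return DEMO_EXT_INIT_MAX_LEN;
        if (unwritten && len > DEMO_EXT_UNWRITTEN_MAX_LEN)
                return DEMO_EXT_UNWRITTEN_MAX_LEN;
        return len;
}

int main(void)
{
        printf("%u\n", clamp_request(100000, 0));       /* 32768 */
        printf("%u\n", clamp_request(100000, 1));       /* 32767 */
        return 0;
}
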
4343                                                  4431 
4344         /* Check if we can really insert (m_l    4432         /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
4345         newex.ee_len = cpu_to_le16(map->m_len    4433         newex.ee_len = cpu_to_le16(map->m_len);
4346         err = ext4_ext_check_overlap(sbi, ino    4434         err = ext4_ext_check_overlap(sbi, inode, &newex, path);
4347         if (err)                                 4435         if (err)
4348                 allocated = ext4_ext_get_actu    4436                 allocated = ext4_ext_get_actual_len(&newex);
4349         else                                     4437         else
4350                 allocated = map->m_len;          4438                 allocated = map->m_len;
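
/*
 * Editor's illustrative sketch (standalone user-space C, not part of
 * extents.c): the effect of the overlap check above. If the proposed extent
 * would run into the next extent already in the tree, its length is trimmed
 * so it stops just before that extent, and "allocated" becomes the trimmed
 * length rather than the full request. The numbers are invented for the
 * demonstration.
 */
#include <stdio.h>

int main(void)
{
        unsigned int req_lblk = 100, req_len = 50;      /* want blocks 100..149 */
        unsigned int next_ext_lblk = 120;               /* existing extent at 120 */
        unsigned int allocated;

        if (req_lblk + req_len > next_ext_lblk)         /* would overlap */
                req_len = next_ext_lblk - req_lblk;     /* trim to 20 blocks */
        allocated = req_len;

        printf("will allocate %u blocks starting at %u\n", allocated, req_lblk);
        return 0;
}
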
4351                                                  4439 
4352         /* allocate new block */                 4440         /* allocate new block */
4353         ar.inode = inode;                        4441         ar.inode = inode;
4354         ar.goal = ext4_ext_find_goal(inode, p    4442         ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
4355         ar.logical = map->m_lblk;                4443         ar.logical = map->m_lblk;
4356         /*                                       4444         /*
4357          * We calculate the offset from the b    4445          * We calculate the offset from the beginning of the cluster
4358          * for the logical block number, sinc    4446          * for the logical block number, since when we allocate a
4359          * physical cluster, the physical blo    4447          * physical cluster, the physical block should start at the
4360          * same offset from the beginning of     4448          * same offset from the beginning of the cluster.  This is
4361          * needed so that future calls to get    4449          * needed so that future calls to get_implied_cluster_alloc()
4362          * work correctly.                       4450          * work correctly.
4363          */                                      4451          */
4364         offset = EXT4_LBLK_COFF(sbi, map->m_l    4452         offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
4365         ar.len = EXT4_NUM_B2C(sbi, offset+all    4453         ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
4366         ar.goal -= offset;                       4454         ar.goal -= offset;
4367         ar.logical -= offset;                    4455         ar.logical -= offset;
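
/*
 * Editor's illustrative sketch (standalone user-space C, not part of
 * extents.c): the cluster alignment done above for bigalloc file systems.
 * The request handed to the block allocator is expressed in whole clusters:
 * the offset of m_lblk within its cluster is subtracted from the goal and
 * the logical start, and the length is rounded up to cluster units. The
 * cluster ratio of 16 and the sample numbers are assumptions for the
 * demonstration.
 */
#include <stdio.h>

int main(void)
{
        unsigned int cluster_ratio = 16;                /* blocks per cluster */
        unsigned int m_lblk = 35, allocated = 10;       /* want blocks 35..44 */

        unsigned int offset = m_lblk & (cluster_ratio - 1);     /* 35 % 16 = 3 */
        unsigned int len_clusters =
                (offset + allocated + cluster_ratio - 1) / cluster_ratio; /* 1 */
        unsigned int logical = m_lblk - offset;         /* 32: cluster aligned */

        printf("ask allocator for %u cluster(s) at logical block %u\n",
               len_clusters, logical);
        return 0;
}
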
4368         if (S_ISREG(inode->i_mode))              4456         if (S_ISREG(inode->i_mode))
4369                 ar.flags = EXT4_MB_HINT_DATA;    4457                 ar.flags = EXT4_MB_HINT_DATA;
4370         else                                     4458         else
4371                 /* disable in-core preallocat    4459                 /* disable in-core preallocation for non-regular files */
4372                 ar.flags = 0;                    4460                 ar.flags = 0;
4373         if (flags & EXT4_GET_BLOCKS_NO_NORMAL    4461         if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE)
4374                 ar.flags |= EXT4_MB_HINT_NOPR    4462                 ar.flags |= EXT4_MB_HINT_NOPREALLOC;
4375         if (flags & EXT4_GET_BLOCKS_DELALLOC_    4463         if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
4376                 ar.flags |= EXT4_MB_DELALLOC_    4464                 ar.flags |= EXT4_MB_DELALLOC_RESERVED;
4377         if (flags & EXT4_GET_BLOCKS_METADATA_    4465         if (flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
4378                 ar.flags |= EXT4_MB_USE_RESER    4466                 ar.flags |= EXT4_MB_USE_RESERVED;
4379         newblock = ext4_mb_new_blocks(handle,    4467         newblock = ext4_mb_new_blocks(handle, &ar, &err);
4380         if (!newblock)                           4468         if (!newblock)
4381                 goto out;                     !! 4469                 goto out2;
                                                   >> 4470         ext_debug("allocate new block: goal %llu, found %llu/%u\n",
                                                   >> 4471                   ar.goal, newblock, allocated);
                                                   >> 4472         free_on_err = 1;
4382         allocated_clusters = ar.len;             4473         allocated_clusters = ar.len;
4383         ar.len = EXT4_C2B(sbi, ar.len) - offs    4474         ar.len = EXT4_C2B(sbi, ar.len) - offset;
4384         ext_debug(inode, "allocate new block: << 
4385                   ar.goal, newblock, ar.len,  << 
4386         if (ar.len > allocated)                  4475         if (ar.len > allocated)
4387                 ar.len = allocated;              4476                 ar.len = allocated;
4388                                                  4477 
4389 got_allocated_blocks:                            4478 got_allocated_blocks:
4390         /* try to insert new extent into foun    4479         /* try to insert new extent into found leaf and return */
4391         pblk = newblock + offset;             !! 4480         ext4_ext_store_pblock(&newex, newblock + offset);
4392         ext4_ext_store_pblock(&newex, pblk);  << 
4393         newex.ee_len = cpu_to_le16(ar.len);      4481         newex.ee_len = cpu_to_le16(ar.len);
4394         /* Mark unwritten */                     4482         /* Mark unwritten */
4395         if (flags & EXT4_GET_BLOCKS_UNWRIT_EX !! 4483         if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) {
4396                 ext4_ext_mark_unwritten(&newe    4484                 ext4_ext_mark_unwritten(&newex);
4397                 map->m_flags |= EXT4_MAP_UNWR    4485                 map->m_flags |= EXT4_MAP_UNWRITTEN;
4398         }                                        4486         }
4399                                                  4487 
4400         path = ext4_ext_insert_extent(handle, !! 4488         err = 0;
4401         if (IS_ERR(path)) {                   !! 4489         if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0)
4402                 err = PTR_ERR(path);          !! 4490                 err = check_eofblocks_fl(handle, inode, map->m_lblk,
4403                 if (allocated_clusters) {     !! 4491                                          path, ar.len);
4404                         int fb_flags = 0;     !! 4492         if (!err)
                                                   >> 4493                 err = ext4_ext_insert_extent(handle, inode, &path,
                                                   >> 4494                                              &newex, flags);
                                                   >> 4495 
                                                   >> 4496         if (err && free_on_err) {
                                                   >> 4497                 int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ?
                                                   >> 4498                         EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0;
                                                   >> 4499                 /* free data blocks we just allocated */
                                                   >> 4500                 /* not a good idea to call discard here directly,
                                                   >> 4501                  * but otherwise we'd need to call it every free() */
                                                   >> 4502                 ext4_discard_preallocations(inode);
                                                   >> 4503                 ext4_free_blocks(handle, inode, NULL, newblock,
                                                   >> 4504                                  EXT4_C2B(sbi, allocated_clusters), fb_flags);
                                                   >> 4505                 goto out2;
                                                   >> 4506         }
                                                   >> 4507 
                                                   >> 4508         /* previous routine could use block we allocated */
                                                   >> 4509         newblock = ext4_ext_pblock(&newex);
                                                   >> 4510         allocated = ext4_ext_get_actual_len(&newex);
                                                   >> 4511         if (allocated > map->m_len)
                                                   >> 4512                 allocated = map->m_len;
                                                   >> 4513         map->m_flags |= EXT4_MAP_NEW;
4405                                                  4514 
                                                   >> 4515         /*
                                                   >> 4516          * Update reserved blocks/metadata blocks after successful
                                                   >> 4517          * block allocation which had been deferred till now.
                                                   >> 4518          */
                                                   >> 4519         if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
                                                   >> 4520                 unsigned int reserved_clusters;
                                                   >> 4521                 /*
                                                   >> 4522                  * Check how many clusters we had reserved this allocated range
                                                   >> 4523                  */
                                                   >> 4524                 reserved_clusters = get_reserved_cluster_alloc(inode,
                                                   >> 4525                                                 map->m_lblk, allocated);
                                                   >> 4526                 if (!map_from_cluster) {
                                                   >> 4527                         BUG_ON(allocated_clusters < reserved_clusters);
                                                   >> 4528                         if (reserved_clusters < allocated_clusters) {
                                                   >> 4529                                 struct ext4_inode_info *ei = EXT4_I(inode);
                                                   >> 4530                                 int reservation = allocated_clusters -
                                                   >> 4531                                                   reserved_clusters;
                                                   >> 4532                                 /*
                                                   >> 4533                                  * It seems we claimed a few clusters outside of
                                                   >> 4534                                  * the range of this allocation. We should give
                                                   >> 4535                                  * it back to the reservation pool. This can
                                                   >> 4536                                  * happen in the following case:
                                                   >> 4537                                  *
                                                   >> 4538                                  * * Suppose s_cluster_ratio is 4 (i.e., each
                                                   >> 4539                                  *   cluster has 4 blocks). Thus, the clusters
                                                   >> 4540                                  *   are [0-3],[4-7],[8-11]...
                                                   >> 4541                                  * * First comes delayed allocation write for
                                                   >> 4542                                  *   logical blocks 10 & 11. Since there were no
                                                   >> 4543                                  *   previous delayed allocated blocks in the
                                                   >> 4544                                  *   range [8-11], we would reserve 1 cluster
                                                   >> 4545                                  *   for this write.
                                                   >> 4546                                  * * Next comes write for logical blocks 3 to 8.
                                                   >> 4547                                  *   In this case, we will reserve 2 clusters
                                                   >> 4548                                  *   (for [0-3] and [4-7], and not for [8-11], as
                                                   >> 4549                                  *   that range already has delayed allocated blocks).
                                                   >> 4550                                  *   Thus total reserved clusters now becomes 3.
                                                   >> 4551                                  * * Now, during the delayed allocation writeout
                                                   >> 4552                                  *   time, we will first write blocks [3-8] and
                                                   >> 4553                                  *   allocate 3 clusters for writing these
                                                   >> 4554                                  *   blocks. Also, we would claim all these
                                                   >> 4555                                  *   three clusters above.
                                                   >> 4556                                  * * Now when we come here to writeout the
                                                   >> 4557                                  *   blocks [10-11], we would expect to claim
                                                   >> 4558                                  *   the reservation of 1 cluster we had made
                                                   >> 4559                                  *   (and we would claim it since there are no
                                                   >> 4560                                  *   more delayed allocated blocks in the range
                                                   >> 4561                                  *   [8-11]. But our reserved cluster count had
                                                   >> 4562                                  *   [8-11]). But our reserved cluster count had
                                                   >> 4563                                  *
                                                   >> 4564                                  *   Thus, at the step 4 above when we determine
                                                   >> 4565                                  *   that there are still some unwritten delayed
                                                   >> 4566                                  *   allocated blocks outside of our current
                                                   >> 4567                                  *   block range, we should increment the
                                                   >> 4568                                  *   reserved clusters count so that when the
                                                   >> 4569                                  *   remaining blocks finally get written, we
                                                   >> 4570                                  *   could claim them.
                                                   >> 4571                                  */
                                                   >> 4572                                 dquot_reserve_block(inode,
                                                   >> 4573                                                 EXT4_C2B(sbi, reservation));
                                                   >> 4574                                 spin_lock(&ei->i_block_reservation_lock);
                                                   >> 4575                                 ei->i_reserved_data_blocks += reservation;
                                                   >> 4576                                 spin_unlock(&ei->i_block_reservation_lock);
                                                   >> 4577                         }
4406                         /*                       4578                         /*
4407                          * free data blocks w !! 4579                          * We will claim quota for all newly allocated blocks.
4408                          * not a good idea to !! 4580                          * We're updating the reserved space *after* the
4409                          * but otherwise we'd !! 4581                          * correction above so we do not accidentally free
                                                   >> 4582                          * all the metadata reservation because we might
                                                   >> 4583                          * actually need it later on.
4410                          */                      4584                          */
4411                         ext4_discard_prealloc !! 4585                         ext4_da_update_reserve_space(inode, allocated_clusters,
4412                         if (flags & EXT4_GET_ !! 4586                                                         1);
4413                                 fb_flags = EX << 
4414                         ext4_free_blocks(hand << 
4415                                          EXT4 << 
4416                                          fb_f << 
4417                 }                                4587                 }
4418                 goto out;                     << 
4419         }                                        4588         }
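
/*
 * Editor's illustrative sketch (standalone user-space C, not part of
 * extents.c): the arithmetic behind the reservation correction described in
 * the long comment above, using its s_cluster_ratio == 4 scenario. Writing
 * logical blocks 3..8 allocates three clusters, but only two reservations
 * were made on behalf of this range, so one cluster worth of reservation is
 * added back for the still-delayed blocks 10..11. All numbers restate that
 * scenario; nothing is computed from a real inode.
 */
#include <stdio.h>

int main(void)
{
        unsigned int allocated_clusters = 3;    /* clusters [0-3],[4-7],[8-11] */
        unsigned int reserved_clusters = 2;     /* reserved when writing 3..8 */

        if (reserved_clusters < allocated_clusters) {
                unsigned int reservation =
                        allocated_clusters - reserved_clusters; /* 1 */
                printf("re-reserve %u cluster(s) for delayed blocks 10..11\n",
                       reservation);
        }
        return 0;
}
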
4420                                                  4589 
4421         /*                                       4590         /*
4422          * Cache the extent and update transa    4591          * Cache the extent and update transaction to commit on fdatasync only
4423          * when it is _not_ an unwritten exte    4592          * when it is _not_ an unwritten extent.
4424          */                                      4593          */
4425         if ((flags & EXT4_GET_BLOCKS_UNWRIT_E    4594         if ((flags & EXT4_GET_BLOCKS_UNWRIT_EXT) == 0)
4426                 ext4_update_inode_fsync_trans    4595                 ext4_update_inode_fsync_trans(handle, inode, 1);
4427         else                                     4596         else
4428                 ext4_update_inode_fsync_trans    4597                 ext4_update_inode_fsync_trans(handle, inode, 0);
4429                                               << 
4430         map->m_flags |= (EXT4_MAP_NEW | EXT4_ << 
4431         map->m_pblk = pblk;                   << 
4432         map->m_len = ar.len;                  << 
4433         allocated = map->m_len;               << 
4434         ext4_ext_show_leaf(inode, path);      << 
4435 out:                                             4598 out:
4436         ext4_free_ext_path(path);             !! 4599         if (allocated > map->m_len)
                                                   >> 4600                 allocated = map->m_len;
                                                   >> 4601         ext4_ext_show_leaf(inode, path);
                                                   >> 4602         map->m_flags |= EXT4_MAP_MAPPED;
                                                   >> 4603         map->m_pblk = newblock;
                                                   >> 4604         map->m_len = allocated;
                                                   >> 4605 out2:
                                                   >> 4606         ext4_ext_drop_refs(path);
                                                   >> 4607         kfree(path);
4437                                                  4608 
4438         trace_ext4_ext_map_blocks_exit(inode,    4609         trace_ext4_ext_map_blocks_exit(inode, flags, map,
4439                                        err ?     4610                                        err ? err : allocated);
4440         return err ? err : allocated;            4611         return err ? err : allocated;
4441 }                                                4612 }
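
/*
 * Editor's illustrative sketch (standalone user-space C, not part of
 * extents.c): the return convention used by ext4_ext_map_blocks() above. A
 * negative value is an errno, otherwise the value is the number of blocks
 * described by the returned mapping, which may be shorter than the request.
 * demo_map_blocks() is a stand-in, not the real function.
 */
#include <stdio.h>
#include <errno.h>

static int demo_map_blocks(unsigned int lblk, unsigned int len)
{
        (void)lblk;
        if (len == 0)
                return -EINVAL;         /* error path: negative errno */
        return len < 8 ? (int)len : 8;  /* mapped fewer blocks than asked */
}

int main(void)
{
        int ret = demo_map_blocks(100, 50);

        if (ret < 0)
                fprintf(stderr, "mapping failed: %d\n", ret);
        else
                printf("caller must loop: only %d of 50 blocks mapped\n", ret);
        return 0;
}
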
4442                                                  4613 
4443 int ext4_ext_truncate(handle_t *handle, struc    4614 int ext4_ext_truncate(handle_t *handle, struct inode *inode)
4444 {                                                4615 {
4445         struct super_block *sb = inode->i_sb;    4616         struct super_block *sb = inode->i_sb;
4446         ext4_lblk_t last_block;                  4617         ext4_lblk_t last_block;
4447         int err = 0;                             4618         int err = 0;
4448                                                  4619 
4449         /*                                       4620         /*
4450          * TODO: optimization is possible her    4621          * TODO: optimization is possible here.
4451          * Probably we need not scan at all,     4622          * Probably we need not scan at all,
4452          * because page truncation is enough.    4623          * because page truncation is enough.
4453          */                                      4624          */
4454                                                  4625 
4455         /* we have to know where to truncate     4626         /* we have to know where to truncate from in crash case */
4456         EXT4_I(inode)->i_disksize = inode->i_    4627         EXT4_I(inode)->i_disksize = inode->i_size;
4457         err = ext4_mark_inode_dirty(handle, i    4628         err = ext4_mark_inode_dirty(handle, inode);
4458         if (err)                                 4629         if (err)
4459                 return err;                      4630                 return err;
4460                                                  4631 
4461         last_block = (inode->i_size + sb->s_b    4632         last_block = (inode->i_size + sb->s_blocksize - 1)
4462                         >> EXT4_BLOCK_SIZE_BI    4633                         >> EXT4_BLOCK_SIZE_BITS(sb);
4463         ext4_es_remove_extent(inode, last_blo !! 4634 retry:
4464                                               !! 4635         err = ext4_es_remove_extent(inode, last_block,
4465 retry_remove_space:                           !! 4636                                     EXT_MAX_BLOCKS - last_block);
4466         err = ext4_ext_remove_space(inode, la << 
4467         if (err == -ENOMEM) {                    4637         if (err == -ENOMEM) {
4468                 memalloc_retry_wait(GFP_ATOMI !! 4638                 cond_resched();
4469                 goto retry_remove_space;      !! 4639                 congestion_wait(BLK_RW_ASYNC, HZ/50);
                                                   >> 4640                 goto retry;
4470         }                                        4641         }
4471         return err;                           !! 4642         if (err)
                                                   >> 4643                 return err;
                                                   >> 4644         return ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
4472 }                                                4645 }
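
/*
 * Editor's illustrative sketch (standalone user-space C, not part of
 * extents.c): the last_block computation used by ext4_ext_truncate() above.
 * The first block to remove is i_size rounded up to the block size,
 * expressed as a logical block number; everything from there to the end of
 * the logical address space is dropped. The 4096-byte block size and the
 * sample i_size are assumptions for the demonstration.
 */
#include <stdio.h>

int main(void)
{
        unsigned long long i_size = 10000;      /* bytes */
        unsigned int blocksize = 4096;
        unsigned int blkbits = 12;              /* log2(blocksize) */

        unsigned long long last_block =
                (i_size + blocksize - 1) >> blkbits;    /* ceil(10000/4096) = 3 */

        printf("remove extents from logical block %llu onwards\n", last_block);
        return 0;
}
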
4473                                                  4646 
4474 static int ext4_alloc_file_blocks(struct file    4647 static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
4475                                   ext4_lblk_t    4648                                   ext4_lblk_t len, loff_t new_size,
4476                                   int flags)     4649                                   int flags)
4477 {                                                4650 {
4478         struct inode *inode = file_inode(file    4651         struct inode *inode = file_inode(file);
4479         handle_t *handle;                        4652         handle_t *handle;
4480         int ret = 0, ret2 = 0, ret3 = 0;      !! 4653         int ret = 0;
                                                   >> 4654         int ret2 = 0;
4481         int retries = 0;                         4655         int retries = 0;
4482         int depth = 0;                           4656         int depth = 0;
4483         struct ext4_map_blocks map;              4657         struct ext4_map_blocks map;
4484         unsigned int credits;                    4658         unsigned int credits;
4485         loff_t epos;                             4659         loff_t epos;
4486                                                  4660 
4487         BUG_ON(!ext4_test_inode_flag(inode, E    4661         BUG_ON(!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS));
4488         map.m_lblk = offset;                     4662         map.m_lblk = offset;
4489         map.m_len = len;                         4663         map.m_len = len;
4490         /*                                       4664         /*
4491          * Don't normalize the request if it     4665          * Don't normalize the request if it can fit in one extent so
4492          * that it doesn't get unnecessarily     4666          * that it doesn't get unnecessarily split into multiple
4493          * extents.                              4667          * extents.
4494          */                                      4668          */
4495         if (len <= EXT_UNWRITTEN_MAX_LEN)        4669         if (len <= EXT_UNWRITTEN_MAX_LEN)
4496                 flags |= EXT4_GET_BLOCKS_NO_N    4670                 flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
4497                                                  4671 
4498         /*                                       4672         /*
4499          * credits to insert 1 extent into ex    4673          * credits to insert 1 extent into extent tree
4500          */                                      4674          */
4501         credits = ext4_chunk_trans_blocks(ino    4675         credits = ext4_chunk_trans_blocks(inode, len);
4502         depth = ext_depth(inode);                4676         depth = ext_depth(inode);
4503                                                  4677 
4504 retry:                                           4678 retry:
4505         while (len) {                         !! 4679         while (ret >= 0 && len) {
4506                 /*                               4680                 /*
4507                  * Recalculate credits when e    4681                  * Recalculate credits when extent tree depth changes.
4508                  */                              4682                  */
4509                 if (depth != ext_depth(inode)    4683                 if (depth != ext_depth(inode)) {
4510                         credits = ext4_chunk_    4684                         credits = ext4_chunk_trans_blocks(inode, len);
4511                         depth = ext_depth(ino    4685                         depth = ext_depth(inode);
4512                 }                                4686                 }
4513                                                  4687 
4514                 handle = ext4_journal_start(i    4688                 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
4515                                             c    4689                                             credits);
4516                 if (IS_ERR(handle)) {            4690                 if (IS_ERR(handle)) {
4517                         ret = PTR_ERR(handle)    4691                         ret = PTR_ERR(handle);
4518                         break;                   4692                         break;
4519                 }                                4693                 }
4520                 ret = ext4_map_blocks(handle,    4694                 ret = ext4_map_blocks(handle, inode, &map, flags);
4521                 if (ret <= 0) {                  4695                 if (ret <= 0) {
4522                         ext4_debug("inode #%l    4696                         ext4_debug("inode #%lu: block %u: len %u: "
4523                                    "ext4_ext_    4697                                    "ext4_ext_map_blocks returned %d",
4524                                    inode->i_i    4698                                    inode->i_ino, map.m_lblk,
4525                                    map.m_len,    4699                                    map.m_len, ret);
4526                         ext4_mark_inode_dirty    4700                         ext4_mark_inode_dirty(handle, inode);
4527                         ext4_journal_stop(han !! 4701                         ret2 = ext4_journal_stop(handle);
4528                         break;                   4702                         break;
4529                 }                                4703                 }
4530                 /*                            << 
4531                  * allow a full retry cycle f << 
4532                  */                           << 
4533                 retries = 0;                  << 
4534                 map.m_lblk += ret;               4704                 map.m_lblk += ret;
4535                 map.m_len = len = len - ret;     4705                 map.m_len = len = len - ret;
4536                 epos = (loff_t)map.m_lblk <<     4706                 epos = (loff_t)map.m_lblk << inode->i_blkbits;
4537                 inode_set_ctime_current(inode !! 4707                 inode->i_ctime = current_time(inode);
4538                 if (new_size) {                  4708                 if (new_size) {
4539                         if (epos > new_size)     4709                         if (epos > new_size)
4540                                 epos = new_si    4710                                 epos = new_size;
4541                         if (ext4_update_inode    4711                         if (ext4_update_inode_size(inode, epos) & 0x1)
4542                                 inode_set_mti !! 4712                                 inode->i_mtime = inode->i_ctime;
4543                                               !! 4713                 } else {
                                                   >> 4714                         if (epos > inode->i_size)
                                                   >> 4715                                 ext4_set_inode_flag(inode,
                                                   >> 4716                                                     EXT4_INODE_EOFBLOCKS);
4544                 }                                4717                 }
4545                 ret2 = ext4_mark_inode_dirty( !! 4718                 ext4_mark_inode_dirty(handle, inode);
4546                 ext4_update_inode_fsync_trans    4719                 ext4_update_inode_fsync_trans(handle, inode, 1);
4547                 ret3 = ext4_journal_stop(hand !! 4720                 ret2 = ext4_journal_stop(handle);
4548                 ret2 = ret3 ? ret3 : ret2;    !! 4721                 if (ret2)
4549                 if (unlikely(ret2))           << 
4550                         break;                   4722                         break;
4551         }                                        4723         }
4552         if (ret == -ENOSPC && ext4_should_ret !! 4724         if (ret == -ENOSPC &&
                                                   >> 4725                         ext4_should_retry_alloc(inode->i_sb, &retries)) {
                                                   >> 4726                 ret = 0;
4553                 goto retry;                      4727                 goto retry;
                                                   >> 4728         }
4554                                                  4729 
4555         return ret > 0 ? ret2 : ret;             4730         return ret > 0 ? ret2 : ret;
4556 }                                                4731 }
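
/*
 * Editor's illustrative sketch (standalone user-space C, not part of
 * extents.c): the loop structure of ext4_alloc_file_blocks() above. Each
 * pass maps some prefix of the remaining range; the window is then advanced
 * by the number of blocks actually mapped until nothing is left or an error
 * occurs. demo_map() is a stand-in that maps at most 8 blocks per call.
 */
#include <stdio.h>

static int demo_map(unsigned int lblk, unsigned int len)
{
        (void)lblk;
        return len < 8 ? (int)len : 8;
}

int main(void)
{
        unsigned int lblk = 0, len = 20;
        int ret = 0;

        while (len) {
                ret = demo_map(lblk, len);
                if (ret <= 0)
                        break;
                lblk += ret;            /* advance past what was mapped */
                len -= ret;
                printf("mapped %d block(s), %u left\n", ret, len);
        }
        return ret < 0 ? ret : 0;
}
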
4557                                                  4732 
4558 static int ext4_collapse_range(struct file *f << 
4559                                               << 
4560 static int ext4_insert_range(struct file *fil << 
4561                                               << 
4562 static long ext4_zero_range(struct file *file    4733 static long ext4_zero_range(struct file *file, loff_t offset,
4563                             loff_t len, int m    4734                             loff_t len, int mode)
4564 {                                                4735 {
4565         struct inode *inode = file_inode(file    4736         struct inode *inode = file_inode(file);
4566         struct address_space *mapping = file- << 
4567         handle_t *handle = NULL;                 4737         handle_t *handle = NULL;
4568         unsigned int max_blocks;                 4738         unsigned int max_blocks;
4569         loff_t new_size = 0;                     4739         loff_t new_size = 0;
4570         int ret = 0;                             4740         int ret = 0;
4571         int flags;                               4741         int flags;
4572         int credits;                             4742         int credits;
4573         int partial_begin, partial_end;          4743         int partial_begin, partial_end;
4574         loff_t start, end;                       4744         loff_t start, end;
4575         ext4_lblk_t lblk;                        4745         ext4_lblk_t lblk;
4576         unsigned int blkbits = inode->i_blkbi    4746         unsigned int blkbits = inode->i_blkbits;
4577                                                  4747 
4578         trace_ext4_zero_range(inode, offset,     4748         trace_ext4_zero_range(inode, offset, len, mode);
4579                                                  4749 
                                                   >> 4750         if (!S_ISREG(inode->i_mode))
                                                   >> 4751                 return -EINVAL;
                                                   >> 4752 
                                                   >> 4753         /* Call ext4_force_commit to flush all data in case of data=journal. */
                                                   >> 4754         if (ext4_should_journal_data(inode)) {
                                                   >> 4755                 ret = ext4_force_commit(inode->i_sb);
                                                   >> 4756                 if (ret)
                                                   >> 4757                         return ret;
                                                   >> 4758         }
                                                   >> 4759 
4580         /*                                       4760         /*
4581          * Round up offset. This is not fallo !! 4761          * Round up offset. This is not fallocate, we need to zero out
4582          * blocks, so convert interior block     4762          * blocks, so convert interior block aligned part of the range to
4583          * unwritten and possibly manually ze    4763          * unwritten and possibly manually zero out unaligned parts of the
4584          * range. Here, start and partial_beg !! 4764          * range.
4585          * partial_end are exclusive.         << 
4586          */                                      4765          */
4587         start = round_up(offset, 1 << blkbits    4766         start = round_up(offset, 1 << blkbits);
4588         end = round_down((offset + len), 1 <<    4767         end = round_down((offset + len), 1 << blkbits);
4589                                                  4768 
4590         if (start < offset || end > offset +     4769         if (start < offset || end > offset + len)
4591                 return -EINVAL;                  4770                 return -EINVAL;
4592         partial_begin = offset & ((1 << blkbi    4771         partial_begin = offset & ((1 << blkbits) - 1);
4593         partial_end = (offset + len) & ((1 <<    4772         partial_end = (offset + len) & ((1 << blkbits) - 1);
4594                                                  4773 
4595         lblk = start >> blkbits;                 4774         lblk = start >> blkbits;
4596         max_blocks = (end >> blkbits);           4775         max_blocks = (end >> blkbits);
4597         if (max_blocks < lblk)                   4776         if (max_blocks < lblk)
4598                 max_blocks = 0;                  4777                 max_blocks = 0;
4599         else                                     4778         else
4600                 max_blocks -= lblk;              4779                 max_blocks -= lblk;
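
/*
 * Editor's illustrative sketch (standalone user-space C, not part of
 * extents.c): the boundary arithmetic above. With 4096-byte blocks, a
 * request to zero 20000 bytes at offset 5000 is split into a block-aligned
 * middle that can be converted to unwritten extents and two partial edges
 * that must be zeroed by hand. The block size and the sample offsets are
 * assumptions for the demonstration.
 */
#include <stdio.h>

int main(void)
{
        unsigned int blkbits = 12;
        unsigned long long blksz = 1ULL << blkbits;     /* 4096 */
        unsigned long long offset = 5000, len = 20000;

        unsigned long long start = (offset + blksz - 1) & ~(blksz - 1); /* 8192 */
        unsigned long long end = (offset + len) & ~(blksz - 1);         /* 24576 */
        unsigned long long partial_begin = offset & (blksz - 1);        /* 904 */
        unsigned long long partial_end = (offset + len) & (blksz - 1);  /* 424 */

        unsigned long long lblk = start >> blkbits;                     /* 2 */
        unsigned long long max_blocks = (end >> blkbits) - lblk;        /* 4 */

        printf("aligned range: blocks %llu..%llu (%llu blocks)\n",
               lblk, lblk + max_blocks - 1, max_blocks);
        printf("partial edges: %llu bytes before, %llu bytes after\n",
               partial_begin, partial_end);
        return 0;
}
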
4601                                                  4780 
4602         inode_lock(inode);                       4781         inode_lock(inode);
4603                                                  4782 
4604         /*                                       4783         /*
4605          * Indirect files do not support unwr !! 4784          * Indirect files do not support unwritten extents
4606          */                                      4785          */
4607         if (!(ext4_test_inode_flag(inode, EXT    4786         if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
4608                 ret = -EOPNOTSUPP;               4787                 ret = -EOPNOTSUPP;
4609                 goto out_mutex;                  4788                 goto out_mutex;
4610         }                                        4789         }
4611                                                  4790 
4612         if (!(mode & FALLOC_FL_KEEP_SIZE) &&     4791         if (!(mode & FALLOC_FL_KEEP_SIZE) &&
4613             (offset + len > inode->i_size ||  !! 4792             (offset + len > i_size_read(inode) ||
4614              offset + len > EXT4_I(inode)->i_    4793              offset + len > EXT4_I(inode)->i_disksize)) {
4615                 new_size = offset + len;         4794                 new_size = offset + len;
4616                 ret = inode_newsize_ok(inode,    4795                 ret = inode_newsize_ok(inode, new_size);
4617                 if (ret)                         4796                 if (ret)
4618                         goto out_mutex;          4797                         goto out_mutex;
4619         }                                        4798         }
4620                                                  4799 
4621         flags = EXT4_GET_BLOCKS_CREATE_UNWRIT    4800         flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
                                                   >> 4801         if (mode & FALLOC_FL_KEEP_SIZE)
                                                   >> 4802                 flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
4622                                                  4803 
4623         /* Wait all existing dio workers, new !! 4804         /* Wait all existing dio workers, newcomers will block on i_mutex */
4624         inode_dio_wait(inode);                   4805         inode_dio_wait(inode);
4625                                                  4806 
4626         ret = file_modified(file);            << 
4627         if (ret)                              << 
4628                 goto out_mutex;               << 
4629                                               << 
4630         /* Preallocate the range including th    4807         /* Preallocate the range including the unaligned edges */
4631         if (partial_begin || partial_end) {      4808         if (partial_begin || partial_end) {
4632                 ret = ext4_alloc_file_blocks(    4809                 ret = ext4_alloc_file_blocks(file,
4633                                 round_down(of    4810                                 round_down(offset, 1 << blkbits) >> blkbits,
4634                                 (round_up((of    4811                                 (round_up((offset + len), 1 << blkbits) -
4635                                  round_down(o    4812                                  round_down(offset, 1 << blkbits)) >> blkbits,
4636                                 new_size, fla    4813                                 new_size, flags);
4637                 if (ret)                         4814                 if (ret)
4638                         goto out_mutex;          4815                         goto out_mutex;
4639                                                  4816 
4640         }                                        4817         }
4641                                                  4818 
4642         /* Zero range excluding the unaligned    4819         /* Zero range excluding the unaligned edges */
4643         if (max_blocks > 0) {                    4820         if (max_blocks > 0) {
4644                 flags |= (EXT4_GET_BLOCKS_CON    4821                 flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
4645                           EXT4_EX_NOCACHE);      4822                           EXT4_EX_NOCACHE);
4646                                                  4823 
4647                 /*                               4824                 /*
4648                  * Prevent page faults from r    4825                  * Prevent page faults from reinstantiating pages we have
4649                  * released from page cache.     4826                  * released from page cache.
4650                  */                              4827                  */
4651                 filemap_invalidate_lock(mappi !! 4828                 down_write(&EXT4_I(inode)->i_mmap_sem);
4652                                               << 
4653                 ret = ext4_break_layouts(inod << 
4654                 if (ret) {                    << 
4655                         filemap_invalidate_un << 
4656                         goto out_mutex;       << 
4657                 }                             << 
4658                                               << 
4659                 ret = ext4_update_disksize_be    4829                 ret = ext4_update_disksize_before_punch(inode, offset, len);
4660                 if (ret) {                       4830                 if (ret) {
4661                         filemap_invalidate_un !! 4831                         up_write(&EXT4_I(inode)->i_mmap_sem);
4662                         goto out_mutex;          4832                         goto out_mutex;
4663                 }                                4833                 }
4664                                               << 
4665                 /*                            << 
4666                  * For journalled data we nee << 
4667                  * before discarding page cac << 
4668                  * disk in case of crash befo << 
4669                  */                           << 
4670                 if (ext4_should_journal_data( << 
4671                         ret = filemap_write_a << 
4672                                               << 
4673                         if (ret) {            << 
4674                                 filemap_inval << 
4675                                 goto out_mute << 
4676                         }                     << 
4677                 }                             << 
4678                                               << 
4679                 /* Now release the pages and     4834                 /* Now release the pages and zero block aligned part of pages */
4680                 truncate_pagecache_range(inod    4835                 truncate_pagecache_range(inode, start, end - 1);
4681                 inode_set_mtime_to_ts(inode,  !! 4836                 inode->i_mtime = inode->i_ctime = current_time(inode);
4682                                                  4837 
4683                 ret = ext4_alloc_file_blocks(    4838                 ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
4684                                                  4839                                              flags);
4685                 filemap_invalidate_unlock(map !! 4840                 up_write(&EXT4_I(inode)->i_mmap_sem);
4686                 if (ret)                         4841                 if (ret)
4687                         goto out_mutex;          4842                         goto out_mutex;
4688         }                                        4843         }
4689         if (!partial_begin && !partial_end)      4844         if (!partial_begin && !partial_end)
4690                 goto out_mutex;                  4845                 goto out_mutex;
4691                                                  4846 
4692         /*                                       4847         /*
4693          * In worst case we have to writeout     4848          * In worst case we have to writeout two nonadjacent unwritten
4694          * blocks and update the inode           4849          * blocks and update the inode
4695          */                                      4850          */
4696         credits = (2 * ext4_ext_index_trans_b    4851         credits = (2 * ext4_ext_index_trans_blocks(inode, 2)) + 1;
4697         if (ext4_should_journal_data(inode))     4852         if (ext4_should_journal_data(inode))
4698                 credits += 2;                    4853                 credits += 2;
4699         handle = ext4_journal_start(inode, EX    4854         handle = ext4_journal_start(inode, EXT4_HT_MISC, credits);
4700         if (IS_ERR(handle)) {                    4855         if (IS_ERR(handle)) {
4701                 ret = PTR_ERR(handle);           4856                 ret = PTR_ERR(handle);
4702                 ext4_std_error(inode->i_sb, r    4857                 ext4_std_error(inode->i_sb, ret);
4703                 goto out_mutex;                  4858                 goto out_mutex;
4704         }                                        4859         }
4705                                                  4860 
4706         inode_set_mtime_to_ts(inode, inode_se !! 4861         inode->i_mtime = inode->i_ctime = current_time(inode);
4707         if (new_size)                         !! 4862         if (new_size) {
4708                 ext4_update_inode_size(inode,    4863                 ext4_update_inode_size(inode, new_size);
4709         ret = ext4_mark_inode_dirty(handle, i !! 4864         } else {
4710         if (unlikely(ret))                    !! 4865                 /*
4711                 goto out_handle;              !! 4866                 * Mark that we allocate beyond EOF so the subsequent truncate
                                                   >> 4867                 * can proceed even if the new size is the same as i_size.
                                                   >> 4868                 */
                                                   >> 4869                 if ((offset + len) > i_size_read(inode))
                                                   >> 4870                         ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
                                                   >> 4871         }
                                                   >> 4872         ext4_mark_inode_dirty(handle, inode);
                                                   >> 4873 
4712         /* Zero out partial block at the edge    4874         /* Zero out partial block at the edges of the range */
4713         ret = ext4_zero_partial_blocks(handle    4875         ret = ext4_zero_partial_blocks(handle, inode, offset, len);
4714         if (ret >= 0)                            4876         if (ret >= 0)
4715                 ext4_update_inode_fsync_trans    4877                 ext4_update_inode_fsync_trans(handle, inode, 1);
4716                                                  4878 
4717         if (file->f_flags & O_SYNC)              4879         if (file->f_flags & O_SYNC)
4718                 ext4_handle_sync(handle);        4880                 ext4_handle_sync(handle);
4719                                                  4881 
4720 out_handle:                                   << 
4721         ext4_journal_stop(handle);               4882         ext4_journal_stop(handle);
4722 out_mutex:                                       4883 out_mutex:
4723         inode_unlock(inode);                     4884         inode_unlock(inode);
4724         return ret;                              4885         return ret;
4725 }                                                4886 }
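
/*
 * Editor's illustrative sketch (standalone Linux user-space C, not part of
 * extents.c): how the zero-range path above is normally reached from user
 * space, via fallocate(2) with FALLOC_FL_ZERO_RANGE, optionally combined
 * with FALLOC_FL_KEEP_SIZE so i_size is not extended. The file name is an
 * assumption for the demonstration.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fd = open("testfile", O_RDWR | O_CREAT, 0644);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* zero 20000 bytes starting at offset 5000 without changing i_size */
        if (fallocate(fd, FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE,
                      5000, 20000) != 0)
                perror("fallocate");
        close(fd);
        return 0;
}
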
4726                                                  4887 
4727 /*                                               4888 /*
4728  * preallocate space for a file. This impleme    4889  * preallocate space for a file. This implements ext4's fallocate file
4729  * operation, which gets called from sys_fall    4890  * operation, which gets called from sys_fallocate system call.
4730  * For block-mapped files, posix_fallocate sh    4891  * For block-mapped files, posix_fallocate should fall back to the method
4731  * of writing zeroes to the required new bloc    4892  * of writing zeroes to the required new blocks (the same behavior which is
4732  * expected for file systems which do not sup    4893  * expected for file systems which do not support fallocate() system call).
4733  */                                              4894  */
4734 long ext4_fallocate(struct file *file, int mo    4895 long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
4735 {                                                4896 {
4736         struct inode *inode = file_inode(file    4897         struct inode *inode = file_inode(file);
4737         loff_t new_size = 0;                     4898         loff_t new_size = 0;
4738         unsigned int max_blocks;                 4899         unsigned int max_blocks;
4739         int ret = 0;                             4900         int ret = 0;
4740         int flags;                               4901         int flags;
4741         ext4_lblk_t lblk;                        4902         ext4_lblk_t lblk;
4742         unsigned int blkbits = inode->i_blkbi    4903         unsigned int blkbits = inode->i_blkbits;
4743                                                  4904 
4744         /*                                       4905         /*
4745          * Encrypted inodes can't handle coll    4906          * Encrypted inodes can't handle collapse range or insert
4746          * range since we would need to re-en    4907          * range since we would need to re-encrypt blocks with a
4747          * different IV or XTS tweak (which a    4908          * different IV or XTS tweak (which are based on the logical
4748          * block number).                        4909          * block number).
4749          */                                   !! 4910          *
4750         if (IS_ENCRYPTED(inode) &&            !! 4911          * XXX It's not clear why zero range isn't working, but we'll
4751             (mode & (FALLOC_FL_COLLAPSE_RANGE !! 4912          * leave it disabled for encrypted inodes for now.  This is a
                                                   >> 4913          * bug we should fix....
                                                   >> 4914          */
                                                   >> 4915         if (ext4_encrypted_inode(inode) &&
                                                   >> 4916             (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE |
                                                   >> 4917                      FALLOC_FL_ZERO_RANGE)))
4752                 return -EOPNOTSUPP;              4918                 return -EOPNOTSUPP;
4753                                                  4919 
4754         /* Return error if mode is not suppor    4920         /* Return error if mode is not supported */
4755         if (mode & ~(FALLOC_FL_KEEP_SIZE | FA    4921         if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
4756                      FALLOC_FL_COLLAPSE_RANGE    4922                      FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
4757                      FALLOC_FL_INSERT_RANGE))    4923                      FALLOC_FL_INSERT_RANGE))
4758                 return -EOPNOTSUPP;              4924                 return -EOPNOTSUPP;
4759                                                  4925 
4760         inode_lock(inode);                    !! 4926         if (mode & FALLOC_FL_PUNCH_HOLE)
                                                   >> 4927                 return ext4_punch_hole(inode, offset, len);
                                                   >> 4928 
4761         ret = ext4_convert_inline_data(inode)    4929         ret = ext4_convert_inline_data(inode);
4762         inode_unlock(inode);                  << 
4763         if (ret)                                 4930         if (ret)
4764                 goto exit;                    !! 4931                 return ret;
4765                                                  4932 
4766         if (mode & FALLOC_FL_PUNCH_HOLE) {    !! 4933         if (mode & FALLOC_FL_COLLAPSE_RANGE)
4767                 ret = ext4_punch_hole(file, o !! 4934                 return ext4_collapse_range(inode, offset, len);
4768                 goto exit;                    << 
4769         }                                     << 
4770                                                  4935 
4771         if (mode & FALLOC_FL_COLLAPSE_RANGE)  !! 4936         if (mode & FALLOC_FL_INSERT_RANGE)
4772                 ret = ext4_collapse_range(fil !! 4937                 return ext4_insert_range(inode, offset, len);
4773                 goto exit;                    << 
4774         }                                     << 
4775                                                  4938 
4776         if (mode & FALLOC_FL_INSERT_RANGE) {  !! 4939         if (mode & FALLOC_FL_ZERO_RANGE)
4777                 ret = ext4_insert_range(file, !! 4940                 return ext4_zero_range(file, offset, len, mode);
4778                 goto exit;                    << 
4779         }                                     << 
4780                                                  4941 
4781         if (mode & FALLOC_FL_ZERO_RANGE) {    << 
4782                 ret = ext4_zero_range(file, o << 
4783                 goto exit;                    << 
4784         }                                     << 
4785         trace_ext4_fallocate_enter(inode, off    4942         trace_ext4_fallocate_enter(inode, offset, len, mode);
4786         lblk = offset >> blkbits;                4943         lblk = offset >> blkbits;
4787                                                  4944 
4788         max_blocks = EXT4_MAX_BLOCKS(len, off    4945         max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits);
4789         flags = EXT4_GET_BLOCKS_CREATE_UNWRIT    4946         flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
                                                   >> 4947         if (mode & FALLOC_FL_KEEP_SIZE)
                                                   >> 4948                 flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
4790                                                  4949 
4791         inode_lock(inode);                       4950         inode_lock(inode);
4792                                                  4951 
4793         /*                                       4952         /*
4794          * We only support preallocation for extent-based files               4953          * We only support preallocation for extent-based files
4795          */                                      4954          */
4796         if (!(ext4_test_inode_flag(inode, EXT    4955         if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
4797                 ret = -EOPNOTSUPP;               4956                 ret = -EOPNOTSUPP;
4798                 goto out;                        4957                 goto out;
4799         }                                        4958         }
4800                                                  4959 
4801         if (!(mode & FALLOC_FL_KEEP_SIZE) &&     4960         if (!(mode & FALLOC_FL_KEEP_SIZE) &&
4802             (offset + len > inode->i_size ||  !! 4961             (offset + len > i_size_read(inode) ||
4803              offset + len > EXT4_I(inode)->i_    4962              offset + len > EXT4_I(inode)->i_disksize)) {
4804                 new_size = offset + len;         4963                 new_size = offset + len;
4805                 ret = inode_newsize_ok(inode,    4964                 ret = inode_newsize_ok(inode, new_size);
4806                 if (ret)                         4965                 if (ret)
4807                         goto out;                4966                         goto out;
4808         }                                        4967         }
4809                                                  4968 
4810         /* Wait all existing dio workers, new !! 4969         /* Wait all existing dio workers, newcomers will block on i_mutex */
4811         inode_dio_wait(inode);                   4970         inode_dio_wait(inode);
4812                                                  4971 
4813         ret = file_modified(file);            << 
4814         if (ret)                              << 
4815                 goto out;                     << 
4816                                               << 
4817         ret = ext4_alloc_file_blocks(file, lb    4972         ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, flags);
4818         if (ret)                                 4973         if (ret)
4819                 goto out;                        4974                 goto out;
4820                                                  4975 
4821         if (file->f_flags & O_SYNC && EXT4_SB    4976         if (file->f_flags & O_SYNC && EXT4_SB(inode->i_sb)->s_journal) {
4822                 ret = ext4_fc_commit(EXT4_SB( !! 4977                 ret = jbd2_complete_transaction(EXT4_SB(inode->i_sb)->s_journal,
4823                                         EXT4_ !! 4978                                                 EXT4_I(inode)->i_sync_tid);
4824         }                                        4979         }
4825 out:                                             4980 out:
4826         inode_unlock(inode);                     4981         inode_unlock(inode);
4827         trace_ext4_fallocate_exit(inode, offs    4982         trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
4828 exit:                                         << 
4829         return ret;                              4983         return ret;
4830 }                                                4984 }
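
The function above dispatches the punch-hole, collapse, insert and zero-range modes to their helpers and only falls through to plain unwritten-extent preallocation in the default case. As a hedged illustration of how this path is exercised from userspace (a minimal sketch, not part of extents.c; the file path is hypothetical):

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical test file on an ext4 mount */
	int fd = open("/mnt/ext4/testfile", O_RDWR | O_CREAT, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/*
	 * Preallocate 1 MiB at offset 0 without changing i_size; with no
	 * other mode flags set this reaches the unwritten-extent
	 * allocation at the tail of ext4_fallocate().
	 */
	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20) < 0)
		perror("fallocate");

	close(fd);
	return 0;
}

Passing FALLOC_FL_PUNCH_HOLE, FALLOC_FL_COLLAPSE_RANGE, FALLOC_FL_INSERT_RANGE or FALLOC_FL_ZERO_RANGE instead selects the corresponding helper shown earlier in the function.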
4831                                                  4985 
4832 /*                                               4986 /*
4833  * This function converts a range of blocks to written extents.               4987  * This function converts a range of blocks to written extents.
4834  * The caller of this function will pass the start offset and the size.       4988  * The caller of this function will pass the start offset and the size.
4835  * All unwritten extents within this range will be converted to               4989  * All unwritten extents within this range will be converted to
4836  * written extents.                              4990  * written extents.
4837  *                                               4991  *
4838  * This function is called from the direct IO end-io callback                 4992  * This function is called from the direct IO end-io callback
4839  * function to convert the fallocated extents after IO is completed.          4993  * function to convert the fallocated extents after IO is completed.
4840  * Returns 0 on success.                         4994  * Returns 0 on success.
4841  */                                              4995  */
4842 int ext4_convert_unwritten_extents(handle_t *    4996 int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
4843                                    loff_t off    4997                                    loff_t offset, ssize_t len)
4844 {                                                4998 {
4845         unsigned int max_blocks;                 4999         unsigned int max_blocks;
4846         int ret = 0, ret2 = 0, ret3 = 0;      !! 5000         int ret = 0;
                                                   >> 5001         int ret2 = 0;
4847         struct ext4_map_blocks map;              5002         struct ext4_map_blocks map;
4848         unsigned int blkbits = inode->i_blkbi !! 5003         unsigned int credits, blkbits = inode->i_blkbits;
4849         unsigned int credits = 0;             << 
4850                                                  5004 
4851         map.m_lblk = offset >> blkbits;          5005         map.m_lblk = offset >> blkbits;
4852         max_blocks = EXT4_MAX_BLOCKS(len, off    5006         max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits);
4853                                                  5007 
4854         if (!handle) {                        !! 5008         /*
                                                   >> 5009          * This is somewhat ugly but the idea is clear: When a transaction is
                                                   >> 5010          * reserved, everything goes into it. Otherwise we start several
                                                   >> 5011          * smaller transactions for conversion of each extent separately.
                                                   >> 5012          */
                                                   >> 5013         if (handle) {
                                                   >> 5014                 handle = ext4_journal_start_reserved(handle,
                                                   >> 5015                                                      EXT4_HT_EXT_CONVERT);
                                                   >> 5016                 if (IS_ERR(handle))
                                                   >> 5017                         return PTR_ERR(handle);
                                                   >> 5018                 credits = 0;
                                                   >> 5019         } else {
4855                 /*                               5020                 /*
4856                  * credits to insert 1 extent    5021                  * credits to insert 1 extent into extent tree
4857                  */                              5022                  */
4858                 credits = ext4_chunk_trans_bl    5023                 credits = ext4_chunk_trans_blocks(inode, max_blocks);
4859         }                                        5024         }
4860         while (ret >= 0 && ret < max_blocks)     5025         while (ret >= 0 && ret < max_blocks) {
4861                 map.m_lblk += ret;               5026                 map.m_lblk += ret;
4862                 map.m_len = (max_blocks -= re    5027                 map.m_len = (max_blocks -= ret);
4863                 if (credits) {                   5028                 if (credits) {
4864                         handle = ext4_journal    5029                         handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
4865                                                  5030                                                     credits);
4866                         if (IS_ERR(handle)) {    5031                         if (IS_ERR(handle)) {
4867                                 ret = PTR_ERR    5032                                 ret = PTR_ERR(handle);
4868                                 break;           5033                                 break;
4869                         }                        5034                         }
4870                 }                                5035                 }
4871                 ret = ext4_map_blocks(handle,    5036                 ret = ext4_map_blocks(handle, inode, &map,
4872                                       EXT4_GE    5037                                       EXT4_GET_BLOCKS_IO_CONVERT_EXT);
4873                 if (ret <= 0)                    5038                 if (ret <= 0)
4874                         ext4_warning(inode->i    5039                         ext4_warning(inode->i_sb,
4875                                      "inode #    5040                                      "inode #%lu: block %u: len %u: "
4876                                      "ext4_ex    5041                                      "ext4_ext_map_blocks returned %d",
4877                                      inode->i    5042                                      inode->i_ino, map.m_lblk,
4878                                      map.m_le    5043                                      map.m_len, ret);
4879                 ret2 = ext4_mark_inode_dirty( !! 5044                 ext4_mark_inode_dirty(handle, inode);
4880                 if (credits) {                !! 5045                 if (credits)
4881                         ret3 = ext4_journal_s !! 5046                         ret2 = ext4_journal_stop(handle);
4882                         if (unlikely(ret3))   << 
4883                                 ret2 = ret3;  << 
4884                 }                             << 
4885                                               << 
4886                 if (ret <= 0 || ret2)            5047                 if (ret <= 0 || ret2)
4887                         break;                   5048                         break;
4888         }                                        5049         }
                                                   >> 5050         if (!credits)
                                                   >> 5051                 ret2 = ext4_journal_stop(handle);
4889         return ret > 0 ? ret2 : ret;             5052         return ret > 0 ? ret2 : ret;
4890 }                                                5053 }
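
Both ext4_fallocate() and the conversion loop above turn a byte range into a logical-block range with EXT4_MAX_BLOCKS(len, offset, blkbits). A minimal sketch of that arithmetic, under the assumption that the macro rounds the end of the byte range up and the start down to block boundaries (an illustrative helper, not the kernel's definition):

/*
 * Illustrative only: number of filesystem blocks covered by the byte
 * range [offset, offset + len) when the block size is (1 << blkbits).
 */
static unsigned int blocks_for_range(unsigned long long offset,
				     unsigned long long len,
				     unsigned int blkbits)
{
	unsigned long long first = offset >> blkbits;	/* round start down */
	unsigned long long last =
		(offset + len + (1ULL << blkbits) - 1) >> blkbits;	/* round end up */

	return (unsigned int)(last - first);
}

For example, with 4 KiB blocks (blkbits = 12), offset = 5000 and len = 10000 give first = 1 and last = 4, i.e. three blocks, which is what map.m_len starts out as in the conversion loop above.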
4891                                                  5054 
4892 int ext4_convert_unwritten_io_end_vec(handle_ !! 5055 /*
                                                   >> 5056  * If newes is not an existing extent (newes->es_pblk equals zero), find
                                                   >> 5057  * delayed extent at start of newes and update newes accordingly and
                                                   >> 5058  * return start of the next delayed extent.
                                                   >> 5059  *
                                                   >> 5060  * If newes is an existing extent (newes->es_pblk is not zero),
                                                   >> 5061  * return start of next delayed extent or EXT_MAX_BLOCKS if no delayed
                                                   >> 5062  * extent found. Leave newes unmodified.
                                                   >> 5063  */
                                                   >> 5064 static int ext4_find_delayed_extent(struct inode *inode,
                                                   >> 5065                                     struct extent_status *newes)
4893 {                                                5066 {
4894         int ret = 0, err = 0;                 !! 5067         struct extent_status es;
4895         struct ext4_io_end_vec *io_end_vec;   !! 5068         ext4_lblk_t block, next_del;
4896                                                  5069 
4897         /*                                    !! 5070         if (newes->es_pblk == 0) {
4898          * This is somewhat ugly but the idea !! 5071                 ext4_es_find_delayed_extent_range(inode, newes->es_lblk,
4899          * reserved, everything goes into it. !! 5072                                 newes->es_lblk + newes->es_len - 1, &es);
4900          * smaller transactions for conversio << 
4901          */                                   << 
4902         if (handle) {                         << 
4903                 handle = ext4_journal_start_r << 
4904                                               << 
4905                 if (IS_ERR(handle))           << 
4906                         return PTR_ERR(handle << 
4907         }                                     << 
4908                                                  5073 
4909         list_for_each_entry(io_end_vec, &io_e !! 5074                 /*
4910                 ret = ext4_convert_unwritten_ !! 5075                  * No extent in extent-tree contains block @newes->es_pblk,
4911                                               !! 5076                  * then the block may stay in 1)a hole or 2)delayed-extent.
4912                                               !! 5077                  */
4913                 if (ret)                      !! 5078                 if (es.es_len == 0)
4914                         break;                !! 5079                         /* A hole found. */
                                                   >> 5080                         return 0;
                                                   >> 5081 
                                                   >> 5082                 if (es.es_lblk > newes->es_lblk) {
                                                   >> 5083                         /* A hole found. */
                                                   >> 5084                         newes->es_len = min(es.es_lblk - newes->es_lblk,
                                                   >> 5085                                             newes->es_len);
                                                   >> 5086                         return 0;
                                                   >> 5087                 }
                                                   >> 5088 
                                                   >> 5089                 newes->es_len = es.es_lblk + es.es_len - newes->es_lblk;
4915         }                                        5090         }
4916                                                  5091 
4917         if (handle)                           !! 5092         block = newes->es_lblk + newes->es_len;
4918                 err = ext4_journal_stop(handl !! 5093         ext4_es_find_delayed_extent_range(inode, block, EXT_MAX_BLOCKS, &es);
                                                   >> 5094         if (es.es_len == 0)
                                                   >> 5095                 next_del = EXT_MAX_BLOCKS;
                                                   >> 5096         else
                                                   >> 5097                 next_del = es.es_lblk;
4919                                                  5098 
4920         return ret < 0 ? ret : err;           !! 5099         return next_del;
4921 }                                                5100 }
                                                   >> 5101 /* the fiemap flags we can handle are specified here */
                                                   >> 5102 #define EXT4_FIEMAP_FLAGS       (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)
4922                                                  5103 
4923 static int ext4_iomap_xattr_fiemap(struct ino !! 5104 static int ext4_xattr_fiemap(struct inode *inode,
                                                   >> 5105                                 struct fiemap_extent_info *fieinfo)
4924 {                                                5106 {
4925         __u64 physical = 0;                      5107         __u64 physical = 0;
4926         __u64 length = 0;                     !! 5108         __u64 length;
                                                   >> 5109         __u32 flags = FIEMAP_EXTENT_LAST;
4927         int blockbits = inode->i_sb->s_blocks    5110         int blockbits = inode->i_sb->s_blocksize_bits;
4928         int error = 0;                           5111         int error = 0;
4929         u16 iomap_type;                       << 
4930                                                  5112 
4931         /* in-inode? */                          5113         /* in-inode? */
4932         if (ext4_test_inode_state(inode, EXT4    5114         if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
4933                 struct ext4_iloc iloc;           5115                 struct ext4_iloc iloc;
4934                 int offset;     /* offset of     5116                 int offset;     /* offset of xattr in inode */
4935                                                  5117 
4936                 error = ext4_get_inode_loc(in    5118                 error = ext4_get_inode_loc(inode, &iloc);
4937                 if (error)                       5119                 if (error)
4938                         return error;            5120                         return error;
4939                 physical = (__u64)iloc.bh->b_    5121                 physical = (__u64)iloc.bh->b_blocknr << blockbits;
4940                 offset = EXT4_GOOD_OLD_INODE_    5122                 offset = EXT4_GOOD_OLD_INODE_SIZE +
4941                                 EXT4_I(inode)    5123                                 EXT4_I(inode)->i_extra_isize;
4942                 physical += offset;              5124                 physical += offset;
4943                 length = EXT4_SB(inode->i_sb)    5125                 length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
                                                   >> 5126                 flags |= FIEMAP_EXTENT_DATA_INLINE;
4944                 brelse(iloc.bh);                 5127                 brelse(iloc.bh);
4945                 iomap_type = IOMAP_INLINE;    !! 5128         } else { /* external block */
4946         } else if (EXT4_I(inode)->i_file_acl) << 
4947                 physical = (__u64)EXT4_I(inod    5129                 physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits;
4948                 length = inode->i_sb->s_block    5130                 length = inode->i_sb->s_blocksize;
4949                 iomap_type = IOMAP_MAPPED;    << 
4950         } else {                              << 
4951                 /* no in-inode or external bl << 
4952                 error = -ENOENT;              << 
4953                 goto out;                     << 
4954         }                                        5131         }
4955                                                  5132 
4956         iomap->addr = physical;               !! 5133         if (physical)
4957         iomap->offset = 0;                    !! 5134                 error = fiemap_fill_next_extent(fieinfo, 0, physical,
4958         iomap->length = length;               !! 5135                                                 length, flags);
4959         iomap->type = iomap_type;             !! 5136         return (error < 0 ? error : 0);
4960         iomap->flags = 0;                     << 
4961 out:                                          << 
4962         return error;                         << 
4963 }                                             << 
4964                                               << 
4965 static int ext4_iomap_xattr_begin(struct inod << 
4966                                   loff_t leng << 
4967                                   struct ioma << 
4968 {                                             << 
4969         int error;                            << 
4970                                               << 
4971         error = ext4_iomap_xattr_fiemap(inode << 
4972         if (error == 0 && (offset >= iomap->l << 
4973                 error = -ENOENT;              << 
4974         return error;                         << 
4975 }                                                5137 }
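
To make the in-inode branch above concrete (a worked example under assumed values, not taken from the source): with 4 KiB blocks, an inode sitting in inode-table block 1000, the historical 128-byte EXT4_GOOD_OLD_INODE_SIZE and i_extra_isize = 32, the reported physical start is 1000 * 4096 + 128 + 32 = 4096160, and with a 256-byte on-disk inode the reported length is 256 - 160 = 96 bytes.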
4976                                                  5138 
4977 static const struct iomap_ops ext4_iomap_xatt !! 5139 int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4978         .iomap_begin            = ext4_iomap_ !! 5140                 __u64 start, __u64 len)
4979 };                                            << 
4980                                               << 
4981 static int ext4_fiemap_check_ranges(struct in << 
4982 {                                                5141 {
4983         u64 maxbytes;                         !! 5142         ext4_lblk_t start_blk;
4984                                               !! 5143         int error = 0;
4985         if (ext4_test_inode_flag(inode, EXT4_ << 
4986                 maxbytes = inode->i_sb->s_max << 
4987         else                                  << 
4988                 maxbytes = EXT4_SB(inode->i_s << 
4989                                                  5144 
4990         if (*len == 0)                        !! 5145         if (ext4_has_inline_data(inode)) {
4991                 return -EINVAL;               !! 5146                 int has_inline = 1;
4992         if (start > maxbytes)                 << 
4993                 return -EFBIG;                << 
4994                                                  5147 
4995         /*                                    !! 5148                 error = ext4_inline_data_fiemap(inode, fieinfo, &has_inline,
4996          * Shrink request scope to what the f !! 5149                                                 start, len);
4997          */                                   << 
4998         if (*len > maxbytes || (maxbytes - *l << 
4999                 *len = maxbytes - start;      << 
5000         return 0;                             << 
5001 }                                             << 
5002                                                  5150 
5003 int ext4_fiemap(struct inode *inode, struct f !! 5151                 if (has_inline)
5004                 u64 start, u64 len)           !! 5152                         return error;
5005 {                                             !! 5153         }
5006         int error = 0;                        << 
5007                                                  5154 
5008         if (fieinfo->fi_flags & FIEMAP_FLAG_C    5155         if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
5009                 error = ext4_ext_precache(ino    5156                 error = ext4_ext_precache(inode);
5010                 if (error)                       5157                 if (error)
5011                         return error;            5158                         return error;
5012                 fieinfo->fi_flags &= ~FIEMAP_ << 
5013         }                                        5159         }
5014                                                  5160 
5015         /*                                    !! 5161         /* fallback to generic here if not in extents fmt */
5016          * For bitmap files the maximum size  !! 5162         if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
5017          * s_maxbytes, so check len here manu !! 5163                 return generic_block_fiemap(inode, fieinfo, start, len,
5018          * generic check.                     !! 5164                         ext4_get_block);
5019          */                                   !! 5165 
5020         error = ext4_fiemap_check_ranges(inod !! 5166         if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
5021         if (error)                            !! 5167                 return -EBADR;
5022                 return error;                 << 
5023                                                  5168 
5024         if (fieinfo->fi_flags & FIEMAP_FLAG_X    5169         if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
5025                 fieinfo->fi_flags &= ~FIEMAP_ !! 5170                 error = ext4_xattr_fiemap(inode, fieinfo);
5026                 return iomap_fiemap(inode, fi !! 5171         } else {
5027                                     &ext4_iom !! 5172                 ext4_lblk_t len_blks;
5028         }                                     !! 5173                 __u64 last_blk;
                                                   >> 5174 
                                                   >> 5175                 start_blk = start >> inode->i_sb->s_blocksize_bits;
                                                   >> 5176                 last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
                                                   >> 5177                 if (last_blk >= EXT_MAX_BLOCKS)
                                                   >> 5178                         last_blk = EXT_MAX_BLOCKS-1;
                                                   >> 5179                 len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
5029                                                  5180 
5030         return iomap_fiemap(inode, fieinfo, s !! 5181                 /*
                                                   >> 5182                  * Walk the extent tree gathering extent information
                                                   >> 5183                  * and pushing extents back to the user.
                                                   >> 5184                  */
                                                   >> 5185                 error = ext4_fill_fiemap_extents(inode, start_blk,
                                                   >> 5186                                                  len_blks, fieinfo);
                                                   >> 5187         }
                                                   >> 5188         return error;
5031 }                                                5189 }
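
ext4_fiemap() is reached through the FIEMAP ioctl. A minimal userspace sketch that asks for the first few extents of a file (hedged; the path is hypothetical and error handling is kept short):

#include <fcntl.h>
#include <linux/fiemap.h>
#include <linux/fs.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	unsigned int i, count = 32;
	struct fiemap *fm;
	int fd = open("/mnt/ext4/testfile", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	fm = calloc(1, sizeof(*fm) + count * sizeof(struct fiemap_extent));
	if (!fm)
		return 1;

	fm->fm_start = 0;
	fm->fm_length = FIEMAP_MAX_OFFSET;	/* map the whole file */
	fm->fm_flags = FIEMAP_FLAG_SYNC;	/* flush dirty data before mapping */
	fm->fm_extent_count = count;

	if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0) {
		perror("FS_IOC_FIEMAP");
		return 1;
	}

	for (i = 0; i < fm->fm_mapped_extents; i++)
		printf("logical %llu physical %llu len %llu flags 0x%x\n",
		       (unsigned long long)fm->fm_extents[i].fe_logical,
		       (unsigned long long)fm->fm_extents[i].fe_physical,
		       (unsigned long long)fm->fm_extents[i].fe_length,
		       fm->fm_extents[i].fe_flags);

	free(fm);
	close(fd);
	return 0;
}

Setting FIEMAP_FLAG_XATTR in fm_flags instead takes the xattr branch shown in ext4_fiemap() above.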
5032                                                  5190 
5033 int ext4_get_es_cache(struct inode *inode, st !! 5191 /*
5034                       __u64 start, __u64 len) !! 5192  * ext4_access_path:
                                                   >> 5193  * Function to access the path buffer for marking it dirty.
                                                   >> 5194  * It also checks if there are sufficient credits left in the journal handle
                                                   >> 5195  * to update path.
                                                   >> 5196  */
                                                   >> 5197 static int
                                                   >> 5198 ext4_access_path(handle_t *handle, struct inode *inode,
                                                   >> 5199                 struct ext4_ext_path *path)
5035 {                                                5200 {
5036         ext4_lblk_t start_blk, len_blks;      !! 5201         int credits, err;
5037         __u64 last_blk;                       << 
5038         int error = 0;                        << 
5039                                                  5202 
5040         if (ext4_has_inline_data(inode)) {    !! 5203         if (!ext4_handle_valid(handle))
5041                 int has_inline;               !! 5204                 return 0;
5042                                               << 
5043                 down_read(&EXT4_I(inode)->xat << 
5044                 has_inline = ext4_has_inline_ << 
5045                 up_read(&EXT4_I(inode)->xattr << 
5046                 if (has_inline)               << 
5047                         return 0;             << 
5048         }                                     << 
5049                                                  5205 
5050         if (fieinfo->fi_flags & FIEMAP_FLAG_C !! 5206         /*
5051                 error = ext4_ext_precache(ino !! 5207          * Check if need to extend journal credits
5052                 if (error)                    !! 5208          * 3 for leaf, sb, and inode plus 2 (bmap and group
5053                         return error;         !! 5209          * descriptor) for each block group; assume two block
5054                 fieinfo->fi_flags &= ~FIEMAP_ !! 5210          * groups
                                                   >> 5211          */
                                                   >> 5212         if (handle->h_buffer_credits < 7) {
                                                   >> 5213                 credits = ext4_writepage_trans_blocks(inode);
                                                   >> 5214                 err = ext4_ext_truncate_extend_restart(handle, inode, credits);
                                                   >> 5215                 /* EAGAIN is success */
                                                   >> 5216                 if (err && err != -EAGAIN)
                                                   >> 5217                         return err;
5055         }                                        5218         }
5056                                                  5219 
5057         error = fiemap_prep(inode, fieinfo, s !! 5220         err = ext4_ext_get_access(handle, inode, path);
5058         if (error)                            !! 5221         return err;
5059                 return error;                 << 
5060                                               << 
5061         error = ext4_fiemap_check_ranges(inod << 
5062         if (error)                            << 
5063                 return error;                 << 
5064                                               << 
5065         start_blk = start >> inode->i_sb->s_b << 
5066         last_blk = (start + len - 1) >> inode << 
5067         if (last_blk >= EXT_MAX_BLOCKS)       << 
5068                 last_blk = EXT_MAX_BLOCKS-1;  << 
5069         len_blks = ((ext4_lblk_t) last_blk) - << 
5070                                               << 
5071         /*                                    << 
5072          * Walk the extent tree gathering ext << 
5073          * and pushing extents back to the us << 
5074          */                                   << 
5075         return ext4_fill_es_cache_info(inode, << 
5076 }                                                5222 }
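
As a quick worked example of the range clamping just above (assuming 4 KiB blocks, s_blocksize_bits = 12): start = 10000 and len = 8192 give start_blk = 2 and last_blk = 18191 >> 12 = 4, so len_blks = 3; if last_blk were to reach EXT_MAX_BLOCKS it would simply be clamped to EXT_MAX_BLOCKS - 1 before the extent walk.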
5077                                                  5223 
5078 /*                                               5224 /*
5079  * ext4_ext_shift_path_extents:                  5225  * ext4_ext_shift_path_extents:
5080  * Shift the extents of a path structure lyin    5226  * Shift the extents of a path structure lying between path[depth].p_ext
5081  * and EXT_LAST_EXTENT(path[depth].p_hdr), by    5227  * and EXT_LAST_EXTENT(path[depth].p_hdr), by @shift blocks. @SHIFT tells
5082  * if it is a right shift or a left shift operation.                          5228  * if it is a right shift or a left shift operation.
5083  */                                              5229  */
5084 static int                                       5230 static int
5085 ext4_ext_shift_path_extents(struct ext4_ext_p    5231 ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift,
5086                             struct inode *ino    5232                             struct inode *inode, handle_t *handle,
5087                             enum SHIFT_DIRECT    5233                             enum SHIFT_DIRECTION SHIFT)
5088 {                                                5234 {
5089         int depth, err = 0;                      5235         int depth, err = 0;
5090         struct ext4_extent *ex_start, *ex_las    5236         struct ext4_extent *ex_start, *ex_last;
5091         bool update = false;                  !! 5237         bool update = 0;
5092         int credits, restart_credits;         << 
5093         depth = path->p_depth;                   5238         depth = path->p_depth;
5094                                                  5239 
5095         while (depth >= 0) {                     5240         while (depth >= 0) {
5096                 if (depth == path->p_depth) {    5241                 if (depth == path->p_depth) {
5097                         ex_start = path[depth    5242                         ex_start = path[depth].p_ext;
5098                         if (!ex_start)           5243                         if (!ex_start)
5099                                 return -EFSCO    5244                                 return -EFSCORRUPTED;
5100                                                  5245 
5101                         ex_last = EXT_LAST_EX    5246                         ex_last = EXT_LAST_EXTENT(path[depth].p_hdr);
5102                         /* leaf + sb + inode  << 
5103                         credits = 3;          << 
5104                         if (ex_start == EXT_F << 
5105                                 update = true << 
5106                                 /* extent tre << 
5107                                 credits = dep << 
5108                         }                     << 
5109                                               << 
5110                         restart_credits = ext << 
5111                         err = ext4_datasem_en << 
5112                                         resta << 
5113                         if (err) {            << 
5114                                 if (err > 0)  << 
5115                                         err = << 
5116                                 goto out;     << 
5117                         }                     << 
5118                                                  5247 
5119                         err = ext4_ext_get_ac !! 5248                         err = ext4_access_path(handle, inode, path + depth);
5120                         if (err)                 5249                         if (err)
5121                                 goto out;        5250                                 goto out;
5122                                                  5251 
                                                   >> 5252                         if (ex_start == EXT_FIRST_EXTENT(path[depth].p_hdr))
                                                   >> 5253                                 update = 1;
                                                   >> 5254 
5123                         while (ex_start <= ex    5255                         while (ex_start <= ex_last) {
5124                                 if (SHIFT ==     5256                                 if (SHIFT == SHIFT_LEFT) {
5125                                         le32_    5257                                         le32_add_cpu(&ex_start->ee_block,
5126                                                  5258                                                 -shift);
5127                                         /* Tr    5259                                         /* Try to merge to the left. */
5128                                         if ((    5260                                         if ((ex_start >
5129                                             E    5261                                             EXT_FIRST_EXTENT(path[depth].p_hdr))
5130                                             &    5262                                             &&
5131                                             e    5263                                             ext4_ext_try_to_merge_right(inode,
5132                                             p    5264                                             path, ex_start - 1))
5133                                                  5265                                                 ex_last--;
5134                                         else     5266                                         else
5135                                                  5267                                                 ex_start++;
5136                                 } else {         5268                                 } else {
5137                                         le32_    5269                                         le32_add_cpu(&ex_last->ee_block, shift);
5138                                         ext4_    5270                                         ext4_ext_try_to_merge_right(inode, path,
5139                                                  5271                                                 ex_last);
5140                                         ex_la    5272                                         ex_last--;
5141                                 }                5273                                 }
5142                         }                        5274                         }
5143                         err = ext4_ext_dirty(    5275                         err = ext4_ext_dirty(handle, inode, path + depth);
5144                         if (err)                 5276                         if (err)
5145                                 goto out;        5277                                 goto out;
5146                                                  5278 
5147                         if (--depth < 0 || !u    5279                         if (--depth < 0 || !update)
5148                                 break;           5280                                 break;
5149                 }                                5281                 }
5150                                                  5282 
5151                 /* Update index too */           5283                 /* Update index too */
5152                 err = ext4_ext_get_access(han !! 5284                 err = ext4_access_path(handle, inode, path + depth);
5153                 if (err)                         5285                 if (err)
5154                         goto out;                5286                         goto out;
5155                                                  5287 
5156                 if (SHIFT == SHIFT_LEFT)         5288                 if (SHIFT == SHIFT_LEFT)
5157                         le32_add_cpu(&path[de    5289                         le32_add_cpu(&path[depth].p_idx->ei_block, -shift);
5158                 else                             5290                 else
5159                         le32_add_cpu(&path[de    5291                         le32_add_cpu(&path[depth].p_idx->ei_block, shift);
5160                 err = ext4_ext_dirty(handle,     5292                 err = ext4_ext_dirty(handle, inode, path + depth);
5161                 if (err)                         5293                 if (err)
5162                         goto out;                5294                         goto out;
5163                                                  5295 
5164                 /* we are done if current ind    5296                 /* we are done if current index is not a starting index */
5165                 if (path[depth].p_idx != EXT_    5297                 if (path[depth].p_idx != EXT_FIRST_INDEX(path[depth].p_hdr))
5166                         break;                   5298                         break;
5167                                                  5299 
5168                 depth--;                         5300                 depth--;
5169         }                                        5301         }
5170                                                  5302 
5171 out:                                             5303 out:
5172         return err;                              5304         return err;
5173 }                                                5305 }
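
The shifts above adjust ee_block/ei_block with le32_add_cpu(), so the on-disk fields stay little-endian regardless of host byte order. A minimal sketch of that idiom using the glibc byte-order helpers (illustrative, not the kernel's implementation):

#include <stdint.h>
#include <endian.h>

/*
 * Add val to a little-endian 32-bit on-disk field in place; the same
 * idea as le32_add_cpu() applied to ee_block/ei_block above, where a
 * left shift passes a negative val.
 */
static void le32_field_add(uint32_t *field_le, int32_t val)
{
	*field_le = htole32(le32toh(*field_le) + val);
}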
5174                                                  5306 
5175 /*                                               5307 /*
5176  * ext4_ext_shift_extents:                       5308  * ext4_ext_shift_extents:
5177  * All the extents which lie in the range from @start to the last allocated   5309  * All the extents which lie in the range from @start to the last allocated
5178  * block for the @inode are shifted either to    5310  * block for the @inode are shifted either towards left or right (depending
5179  * upon @SHIFT) by @shift blocks.                5311  * upon @SHIFT) by @shift blocks.
5180  * On success, 0 is returned, error otherwise    5312  * On success, 0 is returned, error otherwise.
5181  */                                              5313  */
5182 static int                                       5314 static int
5183 ext4_ext_shift_extents(struct inode *inode, h    5315 ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
5184                        ext4_lblk_t start, ext    5316                        ext4_lblk_t start, ext4_lblk_t shift,
5185                        enum SHIFT_DIRECTION S    5317                        enum SHIFT_DIRECTION SHIFT)
5186 {                                                5318 {
5187         struct ext4_ext_path *path;              5319         struct ext4_ext_path *path;
5188         int ret = 0, depth;                      5320         int ret = 0, depth;
5189         struct ext4_extent *extent;              5321         struct ext4_extent *extent;
5190         ext4_lblk_t stop, *iterator, ex_start    5322         ext4_lblk_t stop, *iterator, ex_start, ex_end;
5191         ext4_lblk_t tmp = EXT_MAX_BLOCKS;     << 
5192                                                  5323 
5193         /* Let path point to the last extent     5324         /* Let path point to the last extent */
5194         path = ext4_find_extent(inode, EXT_MA    5325         path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
5195                                 EXT4_EX_NOCAC    5326                                 EXT4_EX_NOCACHE);
5196         if (IS_ERR(path))                        5327         if (IS_ERR(path))
5197                 return PTR_ERR(path);            5328                 return PTR_ERR(path);
5198                                                  5329 
5199         depth = path->p_depth;                   5330         depth = path->p_depth;
5200         extent = path[depth].p_ext;              5331         extent = path[depth].p_ext;
5201         if (!extent)                             5332         if (!extent)
5202                 goto out;                        5333                 goto out;
5203                                                  5334 
5204         stop = le32_to_cpu(extent->ee_block);    5335         stop = le32_to_cpu(extent->ee_block);
5205                                                  5336 
5206        /*                                        5337        /*
5207         * For left shifts, make sure the hole    5338         * For left shifts, make sure the hole on the left is big enough to
5208         * accommodate the shift.  For right s    5339         * accommodate the shift.  For right shifts, make sure the last extent
5209         * won't be shifted beyond EXT_MAX_BLO    5340         * won't be shifted beyond EXT_MAX_BLOCKS.
5210         */                                       5341         */
5211         if (SHIFT == SHIFT_LEFT) {               5342         if (SHIFT == SHIFT_LEFT) {
5212                 path = ext4_find_extent(inode !! 5343                 path = ext4_find_extent(inode, start - 1, &path,
5213                                         EXT4_    5344                                         EXT4_EX_NOCACHE);
5214                 if (IS_ERR(path))                5345                 if (IS_ERR(path))
5215                         return PTR_ERR(path);    5346                         return PTR_ERR(path);
5216                 depth = path->p_depth;           5347                 depth = path->p_depth;
5217                 extent =  path[depth].p_ext;     5348                 extent =  path[depth].p_ext;
5218                 if (extent) {                    5349                 if (extent) {
5219                         ex_start = le32_to_cp    5350                         ex_start = le32_to_cpu(extent->ee_block);
5220                         ex_end = le32_to_cpu(    5351                         ex_end = le32_to_cpu(extent->ee_block) +
5221                                 ext4_ext_get_    5352                                 ext4_ext_get_actual_len(extent);
5222                 } else {                         5353                 } else {
5223                         ex_start = 0;            5354                         ex_start = 0;
5224                         ex_end = 0;              5355                         ex_end = 0;
5225                 }                                5356                 }
5226                                                  5357 
5227                 if ((start == ex_start && shi    5358                 if ((start == ex_start && shift > ex_start) ||
5228                     (shift > start - ex_end))    5359                     (shift > start - ex_end)) {
5229                         ret = -EINVAL;           5360                         ret = -EINVAL;
5230                         goto out;                5361                         goto out;
5231                 }                                5362                 }
5232         } else {                                 5363         } else {
5233                 if (shift > EXT_MAX_BLOCKS -     5364                 if (shift > EXT_MAX_BLOCKS -
5234                     (stop + ext4_ext_get_actu    5365                     (stop + ext4_ext_get_actual_len(extent))) {
5235                         ret = -EINVAL;           5366                         ret = -EINVAL;
5236                         goto out;                5367                         goto out;
5237                 }                                5368                 }
5238         }                                        5369         }
5239                                                  5370 
5240         /*                                       5371         /*
5241          * In case of left shift, iterator po    5372          * In case of left shift, iterator points to start and it is increased
5242          * till we reach stop. In case of rig    5373          * till we reach stop. In case of right shift, iterator points to stop
5243          * and it is decreased till we reach     5374          * and it is decreased till we reach start.
5244          */                                      5375          */
5245 again:                                        << 
5246         ret = 0;                              << 
5247         if (SHIFT == SHIFT_LEFT)                 5376         if (SHIFT == SHIFT_LEFT)
5248                 iterator = &start;               5377                 iterator = &start;
5249         else                                     5378         else
5250                 iterator = &stop;                5379                 iterator = &stop;
5251                                                  5380 
5252         if (tmp != EXT_MAX_BLOCKS)            << 
5253                 *iterator = tmp;              << 
5254                                               << 
5255         /*                                       5381         /*
5256          * It's safe to start updating extents.  Start and stop are unsigned, so        5382          * It's safe to start updating extents.  Start and stop are unsigned, so
5257          * in case of right shift, if an extent with block 0 is reached, the iterator    5383          * in case of right shift, if an extent with block 0 is reached, the iterator
5258          * becomes NULL to indicate the end o    5384          * becomes NULL to indicate the end of the loop.
5259          */                                      5385          */
5260         while (iterator && start <= stop) {      5386         while (iterator && start <= stop) {
5261                 path = ext4_find_extent(inode !! 5387                 path = ext4_find_extent(inode, *iterator, &path,
5262                                         EXT4_    5388                                         EXT4_EX_NOCACHE);
5263                 if (IS_ERR(path))                5389                 if (IS_ERR(path))
5264                         return PTR_ERR(path);    5390                         return PTR_ERR(path);
5265                 depth = path->p_depth;           5391                 depth = path->p_depth;
5266                 extent = path[depth].p_ext;      5392                 extent = path[depth].p_ext;
5267                 if (!extent) {                   5393                 if (!extent) {
5268                         EXT4_ERROR_INODE(inod    5394                         EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
5269                                          (uns    5395                                          (unsigned long) *iterator);
5270                         return -EFSCORRUPTED;    5396                         return -EFSCORRUPTED;
5271                 }                                5397                 }
5272                 if (SHIFT == SHIFT_LEFT && *i    5398                 if (SHIFT == SHIFT_LEFT && *iterator >
5273                     le32_to_cpu(extent->ee_bl    5399                     le32_to_cpu(extent->ee_block)) {
5274                         /* Hole, move to the     5400                         /* Hole, move to the next extent */
5275                         if (extent < EXT_LAST    5401                         if (extent < EXT_LAST_EXTENT(path[depth].p_hdr)) {
5276                                 path[depth].p    5402                                 path[depth].p_ext++;
5277                         } else {                 5403                         } else {
5278                                 *iterator = e    5404                                 *iterator = ext4_ext_next_allocated_block(path);
5279                                 continue;        5405                                 continue;
5280                         }                        5406                         }
5281                 }                                5407                 }
5282                                                  5408 
5283                 tmp = *iterator;              << 
5284                 if (SHIFT == SHIFT_LEFT) {       5409                 if (SHIFT == SHIFT_LEFT) {
5285                         extent = EXT_LAST_EXT    5410                         extent = EXT_LAST_EXTENT(path[depth].p_hdr);
5286                         *iterator = le32_to_c    5411                         *iterator = le32_to_cpu(extent->ee_block) +
5287                                         ext4_    5412                                         ext4_ext_get_actual_len(extent);
5288                 } else {                         5413                 } else {
5289                         extent = EXT_FIRST_EX    5414                         extent = EXT_FIRST_EXTENT(path[depth].p_hdr);
5290                         if (le32_to_cpu(exten !! 5415                         if (le32_to_cpu(extent->ee_block) > 0)
5291                                 *iterator = l    5416                                 *iterator = le32_to_cpu(extent->ee_block) - 1;
5292                         else if (le32_to_cpu( !! 5417                         else
                                                   >> 5418                                 /* Beginning is reached, end of the loop */
5293                                 iterator = NU    5419                                 iterator = NULL;
5294                         else {                !! 5420                         /* Update path extent in case we need to stop */
5295                                 extent = EXT_ !! 5421                         while (le32_to_cpu(extent->ee_block) < start)
5296                                 while (le32_t << 
5297                                         exten << 
5298                                               << 
5299                                 if (extent == << 
5300                                         break << 
5301                                               << 
5302                                 extent++;        5422                                 extent++;
5303                                 iterator = NU << 
5304                         }                     << 
5305                         path[depth].p_ext = e    5423                         path[depth].p_ext = extent;
5306                 }                                5424                 }
5307                 ret = ext4_ext_shift_path_ext    5425                 ret = ext4_ext_shift_path_extents(path, shift, inode,
5308                                 handle, SHIFT    5426                                 handle, SHIFT);
5309                 /* iterator can be NULL which << 
5310                 if (ret == -EAGAIN)           << 
5311                         goto again;           << 
5312                 if (ret)                         5427                 if (ret)
5313                         break;                   5428                         break;
5314         }                                        5429         }
5315 out:                                             5430 out:
5316         ext4_free_ext_path(path);             !! 5431         ext4_ext_drop_refs(path);
                                                   >> 5432         kfree(path);
5317         return ret;                              5433         return ret;
5318 }                                                5434 }
5319                                                  5435 
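The iterator handling above is easier to see on a plain array. The sketch below is a conceptual model only (the toy_* names are illustrative, not kernel APIs): a collapse-style SHIFT_LEFT walks the extents from @start upwards and lowers each logical start, while an insert-style SHIFT_RIGHT walks from the last extent downwards and raises it, so a shifted extent never overtakes its still-unshifted neighbour.

/* Conceptual sketch of ext4_ext_shift_extents(); illustrative names only. */
#include <stddef.h>

struct toy_extent {
        unsigned int lblk;      /* logical start block */
        unsigned int len;       /* length in blocks */
};

enum toy_dir { TOY_SHIFT_LEFT, TOY_SHIFT_RIGHT };

static void toy_shift_extents(struct toy_extent *ex, size_t nr,
                              unsigned int start, unsigned int shift,
                              enum toy_dir dir)
{
        size_t i;

        if (dir == TOY_SHIFT_LEFT) {
                /* collapse range: walk low -> high so a shifted extent
                 * never collides with the not-yet-shifted one to its right */
                for (i = 0; i < nr; i++)
                        if (ex[i].lblk >= start)
                                ex[i].lblk -= shift;
        } else {
                /* insert range: walk high -> low for the symmetric reason */
                for (i = nr; i-- > 0; )
                        if (ex[i].lblk >= start)
                                ex[i].lblk += shift;
        }
}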
5320 /*                                               5436 /*
5321  * ext4_collapse_range:                          5437  * ext4_collapse_range:
5322  * This implements fallocate's collapse rang    5438  * This implements fallocate's collapse range functionality for ext4
5323  * Returns: 0 on success and non-zero on err    5439  * Returns: 0 on success and non-zero on error.
5324  */                                              5440  */
5325 static int ext4_collapse_range(struct file *f !! 5441 int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
5326 {                                                5442 {
5327         struct inode *inode = file_inode(file << 
5328         struct super_block *sb = inode->i_sb;    5443         struct super_block *sb = inode->i_sb;
5329         struct address_space *mapping = inode << 
5330         ext4_lblk_t punch_start, punch_stop;     5444         ext4_lblk_t punch_start, punch_stop;
5331         handle_t *handle;                        5445         handle_t *handle;
5332         unsigned int credits;                    5446         unsigned int credits;
5333         loff_t new_size, ioffset;                5447         loff_t new_size, ioffset;
5334         int ret;                                 5448         int ret;
5335                                                  5449 
5336         /*                                       5450         /*
5337          * We need to test this early because    5451          * We need to test this early because xfstests assumes that a
5338          * collapse range of (0, 1) will retu    5452          * collapse range of (0, 1) will return EOPNOTSUPP if the file
5339          * system does not support collapse r    5453          * system does not support collapse range.
5340          */                                      5454          */
5341         if (!ext4_test_inode_flag(inode, EXT4    5455         if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
5342                 return -EOPNOTSUPP;              5456                 return -EOPNOTSUPP;
5343                                                  5457 
5344         /* Collapse range works only on fs cl !! 5458         /* Collapse range works only on fs block size aligned offsets. */
5345         if (!IS_ALIGNED(offset | len, EXT4_CL !! 5459         if (offset & (EXT4_CLUSTER_SIZE(sb) - 1) ||
                                                   >> 5460             len & (EXT4_CLUSTER_SIZE(sb) - 1))
                                                   >> 5461                 return -EINVAL;
                                                   >> 5462 
                                                   >> 5463         if (!S_ISREG(inode->i_mode))
5346                 return -EINVAL;                  5464                 return -EINVAL;
5347                                                  5465 
5348         trace_ext4_collapse_range(inode, offs    5466         trace_ext4_collapse_range(inode, offset, len);
5349                                                  5467 
5350         punch_start = offset >> EXT4_BLOCK_SI    5468         punch_start = offset >> EXT4_BLOCK_SIZE_BITS(sb);
5351         punch_stop = (offset + len) >> EXT4_B    5469         punch_stop = (offset + len) >> EXT4_BLOCK_SIZE_BITS(sb);
5352                                                  5470 
                                                   >> 5471         /* Call ext4_force_commit to flush all data in case of data=journal. */
                                                   >> 5472         if (ext4_should_journal_data(inode)) {
                                                   >> 5473                 ret = ext4_force_commit(inode->i_sb);
                                                   >> 5474                 if (ret)
                                                   >> 5475                         return ret;
                                                   >> 5476         }
                                                   >> 5477 
5353         inode_lock(inode);                       5478         inode_lock(inode);
5354         /*                                       5479         /*
5355          * There is no need to overlap collap    5480          * There is no need to overlap collapse range with EOF, in which case
5356          * it is effectively a truncate opera    5481          * it is effectively a truncate operation
5357          */                                      5482          */
5358         if (offset + len >= inode->i_size) {  !! 5483         if (offset + len >= i_size_read(inode)) {
5359                 ret = -EINVAL;                   5484                 ret = -EINVAL;
5360                 goto out_mutex;                  5485                 goto out_mutex;
5361         }                                        5486         }
5362                                                  5487 
5363         /* Currently just for extent based fi    5488         /* Currently just for extent based files */
5364         if (!ext4_test_inode_flag(inode, EXT4    5489         if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
5365                 ret = -EOPNOTSUPP;               5490                 ret = -EOPNOTSUPP;
5366                 goto out_mutex;                  5491                 goto out_mutex;
5367         }                                        5492         }
5368                                                  5493 
5369         /* Wait for existing dio to complete     5494         /* Wait for existing dio to complete */
5370         inode_dio_wait(inode);                   5495         inode_dio_wait(inode);
5371                                                  5496 
5372         ret = file_modified(file);            << 
5373         if (ret)                              << 
5374                 goto out_mutex;               << 
5375                                               << 
5376         /*                                       5497         /*
5377          * Prevent page faults from reinstant    5498          * Prevent page faults from reinstantiating pages we have released from
5378          * page cache.                           5499          * page cache.
5379          */                                      5500          */
5380         filemap_invalidate_lock(mapping);     !! 5501         down_write(&EXT4_I(inode)->i_mmap_sem);
5381                                               << 
5382         ret = ext4_break_layouts(inode);      << 
5383         if (ret)                              << 
5384                 goto out_mmap;                << 
5385                                               << 
5386         /*                                       5502         /*
5387          * Need to round down offset to be al    5503          * Need to round down offset to be aligned with page size boundary
5388          * for page size > block size.           5504          * for page size > block size.
5389          */                                      5505          */
5390         ioffset = round_down(offset, PAGE_SIZ    5506         ioffset = round_down(offset, PAGE_SIZE);
5391         /*                                       5507         /*
5392          * Write tail of the last page before    5508          * Write tail of the last page before removed range since it will get
5393          * removed from the page cache below.    5509          * removed from the page cache below.
5394          */                                      5510          */
5395         ret = filemap_write_and_wait_range(ma !! 5511         ret = filemap_write_and_wait_range(inode->i_mapping, ioffset, offset);
5396         if (ret)                                 5512         if (ret)
5397                 goto out_mmap;                   5513                 goto out_mmap;
5398         /*                                       5514         /*
5399          * Write data that will be shifted to    5515          * Write data that will be shifted to preserve them when discarding
5400          * page cache below. We are also prot    5516          * page cache below. We are also protected from pages becoming dirty
5401          * by i_rwsem and invalidate_lock.    !! 5517          * by i_mmap_sem.
5402          */                                      5518          */
5403         ret = filemap_write_and_wait_range(ma !! 5519         ret = filemap_write_and_wait_range(inode->i_mapping, offset + len,
5404                                            LL    5520                                            LLONG_MAX);
5405         if (ret)                                 5521         if (ret)
5406                 goto out_mmap;                   5522                 goto out_mmap;
5407         truncate_pagecache(inode, ioffset);      5523         truncate_pagecache(inode, ioffset);
5408                                                  5524 
5409         credits = ext4_writepage_trans_blocks    5525         credits = ext4_writepage_trans_blocks(inode);
5410         handle = ext4_journal_start(inode, EX    5526         handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
5411         if (IS_ERR(handle)) {                    5527         if (IS_ERR(handle)) {
5412                 ret = PTR_ERR(handle);           5528                 ret = PTR_ERR(handle);
5413                 goto out_mmap;                   5529                 goto out_mmap;
5414         }                                        5530         }
5415         ext4_fc_mark_ineligible(sb, EXT4_FC_R << 
5416                                                  5531 
5417         down_write(&EXT4_I(inode)->i_data_sem    5532         down_write(&EXT4_I(inode)->i_data_sem);
5418         ext4_discard_preallocations(inode);      5533         ext4_discard_preallocations(inode);
5419         ext4_es_remove_extent(inode, punch_st !! 5534 
                                                   >> 5535         ret = ext4_es_remove_extent(inode, punch_start,
                                                   >> 5536                                     EXT_MAX_BLOCKS - punch_start);
                                                   >> 5537         if (ret) {
                                                   >> 5538                 up_write(&EXT4_I(inode)->i_data_sem);
                                                   >> 5539                 goto out_stop;
                                                   >> 5540         }
5420                                                  5541 
5421         ret = ext4_ext_remove_space(inode, pu    5542         ret = ext4_ext_remove_space(inode, punch_start, punch_stop - 1);
5422         if (ret) {                               5543         if (ret) {
5423                 up_write(&EXT4_I(inode)->i_da    5544                 up_write(&EXT4_I(inode)->i_data_sem);
5424                 goto out_stop;                   5545                 goto out_stop;
5425         }                                        5546         }
5426         ext4_discard_preallocations(inode);      5547         ext4_discard_preallocations(inode);
5427                                                  5548 
5428         ret = ext4_ext_shift_extents(inode, h    5549         ret = ext4_ext_shift_extents(inode, handle, punch_stop,
5429                                      punch_st    5550                                      punch_stop - punch_start, SHIFT_LEFT);
5430         if (ret) {                               5551         if (ret) {
5431                 up_write(&EXT4_I(inode)->i_da    5552                 up_write(&EXT4_I(inode)->i_data_sem);
5432                 goto out_stop;                   5553                 goto out_stop;
5433         }                                        5554         }
5434                                                  5555 
5435         new_size = inode->i_size - len;       !! 5556         new_size = i_size_read(inode) - len;
5436         i_size_write(inode, new_size);           5557         i_size_write(inode, new_size);
5437         EXT4_I(inode)->i_disksize = new_size;    5558         EXT4_I(inode)->i_disksize = new_size;
5438                                                  5559 
5439         up_write(&EXT4_I(inode)->i_data_sem);    5560         up_write(&EXT4_I(inode)->i_data_sem);
5440         if (IS_SYNC(inode))                      5561         if (IS_SYNC(inode))
5441                 ext4_handle_sync(handle);        5562                 ext4_handle_sync(handle);
5442         inode_set_mtime_to_ts(inode, inode_se !! 5563         inode->i_mtime = inode->i_ctime = current_time(inode);
5443         ret = ext4_mark_inode_dirty(handle, i !! 5564         ext4_mark_inode_dirty(handle, inode);
5444         ext4_update_inode_fsync_trans(handle,    5565         ext4_update_inode_fsync_trans(handle, inode, 1);
5445                                                  5566 
5446 out_stop:                                        5567 out_stop:
5447         ext4_journal_stop(handle);               5568         ext4_journal_stop(handle);
5448 out_mmap:                                        5569 out_mmap:
5449         filemap_invalidate_unlock(mapping);   !! 5570         up_write(&EXT4_I(inode)->i_mmap_sem);
5450 out_mutex:                                       5571 out_mutex:
5451         inode_unlock(inode);                     5572         inode_unlock(inode);
5452         return ret;                              5573         return ret;
5453 }                                                5574 }
5454                                                  5575 
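ext4_collapse_range() is reached from userspace through fallocate(2) with FALLOC_FL_COLLAPSE_RANGE. A minimal sketch of the caller's side, assuming a Linux/glibc environment; as the checks above show, offset and len must be multiples of the filesystem block (cluster) size and offset + len must end before EOF, otherwise the call fails with EINVAL.

/* Minimal userspace sketch: collapse a block-aligned byte range out of a
 * file. On ext4 this ends up in ext4_collapse_range(). */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
        off_t offset, len;
        int fd;

        if (argc != 4) {
                fprintf(stderr, "usage: %s <file> <offset> <len>\n", argv[0]);
                return 1;
        }
        offset = (off_t)strtoll(argv[2], NULL, 0);      /* must be block aligned */
        len = (off_t)strtoll(argv[3], NULL, 0);         /* must be block aligned */

        fd = open(argv[1], O_RDWR);
        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* offset + len must end strictly before EOF */
        if (fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, offset, len) < 0) {
                perror("fallocate(FALLOC_FL_COLLAPSE_RANGE)");
                return 1;
        }
        return 0;
}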
5455 /*                                               5576 /*
5456  * ext4_insert_range:                            5577  * ext4_insert_range:
5457  * This function implements the FALLOC_FL_INS    5578  * This function implements the FALLOC_FL_INSERT_RANGE flag of fallocate.
5458  * The data blocks starting from @offset to t    5579  * The data blocks starting from @offset to the EOF are shifted by @len
5459  * towards the right to create a hole in th    5580  * towards the right to create a hole in the @inode. Inode size is increased
5460  * by len bytes.                                 5581  * by len bytes.
5461  * Returns 0 on success, error otherwise.        5582  * Returns 0 on success, error otherwise.
5462  */                                              5583  */
5463 static int ext4_insert_range(struct file *fil !! 5584 int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
5464 {                                                5585 {
5465         struct inode *inode = file_inode(file << 
5466         struct super_block *sb = inode->i_sb;    5586         struct super_block *sb = inode->i_sb;
5467         struct address_space *mapping = inode << 
5468         handle_t *handle;                        5587         handle_t *handle;
5469         struct ext4_ext_path *path;              5588         struct ext4_ext_path *path;
5470         struct ext4_extent *extent;              5589         struct ext4_extent *extent;
5471         ext4_lblk_t offset_lblk, len_lblk, ee    5590         ext4_lblk_t offset_lblk, len_lblk, ee_start_lblk = 0;
5472         unsigned int credits, ee_len;            5591         unsigned int credits, ee_len;
5473         int ret = 0, depth, split_flag = 0;      5592         int ret = 0, depth, split_flag = 0;
5474         loff_t ioffset;                          5593         loff_t ioffset;
5475                                                  5594 
5476         /*                                       5595         /*
5477          * We need to test this early because    5596          * We need to test this early because xfstests assumes that an
5478          * insert range of (0, 1) will return    5597          * insert range of (0, 1) will return EOPNOTSUPP if the file
5479          * system does not support insert ran    5598          * system does not support insert range.
5480          */                                      5599          */
5481         if (!ext4_test_inode_flag(inode, EXT4    5600         if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
5482                 return -EOPNOTSUPP;              5601                 return -EOPNOTSUPP;
5483                                                  5602 
5484         /* Insert range works only on fs clus !! 5603         /* Insert range works only on fs block size aligned offsets. */
5485         if (!IS_ALIGNED(offset | len, EXT4_CL !! 5604         if (offset & (EXT4_CLUSTER_SIZE(sb) - 1) ||
                                                   >> 5605                         len & (EXT4_CLUSTER_SIZE(sb) - 1))
5486                 return -EINVAL;                  5606                 return -EINVAL;
5487                                                  5607 
                                                   >> 5608         if (!S_ISREG(inode->i_mode))
                                                   >> 5609                 return -EOPNOTSUPP;
                                                   >> 5610 
5488         trace_ext4_insert_range(inode, offset    5611         trace_ext4_insert_range(inode, offset, len);
5489                                                  5612 
5490         offset_lblk = offset >> EXT4_BLOCK_SI    5613         offset_lblk = offset >> EXT4_BLOCK_SIZE_BITS(sb);
5491         len_lblk = len >> EXT4_BLOCK_SIZE_BIT    5614         len_lblk = len >> EXT4_BLOCK_SIZE_BITS(sb);
5492                                                  5615 
                                                   >> 5616         /* Call ext4_force_commit to flush all data in case of data=journal */
                                                   >> 5617         if (ext4_should_journal_data(inode)) {
                                                   >> 5618                 ret = ext4_force_commit(inode->i_sb);
                                                   >> 5619                 if (ret)
                                                   >> 5620                         return ret;
                                                   >> 5621         }
                                                   >> 5622 
5493         inode_lock(inode);                       5623         inode_lock(inode);
5494         /* Currently just for extent based fi    5624         /* Currently just for extent based files */
5495         if (!ext4_test_inode_flag(inode, EXT4    5625         if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
5496                 ret = -EOPNOTSUPP;               5626                 ret = -EOPNOTSUPP;
5497                 goto out_mutex;                  5627                 goto out_mutex;
5498         }                                        5628         }
5499                                                  5629 
5500         /* Check whether the maximum file siz !! 5630         /* Check for wrap through zero */
5501         if (len > inode->i_sb->s_maxbytes - i !! 5631         if (inode->i_size + len > inode->i_sb->s_maxbytes) {
5502                 ret = -EFBIG;                    5632                 ret = -EFBIG;
5503                 goto out_mutex;                  5633                 goto out_mutex;
5504         }                                        5634         }
5505                                                  5635 
5506         /* Offset must be less than i_size */ !! 5636         /* Offset should be less than i_size */
5507         if (offset >= inode->i_size) {        !! 5637         if (offset >= i_size_read(inode)) {
5508                 ret = -EINVAL;                   5638                 ret = -EINVAL;
5509                 goto out_mutex;                  5639                 goto out_mutex;
5510         }                                        5640         }
5511                                                  5641 
5512         /* Wait for existing dio to complete     5642         /* Wait for existing dio to complete */
5513         inode_dio_wait(inode);                   5643         inode_dio_wait(inode);
5514                                                  5644 
5515         ret = file_modified(file);            << 
5516         if (ret)                              << 
5517                 goto out_mutex;               << 
5518                                               << 
5519         /*                                       5645         /*
5520          * Prevent page faults from reinstant    5646          * Prevent page faults from reinstantiating pages we have released from
5521          * page cache.                           5647          * page cache.
5522          */                                      5648          */
5523         filemap_invalidate_lock(mapping);     !! 5649         down_write(&EXT4_I(inode)->i_mmap_sem);
5524                                               << 
5525         ret = ext4_break_layouts(inode);      << 
5526         if (ret)                              << 
5527                 goto out_mmap;                << 
5528                                               << 
5529         /*                                       5650         /*
5530          * Need to round down to align start     5651          * Need to round down to align start offset to page size boundary
5531          * for page size > block size.           5652          * for page size > block size.
5532          */                                      5653          */
5533         ioffset = round_down(offset, PAGE_SIZ    5654         ioffset = round_down(offset, PAGE_SIZE);
5534         /* Write out all dirty pages */          5655         /* Write out all dirty pages */
5535         ret = filemap_write_and_wait_range(in    5656         ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
5536                         LLONG_MAX);              5657                         LLONG_MAX);
5537         if (ret)                                 5658         if (ret)
5538                 goto out_mmap;                   5659                 goto out_mmap;
5539         truncate_pagecache(inode, ioffset);      5660         truncate_pagecache(inode, ioffset);
5540                                                  5661 
5541         credits = ext4_writepage_trans_blocks    5662         credits = ext4_writepage_trans_blocks(inode);
5542         handle = ext4_journal_start(inode, EX    5663         handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
5543         if (IS_ERR(handle)) {                    5664         if (IS_ERR(handle)) {
5544                 ret = PTR_ERR(handle);           5665                 ret = PTR_ERR(handle);
5545                 goto out_mmap;                   5666                 goto out_mmap;
5546         }                                        5667         }
5547         ext4_fc_mark_ineligible(sb, EXT4_FC_R << 
5548                                                  5668 
5549         /* Expand file to avoid data loss if     5669         /* Expand file to avoid data loss if there is error while shifting */
5550         inode->i_size += len;                    5670         inode->i_size += len;
5551         EXT4_I(inode)->i_disksize += len;        5671         EXT4_I(inode)->i_disksize += len;
5552         inode_set_mtime_to_ts(inode, inode_se !! 5672         inode->i_mtime = inode->i_ctime = current_time(inode);
5553         ret = ext4_mark_inode_dirty(handle, i    5673         ret = ext4_mark_inode_dirty(handle, inode);
5554         if (ret)                                 5674         if (ret)
5555                 goto out_stop;                   5675                 goto out_stop;
5556                                                  5676 
5557         down_write(&EXT4_I(inode)->i_data_sem    5677         down_write(&EXT4_I(inode)->i_data_sem);
5558         ext4_discard_preallocations(inode);      5678         ext4_discard_preallocations(inode);
5559                                                  5679 
5560         path = ext4_find_extent(inode, offset    5680         path = ext4_find_extent(inode, offset_lblk, NULL, 0);
5561         if (IS_ERR(path)) {                      5681         if (IS_ERR(path)) {
5562                 up_write(&EXT4_I(inode)->i_da    5682                 up_write(&EXT4_I(inode)->i_data_sem);
5563                 ret = PTR_ERR(path);          << 
5564                 goto out_stop;                   5683                 goto out_stop;
5565         }                                        5684         }
5566                                                  5685 
5567         depth = ext_depth(inode);                5686         depth = ext_depth(inode);
5568         extent = path[depth].p_ext;              5687         extent = path[depth].p_ext;
5569         if (extent) {                            5688         if (extent) {
5570                 ee_start_lblk = le32_to_cpu(e    5689                 ee_start_lblk = le32_to_cpu(extent->ee_block);
5571                 ee_len = ext4_ext_get_actual_    5690                 ee_len = ext4_ext_get_actual_len(extent);
5572                                                  5691 
5573                 /*                               5692                 /*
5574                  * If offset_lblk is not the     5693                  * If offset_lblk is not the starting block of extent, split
5575                  * the extent @offset_lblk       5694                  * the extent @offset_lblk
5576                  */                              5695                  */
5577                 if ((offset_lblk > ee_start_l    5696                 if ((offset_lblk > ee_start_lblk) &&
5578                                 (offset_lblk     5697                                 (offset_lblk < (ee_start_lblk + ee_len))) {
5579                         if (ext4_ext_is_unwri    5698                         if (ext4_ext_is_unwritten(extent))
5580                                 split_flag =     5699                                 split_flag = EXT4_EXT_MARK_UNWRIT1 |
5581                                         EXT4_    5700                                         EXT4_EXT_MARK_UNWRIT2;
5582                         path = ext4_split_ext !! 5701                         ret = ext4_split_extent_at(handle, inode, &path,
5583                                         offse    5702                                         offset_lblk, split_flag,
5584                                         EXT4_    5703                                         EXT4_EX_NOCACHE |
5585                                         EXT4_    5704                                         EXT4_GET_BLOCKS_PRE_IO |
5586                                         EXT4_    5705                                         EXT4_GET_BLOCKS_METADATA_NOFAIL);
5587                 }                                5706                 }
5588                                                  5707 
5589                 if (IS_ERR(path)) {           !! 5708                 ext4_ext_drop_refs(path);
                                                   >> 5709                 kfree(path);
                                                   >> 5710                 if (ret < 0) {
5590                         up_write(&EXT4_I(inod    5711                         up_write(&EXT4_I(inode)->i_data_sem);
5591                         ret = PTR_ERR(path);  << 
5592                         goto out_stop;           5712                         goto out_stop;
5593                 }                                5713                 }
                                                   >> 5714         } else {
                                                   >> 5715                 ext4_ext_drop_refs(path);
                                                   >> 5716                 kfree(path);
5594         }                                        5717         }
5595                                                  5718 
5596         ext4_free_ext_path(path);             !! 5719         ret = ext4_es_remove_extent(inode, offset_lblk,
5597         ext4_es_remove_extent(inode, offset_l !! 5720                         EXT_MAX_BLOCKS - offset_lblk);
                                                   >> 5721         if (ret) {
                                                   >> 5722                 up_write(&EXT4_I(inode)->i_data_sem);
                                                   >> 5723                 goto out_stop;
                                                   >> 5724         }
5598                                                  5725 
5599         /*                                       5726         /*
5600          * if offset_lblk lies in a hole whic    5727          * if offset_lblk lies in a hole which is at start of file, use
5601          * ee_start_lblk to shift extents        5728          * ee_start_lblk to shift extents
5602          */                                      5729          */
5603         ret = ext4_ext_shift_extents(inode, h    5730         ret = ext4_ext_shift_extents(inode, handle,
5604                 max(ee_start_lblk, offset_lbl !! 5731                 ee_start_lblk > offset_lblk ? ee_start_lblk : offset_lblk,
                                                   >> 5732                 len_lblk, SHIFT_RIGHT);
5605                                                  5733 
5606         up_write(&EXT4_I(inode)->i_data_sem);    5734         up_write(&EXT4_I(inode)->i_data_sem);
5607         if (IS_SYNC(inode))                      5735         if (IS_SYNC(inode))
5608                 ext4_handle_sync(handle);        5736                 ext4_handle_sync(handle);
5609         if (ret >= 0)                            5737         if (ret >= 0)
5610                 ext4_update_inode_fsync_trans    5738                 ext4_update_inode_fsync_trans(handle, inode, 1);
5611                                                  5739 
5612 out_stop:                                        5740 out_stop:
5613         ext4_journal_stop(handle);               5741         ext4_journal_stop(handle);
5614 out_mmap:                                        5742 out_mmap:
5615         filemap_invalidate_unlock(mapping);   !! 5743         up_write(&EXT4_I(inode)->i_mmap_sem);
5616 out_mutex:                                       5744 out_mutex:
5617         inode_unlock(inode);                     5745         inode_unlock(inode);
5618         return ret;                              5746         return ret;
5619 }                                                5747 }
5620                                                  5748 
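The userspace counterpart of the insert path is the FALLOC_FL_INSERT_RANGE mode of fallocate(2). A minimal sketch (the insert_range() wrapper name is just for illustration): the same cluster alignment rules apply, offset must lie below the current EOF, and the file grows by len bytes on success.

/* Sketch of the userspace side of ext4_insert_range(): shift everything
 * from @offset to EOF towards larger offsets by @len bytes and grow the
 * file, leaving an unallocated hole at @offset. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>

static int insert_range(int fd, off_t offset, off_t len)
{
        /* offset and len must be block (cluster) aligned and offset must
         * lie below the current i_size, or the kernel returns EINVAL */
        return fallocate(fd, FALLOC_FL_INSERT_RANGE, offset, len);
}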
5621 /**                                              5749 /**
5622  * ext4_swap_extents() - Swap extents between !! 5750  * ext4_swap_extents - Swap extents between two inodes
5623  * @handle: handle for this transaction       !! 5751  *
5624  * @inode1:     First inode                      5752  * @inode1:     First inode
5625  * @inode2:     Second inode                     5753  * @inode2:     Second inode
5626  * @lblk1:      Start block for first inode      5754  * @lblk1:      Start block for first inode
5627  * @lblk2:      Start block for second inode     5755  * @lblk2:      Start block for second inode
5628  * @count:      Number of blocks to swap         5756  * @count:      Number of blocks to swap
5629  * @unwritten: Mark second inode's extents as    5757  * @unwritten: Mark second inode's extents as unwritten after swap
5630  * @erp:        Pointer to save error value      5758  * @erp:        Pointer to save error value
5631  *                                               5759  *
5632  * This helper routine does exactly what it p    5760  * This helper routine does exactly what it promises: "swap extents". All other
5633  * stuff such as page-cache locking consisten    5761  * stuff such as page-cache locking consistency, bh mapping consistency or
5634  * extent's data copying must be performed by    5762  * extent's data copying must be performed by caller.
5635  * Locking:                                      5763  * Locking:
5636  *              i_rwsem is held for both inod !! 5764  *              i_mutex is held for both inodes
5637  *              i_data_sem is locked for writ    5765  *              i_data_sem is locked for write for both inodes
5638  * Assumptions:                                  5766  * Assumptions:
5639  *              All pages from requested rang    5767  *              All pages from requested range are locked for both inodes
5640  */                                              5768  */
5641 int                                              5769 int
5642 ext4_swap_extents(handle_t *handle, struct in    5770 ext4_swap_extents(handle_t *handle, struct inode *inode1,
5643                   struct inode *inode2, ext4_    5771                   struct inode *inode2, ext4_lblk_t lblk1, ext4_lblk_t lblk2,
5644                   ext4_lblk_t count, int unwr    5772                   ext4_lblk_t count, int unwritten, int *erp)
5645 {                                                5773 {
5646         struct ext4_ext_path *path1 = NULL;      5774         struct ext4_ext_path *path1 = NULL;
5647         struct ext4_ext_path *path2 = NULL;      5775         struct ext4_ext_path *path2 = NULL;
5648         int replaced_count = 0;                  5776         int replaced_count = 0;
5649                                                  5777 
5650         BUG_ON(!rwsem_is_locked(&EXT4_I(inode    5778         BUG_ON(!rwsem_is_locked(&EXT4_I(inode1)->i_data_sem));
5651         BUG_ON(!rwsem_is_locked(&EXT4_I(inode    5779         BUG_ON(!rwsem_is_locked(&EXT4_I(inode2)->i_data_sem));
5652         BUG_ON(!inode_is_locked(inode1));        5780         BUG_ON(!inode_is_locked(inode1));
5653         BUG_ON(!inode_is_locked(inode2));        5781         BUG_ON(!inode_is_locked(inode2));
5654                                                  5782 
5655         ext4_es_remove_extent(inode1, lblk1,  !! 5783         *erp = ext4_es_remove_extent(inode1, lblk1, count);
5656         ext4_es_remove_extent(inode2, lblk2,  !! 5784         if (unlikely(*erp))
                                                   >> 5785                 return 0;
                                                   >> 5786         *erp = ext4_es_remove_extent(inode2, lblk2, count);
                                                   >> 5787         if (unlikely(*erp))
                                                   >> 5788                 return 0;
5657                                                  5789 
5658         while (count) {                          5790         while (count) {
5659                 struct ext4_extent *ex1, *ex2    5791                 struct ext4_extent *ex1, *ex2, tmp_ex;
5660                 ext4_lblk_t e1_blk, e2_blk;      5792                 ext4_lblk_t e1_blk, e2_blk;
5661                 int e1_len, e2_len, len;         5793                 int e1_len, e2_len, len;
5662                 int split = 0;                   5794                 int split = 0;
5663                                                  5795 
5664                 path1 = ext4_find_extent(inod !! 5796                 path1 = ext4_find_extent(inode1, lblk1, NULL, EXT4_EX_NOCACHE);
5665                 if (IS_ERR(path1)) {             5797                 if (IS_ERR(path1)) {
5666                         *erp = PTR_ERR(path1)    5798                         *erp = PTR_ERR(path1);
5667                         goto errout;          !! 5799                         path1 = NULL;
                                                   >> 5800                 finish:
                                                   >> 5801                         count = 0;
                                                   >> 5802                         goto repeat;
5668                 }                                5803                 }
5669                 path2 = ext4_find_extent(inod !! 5804                 path2 = ext4_find_extent(inode2, lblk2, NULL, EXT4_EX_NOCACHE);
5670                 if (IS_ERR(path2)) {             5805                 if (IS_ERR(path2)) {
5671                         *erp = PTR_ERR(path2)    5806                         *erp = PTR_ERR(path2);
5672                         goto errout;          !! 5807                         path2 = NULL;
                                                   >> 5808                         goto finish;
5673                 }                                5809                 }
5674                 ex1 = path1[path1->p_depth].p    5810                 ex1 = path1[path1->p_depth].p_ext;
5675                 ex2 = path2[path2->p_depth].p    5811                 ex2 = path2[path2->p_depth].p_ext;
5676                 /* Do we have something to sw !! 5812                 /* Do we have something to swap ? */
5677                 if (unlikely(!ex2 || !ex1))      5813                 if (unlikely(!ex2 || !ex1))
5678                         goto errout;          !! 5814                         goto finish;
5679                                                  5815 
5680                 e1_blk = le32_to_cpu(ex1->ee_    5816                 e1_blk = le32_to_cpu(ex1->ee_block);
5681                 e2_blk = le32_to_cpu(ex2->ee_    5817                 e2_blk = le32_to_cpu(ex2->ee_block);
5682                 e1_len = ext4_ext_get_actual_    5818                 e1_len = ext4_ext_get_actual_len(ex1);
5683                 e2_len = ext4_ext_get_actual_    5819                 e2_len = ext4_ext_get_actual_len(ex2);
5684                                                  5820 
5685                 /* Hole handling */              5821                 /* Hole handling */
5686                 if (!in_range(lblk1, e1_blk,     5822                 if (!in_range(lblk1, e1_blk, e1_len) ||
5687                     !in_range(lblk2, e2_blk,     5823                     !in_range(lblk2, e2_blk, e2_len)) {
5688                         ext4_lblk_t next1, ne    5824                         ext4_lblk_t next1, next2;
5689                                                  5825 
5690                         /* if hole after exte    5826                         /* if hole after extent, then go to next extent */
5691                         next1 = ext4_ext_next    5827                         next1 = ext4_ext_next_allocated_block(path1);
5692                         next2 = ext4_ext_next    5828                         next2 = ext4_ext_next_allocated_block(path2);
5693                         /* If hole before ext    5829                         /* If hole before extent, then shift to that extent */
5694                         if (e1_blk > lblk1)      5830                         if (e1_blk > lblk1)
5695                                 next1 = e1_bl    5831                                 next1 = e1_blk;
5696                         if (e2_blk > lblk2)      5832                         if (e2_blk > lblk2)
5697                                 next2 = e2_bl    5833                                 next2 = e2_blk;
5698                         /* Do we have somethi    5834                         /* Do we have something to swap */
5699                         if (next1 == EXT_MAX_    5835                         if (next1 == EXT_MAX_BLOCKS || next2 == EXT_MAX_BLOCKS)
5700                                 goto errout;  !! 5836                                 goto finish;
5701                         /* Move to the rightm    5837                         /* Move to the rightmost boundary */
5702                         len = next1 - lblk1;     5838                         len = next1 - lblk1;
5703                         if (len < next2 - lbl    5839                         if (len < next2 - lblk2)
5704                                 len = next2 -    5840                                 len = next2 - lblk2;
5705                         if (len > count)         5841                         if (len > count)
5706                                 len = count;     5842                                 len = count;
5707                         lblk1 += len;            5843                         lblk1 += len;
5708                         lblk2 += len;            5844                         lblk2 += len;
5709                         count -= len;            5845                         count -= len;
5710                         continue;             !! 5846                         goto repeat;
5711                 }                                5847                 }
5712                                                  5848 
5713                 /* Prepare left boundary */      5849                 /* Prepare left boundary */
5714                 if (e1_blk < lblk1) {            5850                 if (e1_blk < lblk1) {
5715                         split = 1;               5851                         split = 1;
5716                         path1 = ext4_force_sp !! 5852                         *erp = ext4_force_split_extent_at(handle, inode1,
5717                                               !! 5853                                                 &path1, lblk1, 0);
5718                         if (IS_ERR(path1)) {  !! 5854                         if (unlikely(*erp))
5719                                 *erp = PTR_ER !! 5855                                 goto finish;
5720                                 goto errout;  << 
5721                         }                     << 
5722                 }                                5856                 }
5723                 if (e2_blk < lblk2) {            5857                 if (e2_blk < lblk2) {
5724                         split = 1;               5858                         split = 1;
5725                         path2 = ext4_force_sp !! 5859                         *erp = ext4_force_split_extent_at(handle, inode2,
5726                                               !! 5860                                                 &path2,  lblk2, 0);
5727                         if (IS_ERR(path2)) {  !! 5861                         if (unlikely(*erp))
5728                                 *erp = PTR_ER !! 5862                                 goto finish;
5729                                 goto errout;  << 
5730                         }                     << 
5731                 }                                5863                 }
5732                 /* ext4_split_extent_at() may    5864                 /* ext4_split_extent_at() may result in leaf extent split,
5733                  * path must be revalidated.    5865                  * path must be revalidated. */
5734                 if (split)                       5866                 if (split)
5735                         continue;             !! 5867                         goto repeat;
5736                                                  5868 
5737                 /* Prepare right boundary */     5869                 /* Prepare right boundary */
5738                 len = count;                     5870                 len = count;
5739                 if (len > e1_blk + e1_len - l    5871                 if (len > e1_blk + e1_len - lblk1)
5740                         len = e1_blk + e1_len    5872                         len = e1_blk + e1_len - lblk1;
5741                 if (len > e2_blk + e2_len - l    5873                 if (len > e2_blk + e2_len - lblk2)
5742                         len = e2_blk + e2_len    5874                         len = e2_blk + e2_len - lblk2;
5743                                                  5875 
5744                 if (len != e1_len) {             5876                 if (len != e1_len) {
5745                         split = 1;               5877                         split = 1;
5746                         path1 = ext4_force_sp !! 5878                         *erp = ext4_force_split_extent_at(handle, inode1,
5747                                               !! 5879                                                 &path1, lblk1 + len, 0);
5748                         if (IS_ERR(path1)) {  !! 5880                         if (unlikely(*erp))
5749                                 *erp = PTR_ER !! 5881                                 goto finish;
5750                                 goto errout;  << 
5751                         }                     << 
5752                 }                                5882                 }
5753                 if (len != e2_len) {             5883                 if (len != e2_len) {
5754                         split = 1;               5884                         split = 1;
5755                         path2 = ext4_force_sp !! 5885                         *erp = ext4_force_split_extent_at(handle, inode2,
5756                                               !! 5886                                                 &path2, lblk2 + len, 0);
5757                         if (IS_ERR(path2)) {  !! 5887                         if (*erp)
5758                                 *erp = PTR_ER !! 5888                                 goto finish;
5759                                 goto errout;  << 
5760                         }                     << 
5761                 }                                5889                 }
5762                 /* ext4_split_extent_at() may    5890                 /* ext4_split_extent_at() may result in leaf extent split,
5763                  * path must be revalidated.    5891                  * path must be revalidated. */
5764                 if (split)                       5892                 if (split)
5765                         continue;             !! 5893                         goto repeat;
5766                                                  5894 
5767                 BUG_ON(e2_len != e1_len);        5895                 BUG_ON(e2_len != e1_len);
5768                 *erp = ext4_ext_get_access(ha    5896                 *erp = ext4_ext_get_access(handle, inode1, path1 + path1->p_depth);
5769                 if (unlikely(*erp))              5897                 if (unlikely(*erp))
5770                         goto errout;          !! 5898                         goto finish;
5771                 *erp = ext4_ext_get_access(ha    5899                 *erp = ext4_ext_get_access(handle, inode2, path2 + path2->p_depth);
5772                 if (unlikely(*erp))              5900                 if (unlikely(*erp))
5773                         goto errout;          !! 5901                         goto finish;
5774                                                  5902 
5775                 /* Both extents are fully ins    5903                 /* Both extents are fully inside boundaries. Swap it now */
5776                 tmp_ex = *ex1;                   5904                 tmp_ex = *ex1;
5777                 ext4_ext_store_pblock(ex1, ex    5905                 ext4_ext_store_pblock(ex1, ext4_ext_pblock(ex2));
5778                 ext4_ext_store_pblock(ex2, ex    5906                 ext4_ext_store_pblock(ex2, ext4_ext_pblock(&tmp_ex));
5779                 ex1->ee_len = cpu_to_le16(e2_    5907                 ex1->ee_len = cpu_to_le16(e2_len);
5780                 ex2->ee_len = cpu_to_le16(e1_    5908                 ex2->ee_len = cpu_to_le16(e1_len);
5781                 if (unwritten)                   5909                 if (unwritten)
5782                         ext4_ext_mark_unwritt    5910                         ext4_ext_mark_unwritten(ex2);
5783                 if (ext4_ext_is_unwritten(&tm    5911                 if (ext4_ext_is_unwritten(&tmp_ex))
5784                         ext4_ext_mark_unwritt    5912                         ext4_ext_mark_unwritten(ex1);
5785                                                  5913 
5786                 ext4_ext_try_to_merge(handle,    5914                 ext4_ext_try_to_merge(handle, inode2, path2, ex2);
5787                 ext4_ext_try_to_merge(handle,    5915                 ext4_ext_try_to_merge(handle, inode1, path1, ex1);
5788                 *erp = ext4_ext_dirty(handle,    5916                 *erp = ext4_ext_dirty(handle, inode2, path2 +
5789                                       path2->    5917                                       path2->p_depth);
5790                 if (unlikely(*erp))              5918                 if (unlikely(*erp))
5791                         goto errout;          !! 5919                         goto finish;
5792                 *erp = ext4_ext_dirty(handle,    5920                 *erp = ext4_ext_dirty(handle, inode1, path1 +
5793                                       path1->    5921                                       path1->p_depth);
5794                 /*                               5922                 /*
5795                  * Looks scary, eh? The seco    5923                  * Looks scary, eh? The second inode already points to new blocks,
5796                  * and it was successfully di    5924                  * and it was successfully dirtied. But luckily an error can happen
5797                  * only due to a journal erro    5925                  * only due to a journal error, so the full transaction will be
5798                  * aborted anyway.               5926                  * aborted anyway.
5799                  */                              5927                  */
5800                 if (unlikely(*erp))              5928                 if (unlikely(*erp))
5801                         goto errout;          !! 5929                         goto finish;
5802                                               << 
5803                 lblk1 += len;                    5930                 lblk1 += len;
5804                 lblk2 += len;                    5931                 lblk2 += len;
5805                 replaced_count += len;           5932                 replaced_count += len;
5806                 count -= len;                    5933                 count -= len;
5807         }                                     << 
5808                                               << 
5809 errout:                                       << 
5810         ext4_free_ext_path(path1);            << 
5811         ext4_free_ext_path(path2);            << 
5812         return replaced_count;                << 
5813 }                                             << 
5814                                               << 
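ext4_swap_extents() is the metadata half of ext4's online defragmentation: its in-tree caller is ext4_move_extents() in fs/ext4/move_extent.c, reached from userspace through the EXT4_IOC_MOVE_EXT ioctl that e4defrag uses. A rough sketch of that user-visible entry point follows; the struct layout and ioctl number are assumed to match the kernel's ext4 headers and are repeated inline only for illustration.

/* Rough userspace sketch of EXT4_IOC_MOVE_EXT, which ultimately drives
 * ext4_swap_extents(): swap @len blocks at @orig_start of the open file
 * with the blocks at @donor_start of a donor file. Struct layout and
 * ioctl number below are assumed, not taken from a system header. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>

struct move_extent {
        uint32_t reserved;      /* should be zero */
        uint32_t donor_fd;      /* donor file descriptor */
        uint64_t orig_start;    /* logical start of the original file, in blocks */
        uint64_t donor_start;   /* logical start of the donor file, in blocks */
        uint64_t len;           /* number of blocks to swap */
        uint64_t moved_len;     /* filled in by the kernel: blocks actually moved */
};
#define EXT4_IOC_MOVE_EXT       _IOWR('f', 15, struct move_extent)

static int swap_with_donor(int orig_fd, int donor_fd,
                           uint64_t orig_start, uint64_t donor_start,
                           uint64_t len, uint64_t *moved)
{
        struct move_extent me = {
                .donor_fd = (uint32_t)donor_fd,
                .orig_start = orig_start,
                .donor_start = donor_start,
                .len = len,
        };
        int ret = ioctl(orig_fd, EXT4_IOC_MOVE_EXT, &me);

        if (ret == 0 && moved)
                *moved = me.moved_len;
        return ret;
}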
5815 /*                                            << 
5816  * ext4_clu_mapped - determine whether any bl << 
5817  *                   been mapped to a physica << 
5818  *                                            << 
5819  * @inode - file containing the logical clust << 
5820  * @lclu - logical cluster of interest        << 
5821  *                                            << 
5822  * Returns 1 if any block in the logical clus << 
5823  * that a physical cluster has been allocated << 
5824  * returns 0.  Can also return negative error << 
5825  * ext4_ext_map_blocks().                     << 
5826  */                                           << 
5827 int ext4_clu_mapped(struct inode *inode, ext4 << 
5828 {                                             << 
5829         struct ext4_sb_info *sbi = EXT4_SB(in << 
5830         struct ext4_ext_path *path;           << 
5831         int depth, mapped = 0, err = 0;       << 
5832         struct ext4_extent *extent;           << 
5833         ext4_lblk_t first_lblk, first_lclu, l << 
5834                                               << 
5835         /*                                    << 
5836          * if data can be stored inline, the  << 
5837          * mapped - no physical clusters have << 
5838          * file has no extents                << 
5839          */                                   << 
5840         if (ext4_test_inode_state(inode, EXT4 << 
5841             ext4_has_inline_data(inode))      << 
5842                 return 0;                     << 
5843                                               << 
5844         /* search for the extent closest to t << 
5845         path = ext4_find_extent(inode, EXT4_C << 
5846         if (IS_ERR(path))                     << 
5847                 return PTR_ERR(path);         << 
5848                                               << 
5849         depth = ext_depth(inode);             << 
5850                                               << 
5851         /*                                    << 
5852          * A consistent leaf must not be empt << 
5853          * though, _during_ tree modification << 
5854          * be put in ext4_find_extent().      << 
5855          */                                   << 
5856         if (unlikely(path[depth].p_ext == NUL << 
5857                 EXT4_ERROR_INODE(inode,       << 
5858                     "bad extent address - lbl << 
5859                                  (unsigned lo << 
5860                                  depth, path[ << 
5861                 err = -EFSCORRUPTED;          << 
5862                 goto out;                     << 
5863         }                                     << 
5864                                               << 
5865         extent = path[depth].p_ext;           << 
5866                                               << 
5867         /* can't be mapped if the extent tree << 
5868         if (extent == NULL)                   << 
5869                 goto out;                     << 
5870                                               << 
5871         first_lblk = le32_to_cpu(extent->ee_b << 
5872         first_lclu = EXT4_B2C(sbi, first_lblk << 
5873                                                  5934 
5874         /*                                    !! 5935         repeat:
5875          * Three possible outcomes at this po !! 5936                 ext4_ext_drop_refs(path1);
5876          * the target cluster, to the left of !! 5937                 kfree(path1);
5877          * right of the target cluster.  The  !! 5938                 ext4_ext_drop_refs(path2);
5878          * The last case indicates the target !! 5939                 kfree(path2);
5879          */                                   !! 5940                 path1 = path2 = NULL;
5880         if (lclu >= first_lclu) {             << 
5881                 last_lclu = EXT4_B2C(sbi, fir << 
5882                                      ext4_ext << 
5883                 if (lclu <= last_lclu) {      << 
5884                         mapped = 1;           << 
5885                 } else {                      << 
5886                         first_lblk = ext4_ext << 
5887                         first_lclu = EXT4_B2C << 
5888                         if (lclu == first_lcl << 
5889                                 mapped = 1;   << 
5890                 }                             << 
5891         }                                        5941         }
5892                                               !! 5942         return replaced_count;
5893 out:                                          << 
5894         ext4_free_ext_path(path);             << 
5895                                               << 
5896         return err ? err : mapped;            << 
5897 }                                             << 
5898                                               << 
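The cluster arithmetic behind ext4_clu_mapped() is a simple shift: on a bigalloc filesystem a cluster is 2^s_cluster_bits file system blocks, and EXT4_B2C() divides a block number by that power of two. A standalone illustration with hypothetical values (plain C, not kernel code):

/* Illustration of the block/cluster arithmetic used above (bigalloc). */
#include <stdio.h>

int main(void)
{
        unsigned int block_size = 4096;         /* fs block size in bytes */
        unsigned int cluster_bits = 4;          /* cluster = 16 blocks = 64 KiB */
        unsigned int lblk = 37;                 /* some logical block */
        unsigned int lclu = lblk >> cluster_bits;       /* what EXT4_B2C() computes */

        printf("block %u (at byte %u) lies in logical cluster %u\n",
               lblk, lblk * block_size, lclu);
        return 0;
}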
5899 /*                                            << 
5900  * Updates physical block address and unwritt << 
5901  * starting at lblk start and of len. If such << 
5902  * this function splits the extent tree appro << 
5903  * extent like this.  This function is called << 
5904  * replay path.  Returns 0 on success and err << 
5905  */                                           << 
5906 int ext4_ext_replay_update_ex(struct inode *i << 
5907                               int len, int un << 
5908 {                                             << 
5909         struct ext4_ext_path *path;           << 
5910         struct ext4_extent *ex;               << 
5911         int ret;                              << 
5912                                               << 
5913         path = ext4_find_extent(inode, start, << 
5914         if (IS_ERR(path))                     << 
5915                 return PTR_ERR(path);         << 
5916         ex = path[path->p_depth].p_ext;       << 
5917         if (!ex) {                            << 
5918                 ret = -EFSCORRUPTED;          << 
5919                 goto out;                     << 
5920         }                                     << 
5921                                               << 
5922         if (le32_to_cpu(ex->ee_block) != star << 
5923                 ext4_ext_get_actual_len(ex) ! << 
5924                 /* We need to split this exte << 
5925                 down_write(&EXT4_I(inode)->i_ << 
5926                 path = ext4_force_split_exten << 
5927                 up_write(&EXT4_I(inode)->i_da << 
5928                 if (IS_ERR(path)) {           << 
5929                         ret = PTR_ERR(path);  << 
5930                         goto out;             << 
5931                 }                             << 
5932                                               << 
5933                 path = ext4_find_extent(inode << 
5934                 if (IS_ERR(path))             << 
5935                         return PTR_ERR(path); << 
5936                                               << 
5937                 ex = path[path->p_depth].p_ex << 
5938                 WARN_ON(le32_to_cpu(ex->ee_bl << 
5939                                               << 
5940                 if (ext4_ext_get_actual_len(e << 
5941                         down_write(&EXT4_I(in << 
5942                         path = ext4_force_spl << 
5943                                               << 
5944                         up_write(&EXT4_I(inod << 
5945                         if (IS_ERR(path)) {   << 
5946                                 ret = PTR_ERR << 
5947                                 goto out;     << 
5948                         }                     << 
5949                                               << 
5950                         path = ext4_find_exte << 
5951                         if (IS_ERR(path))     << 
5952                                 return PTR_ER << 
5953                         ex = path[path->p_dep << 
5954                 }                             << 
5955         }                                     << 
5956         if (unwritten)                        << 
5957                 ext4_ext_mark_unwritten(ex);  << 
5958         else                                  << 
5959                 ext4_ext_mark_initialized(ex) << 
5960         ext4_ext_store_pblock(ex, pblk);      << 
5961         down_write(&EXT4_I(inode)->i_data_sem << 
5962         ret = ext4_ext_dirty(NULL, inode, &pa << 
5963         up_write(&EXT4_I(inode)->i_data_sem); << 
5964 out:                                          << 
5965         ext4_free_ext_path(path);             << 
5966         ext4_mark_inode_dirty(NULL, inode);   << 
5967         return ret;                           << 
5968 }                                             << 
5969                                               << 
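The replay update above ends up with an extent that exactly matches the (start, len) range by splitting at most twice: first at start, then, if the resulting extent is still too long, at start + len. A minimal userspace sketch of that carving idea, assuming a toy interval type; struct ival and split_at() are illustrative names, not ext4's.

/*
 * Userspace sketch of the "split twice to carve an exact extent" idea used by
 * the replay path above: split a covering interval at 'start', then at
 * 'start + len', so one of the resulting pieces is exactly [start, start+len).
 * struct ival and split_at() are illustrative names only.
 */
#include <stdio.h>

struct ival { unsigned int start, len; };

/* Split iv at 'at' if 'at' falls strictly inside it; returns number of pieces. */
static int split_at(struct ival iv, unsigned int at, struct ival out[2])
{
        if (at <= iv.start || at >= iv.start + iv.len) {
                out[0] = iv;
                return 1;
        }
        out[0] = (struct ival){ iv.start, at - iv.start };
        out[1] = (struct ival){ at, iv.start + iv.len - at };
        return 2;
}

int main(void)
{
        /* one large interval; carve out [20, 28) the way the replay code does */
        struct ival iv = { 10, 30 }, a[2], b[2];
        int n = split_at(iv, 20, a);            /* first split, at 'start'        */
        int m = split_at(a[n - 1], 28, b);      /* second split, at 'start + len' */

        for (int i = 0; i < n - 1; i++)
                printf("[%u,+%u) ", a[i].start, a[i].len);
        for (int i = 0; i < m; i++)
                printf("[%u,+%u) ", b[i].start, b[i].len);
        printf("\n");                           /* prints: [10,+10) [20,+8) [28,+12) */
        return 0;
}

The middle piece, [20,+8), covers exactly the requested range; in the listing above it is that extent whose physical block and unwritten status the replay code then updates.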
5970 /* Try to shrink the extent tree */           << 
5971 void ext4_ext_replay_shrink_inode(struct inod << 
5972 {                                             << 
5973         struct ext4_ext_path *path = NULL;    << 
5974         struct ext4_extent *ex;               << 
5975         ext4_lblk_t old_cur, cur = 0;         << 
5976                                               << 
5977         while (cur < end) {                   << 
5978                 path = ext4_find_extent(inode << 
5979                 if (IS_ERR(path))             << 
5980                         return;               << 
5981                 ex = path[path->p_depth].p_ex << 
5982                 if (!ex) {                    << 
5983                         ext4_free_ext_path(pa << 
5984                         ext4_mark_inode_dirty << 
5985                         return;               << 
5986                 }                             << 
5987                 old_cur = cur;                << 
5988                 cur = le32_to_cpu(ex->ee_bloc << 
5989                 if (cur <= old_cur)           << 
5990                         cur = old_cur + 1;    << 
5991                 ext4_ext_try_to_merge(NULL, i << 
5992                 down_write(&EXT4_I(inode)->i_ << 
5993                 ext4_ext_dirty(NULL, inode, & << 
5994                 up_write(&EXT4_I(inode)->i_da << 
5995                 ext4_mark_inode_dirty(NULL, i << 
5996                 ext4_free_ext_path(path);     << 
5997         }                                     << 
5998 }                                             << 
5999                                               << 
6000 /* Check if *cur is a hole and if it is, skip << 
6001 static int skip_hole(struct inode *inode, ext << 
6002 {                                             << 
6003         int ret;                              << 
6004         struct ext4_map_blocks map;           << 
6005                                               << 
6006         map.m_lblk = *cur;                    << 
6007         map.m_len = ((inode->i_size) >> inode << 
6008                                               << 
6009         ret = ext4_map_blocks(NULL, inode, &m << 
6010         if (ret < 0)                          << 
6011                 return ret;                   << 
6012         if (ret != 0)                         << 
6013                 return 0;                     << 
6014         *cur = *cur + map.m_len;              << 
6015         return 0;                             << 
6016 }                                             << 
6017                                               << 
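skip_hole() above leans on the mapping lookup reporting, for an unmapped position, how long the hole is (via map.m_len), so the cursor can jump straight to the next allocated block in one step. A rough userspace analogue, with a toy sorted extent array standing in for ext4_map_blocks(); lookup() and skip_hole_toy() are made-up names.

/*
 * Rough userspace analogue of skip_hole() above: a toy sorted extent array
 * stands in for ext4_map_blocks().  lookup() fills *len with either the
 * mapped length (and returns 1) or the hole length up to the next extent
 * (and returns 0), which is what lets the caller advance past a hole.
 * All names here are made up for illustration.
 */
#include <stdio.h>

struct extent { unsigned int lblk, len; };

static int lookup(const struct extent *ex, int nr, unsigned int cur,
                  unsigned int end, unsigned int *len)
{
        for (int i = 0; i < nr; i++) {
                if (cur >= ex[i].lblk && cur < ex[i].lblk + ex[i].len) {
                        *len = ex[i].lblk + ex[i].len - cur;    /* mapped run */
                        return 1;
                }
                if (cur < ex[i].lblk) {
                        *len = ex[i].lblk - cur;        /* hole up to next extent */
                        return 0;
                }
        }
        *len = end - cur;                               /* hole to end of file */
        return 0;
}

/* Equivalent of skip_hole(): move cur past a hole, leave it alone otherwise. */
static void skip_hole_toy(const struct extent *ex, int nr, unsigned int end,
                          unsigned int *cur)
{
        unsigned int len;

        if (lookup(ex, nr, *cur, end, &len) == 0)
                *cur += len;
}

int main(void)
{
        struct extent ex[] = { { 4, 4 }, { 16, 8 } };
        unsigned int cur = 0;

        skip_hole_toy(ex, 2, 32, &cur);         /* blocks 0..3 are a hole */
        printf("%u\n", cur);                    /* prints 4 */
        return 0;
}

The cursor only moves when the lookup reports a hole, mirroring the early return for a mapped block in the kernel version.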
6018 /* Count number of blocks used by this inode  << 
6019 int ext4_ext_replay_set_iblocks(struct inode  << 
6020 {                                             << 
6021         struct ext4_ext_path *path = NULL, *p << 
6022         struct ext4_extent *ex;               << 
6023         ext4_lblk_t cur = 0, end;             << 
6024         int numblks = 0, i, ret = 0;          << 
6025         ext4_fsblk_t cmp1, cmp2;              << 
6026         struct ext4_map_blocks map;           << 
6027                                               << 
6028         /* Determine the size of the file first <<
6029         path = ext4_find_extent(inode, EXT_MA << 
6030                                         EXT4_ << 
6031         if (IS_ERR(path))                     << 
6032                 return PTR_ERR(path);         << 
6033         ex = path[path->p_depth].p_ext;       << 
6034         if (!ex)                              << 
6035                 goto out;                     << 
6036         end = le32_to_cpu(ex->ee_block) + ext << 
6037                                               << 
6038         /* Count the number of data blocks */ << 
6039         cur = 0;                              << 
6040         while (cur < end) {                   << 
6041                 map.m_lblk = cur;             << 
6042                 map.m_len = end - cur;        << 
6043                 ret = ext4_map_blocks(NULL, i << 
6044                 if (ret < 0)                  << 
6045                         break;                << 
6046                 if (ret > 0)                  << 
6047                         numblks += ret;       << 
6048                 cur = cur + map.m_len;        << 
6049         }                                     << 
6050                                               << 
6051         /*                                    << 
6052          * Count the number of extent tree bl << 
6053          * two successive extents and determi << 
6054          * their paths. When path is differen << 
6055          * we compare the blocks in the path  << 
6056          * iblocks by total number of differe << 
6057          */                                   << 
6058         cur = 0;                              << 
6059         ret = skip_hole(inode, &cur);         << 
6060         if (ret < 0)                          << 
6061                 goto out;                     << 
6062         path = ext4_find_extent(inode, cur, p << 
6063         if (IS_ERR(path))                     << 
6064                 goto out;                     << 
6065         numblks += path->p_depth;             << 
6066         while (cur < end) {                   << 
6067                 path = ext4_find_extent(inode << 
6068                 if (IS_ERR(path))             << 
6069                         break;                << 
6070                 ex = path[path->p_depth].p_ex << 
6071                 if (!ex)                      << 
6072                         goto cleanup;         << 
6073                                               << 
6074                 cur = max(cur + 1, le32_to_cp << 
6075                                         ext4_ << 
6076                 ret = skip_hole(inode, &cur); << 
6077                 if (ret < 0)                  << 
6078                         break;                << 
6079                                               << 
6080                 path2 = ext4_find_extent(inod << 
6081                 if (IS_ERR(path2))            << 
6082                         break;                << 
6083                                               << 
6084                 for (i = 0; i <= max(path->p_ << 
6085                         cmp1 = cmp2 = 0;      << 
6086                         if (i <= path->p_dept << 
6087                                 cmp1 = path[i << 
6088                                         path[ << 
6089                         if (i <= path2->p_dep << 
6090                                 cmp2 = path2[ << 
6091                                         path2 << 
6092                         if (cmp1 != cmp2 && c << 
6093                                 numblks++;    << 
6094                 }                             << 
6095         }                                     << 
6096                                               << 
6097 out:                                          << 
6098         inode->i_blocks = numblks << (inode-> << 
6099         ext4_mark_inode_dirty(NULL, inode);   << 
6100 cleanup:                                      << 
6101         ext4_free_ext_path(path);             << 
6102         ext4_free_ext_path(path2);            << 
6103         return 0;                             << 
6104 }                                             << 
6105                                               << 
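The second loop above estimates how many extent tree blocks the inode uses by looking up the path (the root-to-leaf chain of tree blocks) for successive extents and counting only those blocks that differ from the previous path at the same depth, so index blocks shared by neighbouring extents are not double-counted. A minimal sketch of that comparison, using plain arrays of block numbers in place of struct ext4_ext_path; diff_path_blocks() is an illustrative name.

/*
 * Minimal sketch of the path-comparison above, with plain arrays of tree
 * block numbers standing in for struct ext4_ext_path.  A block of the
 * current path is counted only when it differs from the block at the same
 * depth in the previous path.  diff_path_blocks() is an illustrative name.
 */
#include <stdio.h>
#include <stdint.h>

static int diff_path_blocks(const uint64_t *prev, int n1,
                            const uint64_t *cur, int n2)
{
        int max = n1 > n2 ? n1 : n2, count = 0;

        for (int i = 0; i < max; i++) {
                uint64_t a = i < n1 ? prev[i] : 0;      /* 0 = no block at this depth */
                uint64_t b = i < n2 ? cur[i] : 0;

                if (a != b && b != 0)
                        count++;
        }
        return count;
}

int main(void)
{
        /* two lookups share the root (block 100) but use different leaf blocks */
        uint64_t p1[] = { 100, 200 };
        uint64_t p2[] = { 100, 300 };

        printf("%d\n", diff_path_blocks(p1, 2, p2, 2)); /* prints 1 */
        return 0;
}

As in the kernel loop, a zero stands for "no block at this depth" on the shallower path, which is why only nonzero, differing entries are counted.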
6106 int ext4_ext_clear_bb(struct inode *inode)    << 
6107 {                                             << 
6108         struct ext4_ext_path *path = NULL;    << 
6109         struct ext4_extent *ex;               << 
6110         ext4_lblk_t cur = 0, end;             << 
6111         int j, ret = 0;                       << 
6112         struct ext4_map_blocks map;           << 
6113                                               << 
6114         if (ext4_test_inode_flag(inode, EXT4_ << 
6115                 return 0;                     << 
6116                                               << 
6117         /* Determine the size of the file first <<
6118         path = ext4_find_extent(inode, EXT_MA << 
6119                                         EXT4_ << 
6120         if (IS_ERR(path))                     << 
6121                 return PTR_ERR(path);         << 
6122         ex = path[path->p_depth].p_ext;       << 
6123         if (!ex)                              << 
6124                 goto out;                     << 
6125         end = le32_to_cpu(ex->ee_block) + ext << 
6126                                               << 
6127         cur = 0;                              << 
6128         while (cur < end) {                   << 
6129                 map.m_lblk = cur;             << 
6130                 map.m_len = end - cur;        << 
6131                 ret = ext4_map_blocks(NULL, i << 
6132                 if (ret < 0)                  << 
6133                         break;                << 
6134                 if (ret > 0) {                << 
6135                         path = ext4_find_exte << 
6136                         if (!IS_ERR(path)) {  << 
6137                                 for (j = 0; j << 
6138                                         ext4_ << 
6139                                               << 
6140                                         ext4_ << 
6141                                               << 
6142                                 }             << 
6143                         } else {              << 
6144                                 path = NULL;  << 
6145                         }                     << 
6146                         ext4_mb_mark_bb(inode << 
6147                         ext4_fc_record_region << 
6148                                         map.m << 
6149                 }                             << 
6150                 cur = cur + map.m_len;        << 
6151         }                                     << 
6152                                               << 
6153 out:                                          << 
6154         ext4_free_ext_path(path);             << 
6155         return 0;                             << 
6156 }                                                5943 }
6157                                                  5944 
