// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <asm/byteorder.h>
#include <linux/swap.h>
#include <linux/mpage.h>
#include <linux/quotaops.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include <linux/mm.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "aops.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "file.h"
#include "inode.h"
#include "journal.h"
#include "suballoc.h"
#include "super.h"
#include "symlink.h"
#include "refcounttree.h"
#include "ocfs2_trace.h"

#include "buffer_head_io.h"
#include "dir.h"
#include "namei.h"
#include "sysfile.h"

static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create)
{
	int err = -EIO;
	int status;
	struct ocfs2_dinode *fe = NULL;
	struct buffer_head *bh = NULL;
	struct buffer_head *buffer_cache_bh = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	void *kaddr;

	trace_ocfs2_symlink_get_block(
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			(unsigned long long)iblock, bh_result, create);

	BUG_ON(ocfs2_inode_is_fast_symlink(inode));

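	/*
	 * A non-fast symlink stores its target as ordinary file data of
	 * at most PATH_MAX + 1 bytes, so any block at or beyond that
	 * byte offset cannot be part of a valid symlink target.
	 */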
	if ((iblock << inode->i_sb->s_blocksize_bits) > PATH_MAX + 1) {
		mlog(ML_ERROR, "block offset > PATH_MAX: %llu",
		     (unsigned long long)iblock);
		goto bail;
	}

	status = ocfs2_read_inode_block(inode, &bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}
	fe = (struct ocfs2_dinode *) bh->b_data;

	if ((u64)iblock >= ocfs2_clusters_to_blocks(inode->i_sb,
						    le32_to_cpu(fe->i_clusters))) {
		err = -ENOMEM;
		mlog(ML_ERROR, "block offset is outside the allocated size: "
		     "%llu\n", (unsigned long long)iblock);
		goto bail;
	}

	/* We don't use the page cache to create symlink data, so if
	 * need be, copy it over from the buffer cache. */
	if (!buffer_uptodate(bh_result) && ocfs2_inode_is_new(inode)) {
		u64 blkno = le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) +
			    iblock;
		buffer_cache_bh = sb_getblk(osb->sb, blkno);
		if (!buffer_cache_bh) {
			err = -ENOMEM;
			mlog(ML_ERROR, "couldn't getblock for symlink!\n");
			goto bail;
		}

		/* we haven't locked out transactions, so a commit
		 * could've happened. Since we've got a reference on
		 * the bh, even if it commits while we're doing the
		 * copy, the data is still good. */
		if (buffer_jbd(buffer_cache_bh)
		    && ocfs2_inode_is_new(inode)) {
			kaddr = kmap_atomic(bh_result->b_page);
			if (!kaddr) {
				mlog(ML_ERROR, "couldn't kmap!\n");
				goto bail;
			}
			memcpy(kaddr + (bh_result->b_size * iblock),
			       buffer_cache_bh->b_data,
			       bh_result->b_size);
			kunmap_atomic(kaddr);
			set_buffer_uptodate(bh_result);
		}
		brelse(buffer_cache_bh);
	}

	map_bh(bh_result, inode->i_sb,
	       le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) + iblock);

	err = 0;

bail:
	brelse(bh);

	return err;
}

static int ocfs2_lock_get_block(struct inode *inode, sector_t iblock,
		    struct buffer_head *bh_result, int create)
{
	int ret = 0;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	down_read(&oi->ip_alloc_sem);
	ret = ocfs2_get_block(inode, iblock, bh_result, create);
	up_read(&oi->ip_alloc_sem);

	return ret;
}

int ocfs2_get_block(struct inode *inode, sector_t iblock,
		    struct buffer_head *bh_result, int create)
{
	int err = 0;
	unsigned int ext_flags;
	u64 max_blocks = bh_result->b_size >> inode->i_blkbits;
	u64 p_blkno, count, past_eof;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	trace_ocfs2_get_block((unsigned long long)OCFS2_I(inode)->ip_blkno,
			      (unsigned long long)iblock, bh_result, create);

	if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE)
		mlog(ML_NOTICE, "get_block on system inode 0x%p (%lu)\n",
		     inode, inode->i_ino);

	if (S_ISLNK(inode->i_mode)) {
		/* this always does I/O for some reason. */
		err = ocfs2_symlink_get_block(inode, iblock, bh_result, create);
		goto bail;
	}

	err = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno, &count,
					  &ext_flags);
	if (err) {
		mlog(ML_ERROR, "Error %d from get_blocks(0x%p, %llu, 1, "
		     "%llu, NULL)\n", err, inode, (unsigned long long)iblock,
		     (unsigned long long)p_blkno);
		goto bail;
	}

	if (max_blocks < count)
		count = max_blocks;

	/*
	 * ocfs2 never allocates in this function - the only time we
	 * need to use BH_New is when we're extending i_size on a file
	 * system which doesn't support holes, in which case BH_New
	 * allows __block_write_begin() to zero.
	 *
	 * If we see this on a sparse file system, then a truncate has
	 * raced us and removed the cluster. In this case, we clear
	 * the buffers dirty and uptodate bits and let the buffer code
	 * ignore it as a hole.
	 */
	if (create && p_blkno == 0 && ocfs2_sparse_alloc(osb)) {
		clear_buffer_dirty(bh_result);
		clear_buffer_uptodate(bh_result);
		goto bail;
	}
	/* Treat the unwritten extent as a hole for zeroing purposes. */
	if (p_blkno && !(ext_flags & OCFS2_EXT_UNWRITTEN))
		map_bh(bh_result, inode->i_sb, p_blkno);

	bh_result->b_size = count << inode->i_blkbits;

	if (!ocfs2_sparse_alloc(osb)) {
		if (p_blkno == 0) {
			err = -EIO;
			mlog(ML_ERROR,
			     "iblock = %llu p_blkno = %llu blkno=(%llu)\n",
			     (unsigned long long)iblock,
			     (unsigned long long)p_blkno,
			     (unsigned long long)OCFS2_I(inode)->ip_blkno);
			mlog(ML_ERROR, "Size %llu, clusters %u\n",
			     (unsigned long long)i_size_read(inode),
			     OCFS2_I(inode)->ip_clusters);
			dump_stack();
			goto bail;
		}
	}

	past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));

	trace_ocfs2_get_block_end((unsigned long long)OCFS2_I(inode)->ip_blkno,
				  (unsigned long long)past_eof);
	if (create && (iblock >= past_eof))
		set_buffer_new(bh_result);

bail:
	if (err < 0)
		err = -EIO;

	return err;
}

int ocfs2_read_inline_data(struct inode *inode, struct page *page,
			   struct buffer_head *di_bh)
{
	void *kaddr;
	loff_t size;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;

	if (!(le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_DATA_FL)) {
		ocfs2_error(inode->i_sb, "Inode %llu lost inline data flag\n",
			    (unsigned long long)OCFS2_I(inode)->ip_blkno);
		return -EROFS;
	}

	size = i_size_read(inode);

	if (size > PAGE_SIZE ||
	    size > ocfs2_max_inline_data_with_xattr(inode->i_sb, di)) {
		ocfs2_error(inode->i_sb,
			    "Inode %llu with inline data has bad size: %Lu\n",
			    (unsigned long long)OCFS2_I(inode)->ip_blkno,
			    (unsigned long long)size);
		return -EROFS;
	}

	kaddr = kmap_atomic(page);
	if (size)
		memcpy(kaddr, di->id2.i_data.id_data, size);
	/* Clear the remaining part of the page */
	memset(kaddr + size, 0, PAGE_SIZE - size);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);

	SetPageUptodate(page);

	return 0;
}

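/*
 * Serve a read of an inline-data inode straight from the on-disk inode
 * block.  The page arrives locked and is unlocked on exit, on both the
 * success and the error path.
 */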
static int ocfs2_readpage_inline(struct inode *inode, struct page *page)
{
	int ret;
	struct buffer_head *di_bh = NULL;

	BUG_ON(!PageLocked(page));
	BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL));

	ret = ocfs2_read_inode_block(inode, &di_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_read_inline_data(inode, page, di_bh);
out:
	unlock_page(page);

	brelse(di_bh);
	return ret;
}

static int ocfs2_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	loff_t start = folio_pos(folio);
	int ret, unlock = 1;

	trace_ocfs2_readpage((unsigned long long)oi->ip_blkno, folio->index);

	ret = ocfs2_inode_lock_with_page(inode, NULL, 0, &folio->page);
	if (ret != 0) {
		if (ret == AOP_TRUNCATED_PAGE)
			unlock = 0;
		mlog_errno(ret);
		goto out;
	}

	if (down_read_trylock(&oi->ip_alloc_sem) == 0) {
		/*
		 * Unlock the folio and cycle ip_alloc_sem so that we don't
		 * busyloop waiting for ip_alloc_sem to unlock.
		 */
		ret = AOP_TRUNCATED_PAGE;
		folio_unlock(folio);
		unlock = 0;
		down_read(&oi->ip_alloc_sem);
		up_read(&oi->ip_alloc_sem);
		goto out_inode_unlock;
	}

	/*
	 * i_size might have just been updated as we grabbed the meta lock.  We
	 * might now be discovering a truncate that hit on another node.
	 * block_read_full_folio->get_block freaks out if it is asked to read
	 * beyond the end of a file, so we check here.  Callers
	 * (generic_file_read, vm_ops->fault) are clever enough to check i_size
	 * and notice that the folio they just read isn't needed.
	 *
	 * XXX sys_readahead() seems to get that wrong?
	 */
	if (start >= i_size_read(inode)) {
		folio_zero_segment(folio, 0, folio_size(folio));
		folio_mark_uptodate(folio);
		ret = 0;
		goto out_alloc;
	}

	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		ret = ocfs2_readpage_inline(inode, &folio->page);
	else
		ret = block_read_full_folio(folio, ocfs2_get_block);
	unlock = 0;

out_alloc:
	up_read(&oi->ip_alloc_sem);
out_inode_unlock:
	ocfs2_inode_unlock(inode, 0);
out:
	if (unlock)
		folio_unlock(folio);
	return ret;
}

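/*
 * A note on the trylock dance above: returning AOP_TRUNCATED_PAGE with
 * the folio unlocked tells the VFS to look the folio up again and retry
 * ->read_folio, which lets us take ip_alloc_sem in the correct order
 * relative to the folio lock without busy-looping.
 */
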
/*
 * This is used only for read-ahead. Failures or difficult to handle
 * situations are safe to ignore.
 *
 * Right now, we don't bother with BH_Boundary - in-inode extent lists
 * are quite large (243 extents on 4k blocks), so most inodes don't
 * grow out to a tree. If need be, detecting boundary extents could
 * trivially be added in a future version of ocfs2_get_block().
 */
static void ocfs2_readahead(struct readahead_control *rac)
{
	int ret;
	struct inode *inode = rac->mapping->host;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	/*
	 * Use the nonblocking flag for the dlm code to avoid page
	 * lock inversion, but don't bother with retrying.
	 */
	ret = ocfs2_inode_lock_full(inode, NULL, 0, OCFS2_LOCK_NONBLOCK);
	if (ret)
		return;

	if (down_read_trylock(&oi->ip_alloc_sem) == 0)
		goto out_unlock;

	/*
	 * Don't bother with inline-data. There isn't anything
	 * to read-ahead in that case anyway...
	 */
	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		goto out_up;

	/*
	 * Check whether a remote node truncated this file - we just
	 * drop out in that case as it's not worth handling here.
	 */
	if (readahead_pos(rac) >= i_size_read(inode))
		goto out_up;

	mpage_readahead(rac, ocfs2_get_block);

out_up:
	up_read(&oi->ip_alloc_sem);
out_unlock:
	ocfs2_inode_unlock(inode, 0);
}

/* Note: Because we don't support holes, our allocation has
 * already happened (allocation writes zeros to the file data)
 * so we don't have to worry about ordered writes in
 * ocfs2_writepages.
 *
 * ->writepages is called during the process of invalidating the page cache
 * during blocked lock processing.  It can't block on any cluster locks
 * during block mapping.  It's relying on the fact that the block
 * mapping can't have disappeared under the dirty pages that it is
 * being asked to write back.
 */
static int ocfs2_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, ocfs2_get_block);
}

/* Taken from ext3.  We don't necessarily need the full blown
 * functionality yet, but IMHO it's better to cut and paste the whole
 * thing so we can avoid introducing our own bugs (and easily pick up
 * their fixes when they happen) --Mark */
int walk_page_buffers(	handle_t *handle,
			struct buffer_head *head,
			unsigned from,
			unsigned to,
			int *partial,
			int (*fn)(	handle_t *handle,
					struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (	bh = head, block_start = 0;
		ret == 0 && (bh != head || !block_start);
		block_start = block_end, bh = next)
	{
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}

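/*
 * A sketch of the intended use (the callback name is hypothetical, not
 * a function in this file): journal every buffer that a write to
 * [from, to) touched, recording in 'partial' whether any buffer we
 * skipped was not uptodate:
 *
 *	int partial = 0;
 *	ret = walk_page_buffers(handle, page_buffers(page), from, to,
 *				&partial, journal_dirty_buffer_fn);
 *
 * 'fn' runs only for buffers overlapping the range; buffers outside it
 * merely update 'partial'.
 */
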
static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
{
	sector_t status;
	u64 p_blkno = 0;
	int err = 0;
	struct inode *inode = mapping->host;

	trace_ocfs2_bmap((unsigned long long)OCFS2_I(inode)->ip_blkno,
			 (unsigned long long)block);

	/*
	 * The swap code (ab-)uses ->bmap to get a block mapping and then
	 * bypasses the file system for actual I/O.  We really can't allow
	 * that on refcounted inodes, so we have to skip out here.  And yes,
	 * 0 is the magic code for a bmap error..
	 */
	if (ocfs2_is_refcount_inode(inode))
		return 0;

	/* We don't need to lock journal system files, since they aren't
	 * accessed concurrently from multiple nodes.
	 */
	if (!INODE_JOURNAL(inode)) {
		err = ocfs2_inode_lock(inode, NULL, 0);
		if (err) {
			if (err != -ENOENT)
				mlog_errno(err);
			goto bail;
		}
		down_read(&OCFS2_I(inode)->ip_alloc_sem);
	}

	if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
		err = ocfs2_extent_map_get_blocks(inode, block, &p_blkno, NULL,
						  NULL);

	if (!INODE_JOURNAL(inode)) {
		up_read(&OCFS2_I(inode)->ip_alloc_sem);
		ocfs2_inode_unlock(inode, 0);
	}

	if (err) {
		mlog(ML_ERROR, "get_blocks() failed, block = %llu\n",
		     (unsigned long long)block);
		mlog_errno(err);
		goto bail;
	}

bail:
	status = err ? 0 : p_blkno;

	return status;
}

static bool ocfs2_release_folio(struct folio *folio, gfp_t wait)
{
	if (!folio_buffers(folio))
		return false;
	return try_to_free_buffers(folio);
}

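/*
 * Worked example for the math below (illustrative numbers): with 64K
 * pages (PAGE_SHIFT == 16) and 4K clusters (s_clustersize_bits == 12),
 * cpp is 16 clusters per page.  For cpos == 21, 21 % 16 == 5, so the
 * cluster starts at byte 5 << 12 == 20480 within the page and ends
 * 4096 bytes later.  When a page is no larger than a cluster, the
 * defaults of 0 and PAGE_SIZE stand.
 */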
static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb,
					    u32 cpos,
					    unsigned int *start,
					    unsigned int *end)
{
	unsigned int cluster_start = 0, cluster_end = PAGE_SIZE;

	if (unlikely(PAGE_SHIFT > osb->s_clustersize_bits)) {
		unsigned int cpp;

		cpp = 1 << (PAGE_SHIFT - osb->s_clustersize_bits);

		cluster_start = cpos % cpp;
		cluster_start = cluster_start << osb->s_clustersize_bits;

		cluster_end = cluster_start + osb->s_clustersize;
	}

	BUG_ON(cluster_start > PAGE_SIZE);
	BUG_ON(cluster_end > PAGE_SIZE);

	if (start)
		*start = cluster_start;
	if (end)
		*end = cluster_end;
}

/*
 * 'from' and 'to' are the region in the page to avoid zeroing.
 *
 * If pagesize > clustersize, this function will avoid zeroing outside
 * of the cluster boundary.
 *
 * from == to == 0 is code for "zero the entire cluster region"
 */
static void ocfs2_clear_page_regions(struct page *page,
				     struct ocfs2_super *osb, u32 cpos,
				     unsigned from, unsigned to)
{
	void *kaddr;
	unsigned int cluster_start, cluster_end;

	ocfs2_figure_cluster_boundaries(osb, cpos, &cluster_start, &cluster_end);

	kaddr = kmap_atomic(page);

	if (from || to) {
		if (from > cluster_start)
			memset(kaddr + cluster_start, 0, from - cluster_start);
		if (to < cluster_end)
			memset(kaddr + to, 0, cluster_end - to);
	} else {
		memset(kaddr + cluster_start, 0, cluster_end - cluster_start);
	}

	kunmap_atomic(kaddr);
}

/*
 * Nonsparse file systems fully allocate before we get to the write
 * code. This prevents ocfs2_write() from tagging the write as an
 * allocating one, which means ocfs2_map_page_blocks() might try to
 * read-in the blocks at the tail of our file. Avoid reading them by
 * testing i_size against each block offset.
 */
static int ocfs2_should_read_blk(struct inode *inode, struct folio *folio,
				 unsigned int block_start)
{
	u64 offset = folio_pos(folio) + block_start;

	if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
		return 1;

	if (i_size_read(inode) > offset)
		return 1;

	return 0;
}

/*
 * Some of this taken from __block_write_begin(). We already have our
 * mapping by now though, and the entire write will be allocating or
 * it won't, so not much need to use BH_New.
 *
 * This will also skip zeroing, which is handled externally.
 */
int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
			  struct inode *inode, unsigned int from,
			  unsigned int to, int new)
{
	struct folio *folio = page_folio(page);
	int ret = 0;
	struct buffer_head *head, *bh, *wait[2], **wait_bh = wait;
	unsigned int block_end, block_start;
	unsigned int bsize = i_blocksize(inode);

	head = folio_buffers(folio);
	if (!head)
		head = create_empty_buffers(folio, bsize, 0);

	for (bh = head, block_start = 0; bh != head || !block_start;
	     bh = bh->b_this_page, block_start += bsize) {
		block_end = block_start + bsize;

		clear_buffer_new(bh);

		/*
		 * Ignore blocks outside of our i/o range -
		 * they may belong to unallocated clusters.
		 */
		if (block_start >= to || block_end <= from) {
			if (folio_test_uptodate(folio))
				set_buffer_uptodate(bh);
			continue;
		}

		/*
		 * For an allocating write with cluster size >= page
		 * size, we always write the entire page.
		 */
		if (new)
			set_buffer_new(bh);

		if (!buffer_mapped(bh)) {
			map_bh(bh, inode->i_sb, *p_blkno);
			clean_bdev_bh_alias(bh);
		}

		if (folio_test_uptodate(folio)) {
			set_buffer_uptodate(bh);
		} else if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
			   !buffer_new(bh) &&
			   ocfs2_should_read_blk(inode, folio, block_start) &&
			   (block_start < from || block_end > to)) {
			bh_read_nowait(bh, 0);
			*wait_bh++=bh;
		}

		*p_blkno = *p_blkno + 1;
	}

	/*
	 * If we issued read requests - let them complete.
	 */
	while(wait_bh > wait) {
		wait_on_buffer(*--wait_bh);
		if (!buffer_uptodate(*wait_bh))
			ret = -EIO;
	}

	if (ret == 0 || !new)
		return ret;
	/*
	 * If we get -EIO above, zero out any newly allocated blocks
	 * to avoid exposing stale data.
	 */
	bh = head;
	block_start = 0;
	do {
		block_end = block_start + bsize;
		if (block_end <= from)
			goto next_bh;
		if (block_start >= to)
			break;

		folio_zero_range(folio, block_start, bh->b_size);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);

next_bh:
		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);

	return ret;
}

#if (PAGE_SIZE >= OCFS2_MAX_CLUSTERSIZE)
#define OCFS2_MAX_CTXT_PAGES	1
#else
#define OCFS2_MAX_CTXT_PAGES	(OCFS2_MAX_CLUSTERSIZE / PAGE_SIZE)
#endif

#define OCFS2_MAX_CLUSTERS_PER_PAGE	(PAGE_SIZE / OCFS2_MIN_CLUSTERSIZE)

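/*
 * For scale (assuming the usual limits of OCFS2_MAX_CLUSTERSIZE == 1M
 * and OCFS2_MIN_CLUSTERSIZE == 4K): with 4K pages, one cluster can
 * span up to 256 pages, so OCFS2_MAX_CTXT_PAGES is 256 while a page
 * holds at most one cluster; with pages of 1M or more, the ratio flips
 * and a write context never needs more than a single page.
 */
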
struct ocfs2_unwritten_extent {
	struct list_head	ue_node;
	struct list_head	ue_ip_node;
	u32			ue_cpos;
	u32			ue_phys;
};

/*
 * Describe the state of a single cluster to be written to.
 */
struct ocfs2_write_cluster_desc {
	u32		c_cpos;
	u32		c_phys;

	/*
	 * Give this a unique field because c_phys eventually gets
	 * filled.
	 */
	unsigned	c_new;
	unsigned	c_clear_unwritten;
	unsigned	c_needs_zero;
};

struct ocfs2_write_ctxt {
	/* Logical cluster position / len of write */
	u32				w_cpos;
	u32				w_clen;

	/* First cluster allocated in a nonsparse extend */
	u32				w_first_new_cpos;

	/* Type of caller. Must be one of buffer, mmap, direct.  */
	ocfs2_write_type_t		w_type;

	struct ocfs2_write_cluster_desc	w_desc[OCFS2_MAX_CLUSTERS_PER_PAGE];

	/*
	 * This is true if page_size > cluster_size.
	 *
	 * It triggers a set of special cases during write which might
	 * have to deal with allocating writes to partial pages.
	 */
	unsigned int			w_large_pages;

	/*
	 * Pages involved in this write.
	 *
	 * w_target_page is the page being written to by the user.
	 *
	 * w_pages is an array of pages which always contains
	 * w_target_page, and in the case of an allocating write with
	 * page_size < cluster size, it will contain zero'd and mapped
	 * pages adjacent to w_target_page which need to be written
	 * out so that future reads from that region will get zeros.
	 */
	unsigned int			w_num_pages;
	struct page			*w_pages[OCFS2_MAX_CTXT_PAGES];
	struct page			*w_target_page;

	/*
	 * w_target_locked is used by the page_mkwrite path to indicate
	 * that w_target_page must not be unlocked in
	 * ocfs2_write_end_nolock().
	 */
	unsigned int			w_target_locked:1;

	/*
	 * ocfs2_write_end() uses this to know what the real range to
	 * write in the target should be.
	 */
	unsigned int			w_target_from;
	unsigned int			w_target_to;

	/*
	 * We could use journal_current_handle() but this is cleaner,
	 * IMHO -Mark
	 */
	handle_t			*w_handle;

	struct buffer_head		*w_di_bh;

	struct ocfs2_cached_dealloc_ctxt w_dealloc;

	struct list_head		w_unwritten_list;
	unsigned int			w_unwritten_count;
};

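/*
 * Rough lifecycle of a write context (a summary of this file, not new
 * behaviour): ocfs2_alloc_write_ctxt() sizes the write in clusters,
 * ocfs2_grab_pages_for_write() pins the pages involved, data goes out
 * cluster by cluster via ocfs2_write_cluster_by_desc(), and everything
 * is torn down again by ocfs2_free_write_ctxt().
 */
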
void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages)
{
	int i;

	for(i = 0; i < num_pages; i++) {
		if (pages[i]) {
			unlock_page(pages[i]);
			mark_page_accessed(pages[i]);
			put_page(pages[i]);
		}
	}
}

static void ocfs2_unlock_pages(struct ocfs2_write_ctxt *wc)
{
	int i;

	/*
	 * w_target_locked is only set to true in the page_mkwrite() case.
	 * The intent is to allow us to lock the target page from write_begin()
	 * to write_end(). The caller must hold a ref on w_target_page.
	 */
	if (wc->w_target_locked) {
		BUG_ON(!wc->w_target_page);
		for (i = 0; i < wc->w_num_pages; i++) {
			if (wc->w_target_page == wc->w_pages[i]) {
				wc->w_pages[i] = NULL;
				break;
			}
		}
		mark_page_accessed(wc->w_target_page);
		put_page(wc->w_target_page);
	}
	ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages);
}

static void ocfs2_free_unwritten_list(struct inode *inode,
				 struct list_head *head)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_unwritten_extent *ue = NULL, *tmp = NULL;

	list_for_each_entry_safe(ue, tmp, head, ue_node) {
		list_del(&ue->ue_node);
		spin_lock(&oi->ip_lock);
		list_del(&ue->ue_ip_node);
		spin_unlock(&oi->ip_lock);
		kfree(ue);
	}
}

static void ocfs2_free_write_ctxt(struct inode *inode,
				  struct ocfs2_write_ctxt *wc)
{
	ocfs2_free_unwritten_list(inode, &wc->w_unwritten_list);
	ocfs2_unlock_pages(wc);
	brelse(wc->w_di_bh);
	kfree(wc);
}

static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp,
				  struct ocfs2_super *osb, loff_t pos,
				  unsigned len, ocfs2_write_type_t type,
				  struct buffer_head *di_bh)
{
	u32 cend;
	struct ocfs2_write_ctxt *wc;

	wc = kzalloc(sizeof(struct ocfs2_write_ctxt), GFP_NOFS);
	if (!wc)
		return -ENOMEM;

	wc->w_cpos = pos >> osb->s_clustersize_bits;
	wc->w_first_new_cpos = UINT_MAX;
	cend = (pos + len - 1) >> osb->s_clustersize_bits;
	wc->w_clen = cend - wc->w_cpos + 1;
	get_bh(di_bh);
	wc->w_di_bh = di_bh;
	wc->w_type = type;

	if (unlikely(PAGE_SHIFT > osb->s_clustersize_bits))
		wc->w_large_pages = 1;
	else
		wc->w_large_pages = 0;

	ocfs2_init_dealloc_ctxt(&wc->w_dealloc);
	INIT_LIST_HEAD(&wc->w_unwritten_list);

	*wcp = wc;

	return 0;
}

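/*
 * A quick example of the cluster math in ocfs2_alloc_write_ctxt() above
 * (illustrative values): a 100 byte write at pos 4000 with 4K clusters
 * gives w_cpos = 4000 >> 12 = 0 and cend = (4000 + 100 - 1) >> 12 = 1,
 * so w_clen = 2 - the write straddles one cluster boundary.
 */
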
/*
 * If a page has any new buffers, zero them out here, and mark them uptodate
 * and dirty so they'll be written out (in order to prevent uninitialised
 * block data from leaking). And clear the new bit.
 */
static void ocfs2_zero_new_buffers(struct page *page, unsigned from, unsigned to)
{
	unsigned int block_start, block_end;
	struct buffer_head *head, *bh;

	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
		return;

	bh = head = page_buffers(page);
	block_start = 0;
	do {
		block_end = block_start + bh->b_size;

		if (buffer_new(bh)) {
			if (block_end > from && block_start < to) {
				if (!PageUptodate(page)) {
					unsigned start, end;

					start = max(from, block_start);
					end = min(to, block_end);

					zero_user_segment(page, start, end);
					set_buffer_uptodate(bh);
				}

				clear_buffer_new(bh);
				mark_buffer_dirty(bh);
			}
		}

		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);
}

/*
 * Only called when we have a failure during allocating write to write
 * zeros to the newly allocated region.
 */
static void ocfs2_write_failure(struct inode *inode,
				struct ocfs2_write_ctxt *wc,
				loff_t user_pos, unsigned user_len)
{
	int i;
	unsigned from = user_pos & (PAGE_SIZE - 1),
		to = user_pos + user_len;
	struct page *tmppage;

	if (wc->w_target_page)
		ocfs2_zero_new_buffers(wc->w_target_page, from, to);

	for(i = 0; i < wc->w_num_pages; i++) {
		tmppage = wc->w_pages[i];

		if (tmppage && page_has_buffers(tmppage)) {
			if (ocfs2_should_order_data(inode))
				ocfs2_jbd2_inode_add_write(wc->w_handle, inode,
							   user_pos, user_len);

			block_commit_write(tmppage, from, to);
		}
	}
}

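/*
 * For a non-allocating write, only the user-dirtied range of the target
 * page is mapped below; an allocating write widens the range to the
 * cluster boundaries so the freshly allocated space around the user
 * data can be zeroed and written out as well.
 */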
static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno,
					struct ocfs2_write_ctxt *wc,
					struct page *page, u32 cpos,
					loff_t user_pos, unsigned user_len,
					int new)
{
	int ret;
	unsigned int map_from = 0, map_to = 0;
	unsigned int cluster_start, cluster_end;
	unsigned int user_data_from = 0, user_data_to = 0;

	ocfs2_figure_cluster_boundaries(OCFS2_SB(inode->i_sb), cpos,
					&cluster_start, &cluster_end);

	/* treat the write as new if a hole/lseek spanned across
	 * the page boundary.
	 */
	new = new | ((i_size_read(inode) <= page_offset(page)) &&
			(page_offset(page) <= user_pos));

	if (page == wc->w_target_page) {
		map_from = user_pos & (PAGE_SIZE - 1);
		map_to = map_from + user_len;

		if (new)
			ret = ocfs2_map_page_blocks(page, p_blkno, inode,
						    cluster_start, cluster_end,
						    new);
		else
			ret = ocfs2_map_page_blocks(page, p_blkno, inode,
						    map_from, map_to, new);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		user_data_from = map_from;
		user_data_to = map_to;
		if (new) {
			map_from = cluster_start;
			map_to = cluster_end;
		}
	} else {
		/*
		 * If we haven't allocated the new page yet, we
		 * shouldn't be writing it out without copying user
		 * data. This is likely a math error from the caller.
		 */
		BUG_ON(!new);

		map_from = cluster_start;
		map_to = cluster_end;

		ret = ocfs2_map_page_blocks(page, p_blkno, inode,
					    cluster_start, cluster_end, new);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	/*
	 * Parts of newly allocated pages need to be zero'd.
	 *
	 * Above, we have also rewritten 'to' and 'from' - as far as
	 * the rest of the function is concerned, the entire cluster
	 * range inside of a page needs to be written.
	 *
	 * We can skip this if the page is up to date - it's already
	 * been zero'd from being read in as a hole.
	 */
	if (new && !PageUptodate(page))
		ocfs2_clear_page_regions(page, OCFS2_SB(inode->i_sb),
					 cpos, user_data_from, user_data_to);

	flush_dcache_page(page);

out:
	return ret;
}

/*
 * This function will only grab one cluster's worth of pages.
 */
static int ocfs2_grab_pages_for_write(struct address_space *mapping,
				      struct ocfs2_write_ctxt *wc,
				      u32 cpos, loff_t user_pos,
				      unsigned user_len, int new,
				      struct page *mmap_page)
{
	int ret = 0, i;
	unsigned long start, target_index, end_index, index;
	struct inode *inode = mapping->host;
	loff_t last_byte;

	target_index = user_pos >> PAGE_SHIFT;

	/*
	 * Figure out how many pages we'll be manipulating here. For
	 * non allocating write, we just change the one
	 * page. Otherwise, we'll need a whole cluster's worth. If we're
	 * writing past i_size, we only need enough pages to cover the
	 * last page of the write.
	 */
	if (new) {
		wc->w_num_pages = ocfs2_pages_per_cluster(inode->i_sb);
		start = ocfs2_align_clusters_to_page_index(inode->i_sb, cpos);
		/*
		 * We need the index *past* the last page we could possibly
		 * touch.  This is the page past the end of the write or
		 * i_size, whichever is greater.
		 */
		last_byte = max(user_pos + user_len, i_size_read(inode));
		BUG_ON(last_byte < 1);
		end_index = ((last_byte - 1) >> PAGE_SHIFT) + 1;
		if ((start + wc->w_num_pages) > end_index)
			wc->w_num_pages = end_index - start;
	} else {
		wc->w_num_pages = 1;
		start = target_index;
	}
	end_index = (user_pos + user_len - 1) >> PAGE_SHIFT;

	for(i = 0; i < wc->w_num_pages; i++) {
		index = start + i;

		if (index >= target_index && index <= end_index &&
		    wc->w_type == OCFS2_WRITE_MMAP) {
			/*
			 * ocfs2_pagemkwrite() is a little different
			 * and wants us to directly use the page
			 * passed in.
			 */
			lock_page(mmap_page);

			/* Exit and let the caller retry */
			if (mmap_page->mapping != mapping) {
				WARN_ON(mmap_page->mapping);
				unlock_page(mmap_page);
				ret = -EAGAIN;
				goto out;
			}

			get_page(mmap_page);
			wc->w_pages[i] = mmap_page;
			wc->w_target_locked = true;
		} else if (index >= target_index && index <= end_index &&
			   wc->w_type == OCFS2_WRITE_DIRECT) {
			/* Direct write has no mapping page. */
			wc->w_pages[i] = NULL;
			continue;
		} else {
			wc->w_pages[i] = find_or_create_page(mapping, index,
							     GFP_NOFS);
			if (!wc->w_pages[i]) {
				ret = -ENOMEM;
				mlog_errno(ret);
				goto out;
			}
		}
		wait_for_stable_page(wc->w_pages[i]);

		if (index == target_index)
			wc->w_target_page = wc->w_pages[i];
	}
out:
	if (ret)
		wc->w_target_locked = false;
	return ret;
}

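/*
 * Scale example for the page math above (illustrative numbers): with
 * 64K clusters and 4K pages, an allocating write pins up to 16 pages;
 * end_index then clamps that count whenever the cluster extends past
 * both the end of the write and i_size.
 */
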
/*
 * Prepare a single cluster for writing into the file.
 */
static int ocfs2_write_cluster(struct address_space *mapping,
			       u32 *phys, unsigned int new,
			       unsigned int clear_unwritten,
			       unsigned int should_zero,
			       struct ocfs2_alloc_context *data_ac,
			       struct ocfs2_alloc_context *meta_ac,
			       struct ocfs2_write_ctxt *wc, u32 cpos,
			       loff_t user_pos, unsigned user_len)
{
	int ret, i;
	u64 p_blkno;
	struct inode *inode = mapping->host;
	struct ocfs2_extent_tree et;
	int bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1);

	if (new) {
		u32 tmp_pos;

		/*
		 * This is safe to call with the page locks - it won't take
		 * any additional semaphores or cluster locks.
		 */
		tmp_pos = cpos;
		ret = ocfs2_add_inode_data(OCFS2_SB(inode->i_sb), inode,
					   &tmp_pos, 1, !clear_unwritten,
					   wc->w_di_bh, wc->w_handle,
					   data_ac, meta_ac, NULL);
		/*
		 * This shouldn't happen because we must have already
		 * calculated the correct meta data allocation required. The
		 * internal tree allocation code should know how to increase
		 * transaction credits itself.
		 *
		 * If need be, we could handle -EAGAIN for a
		 * RESTART_TRANS here.
		 */
		mlog_bug_on_msg(ret == -EAGAIN,
				"Inode %llu: EAGAIN return during allocation.\n",
				(unsigned long long)OCFS2_I(inode)->ip_blkno);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
	} else if (clear_unwritten) {
		ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode),
					      wc->w_di_bh);
		ret = ocfs2_mark_extent_written(inode, &et,
						wc->w_handle, cpos, 1, *phys,
						meta_ac, &wc->w_dealloc);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
	}
	/*
	 * The only reason this should fail is due to an inability to
	 * find the extent added.
	 */
	ret = ocfs2_get_clusters(inode, cpos, phys, NULL, NULL);
	if (ret < 0) {
		mlog(ML_ERROR, "Get physical blkno failed for inode %llu, "
		     "at logical cluster %u",
		     (unsigned long long)OCFS2_I(inode)->ip_blkno, cpos);
		goto out;
	}

	BUG_ON(*phys == 0);

	p_blkno = ocfs2_clusters_to_blocks(inode->i_sb, *phys);
	if (!should_zero)
		p_blkno += (user_pos >> inode->i_sb->s_blocksize_bits) & (u64)(bpc - 1);

	for(i = 0; i < wc->w_num_pages; i++) {
		int tmpret;

		/* This is the direct io target page. */
		if (wc->w_pages[i] == NULL) {
			p_blkno++;
			continue;
		}

		tmpret = ocfs2_prepare_page_for_write(inode, &p_blkno, wc,
						      wc->w_pages[i], cpos,
						      user_pos, user_len,
						      should_zero);
		if (tmpret) {
			mlog_errno(tmpret);
			if (ret == 0)
				ret = tmpret;
		}
	}

	/*
	 * We only have cleanup to do in case of allocating write.
	 */
	if (ret && new)
		ocfs2_write_failure(inode, wc, user_pos, user_len);

out:

	return ret;
}

1170 */ 1187 */ 1171 ret = ocfs2_get_clusters(inode, cpos, 1188 ret = ocfs2_get_clusters(inode, cpos, phys, NULL, NULL); 1172 if (ret < 0) { 1189 if (ret < 0) { 1173 mlog(ML_ERROR, "Get physical 1190 mlog(ML_ERROR, "Get physical blkno failed for inode %llu, " 1174 "at logical clust 1191 "at logical cluster %u", 1175 (unsigned long lo 1192 (unsigned long long)OCFS2_I(inode)->ip_blkno, cpos); 1176 goto out; 1193 goto out; 1177 } 1194 } 1178 1195 1179 BUG_ON(*phys == 0); 1196 BUG_ON(*phys == 0); 1180 1197 1181 p_blkno = ocfs2_clusters_to_blocks(in 1198 p_blkno = ocfs2_clusters_to_blocks(inode->i_sb, *phys); 1182 if (!should_zero) 1199 if (!should_zero) 1183 p_blkno += (user_pos >> inode 1200 p_blkno += (user_pos >> inode->i_sb->s_blocksize_bits) & (u64)(bpc - 1); 1184 1201 1185 for(i = 0; i < wc->w_num_pages; i++) 1202 for(i = 0; i < wc->w_num_pages; i++) { 1186 int tmpret; 1203 int tmpret; 1187 1204 1188 /* This is the direct io targ 1205 /* This is the direct io target page. */ 1189 if (wc->w_pages[i] == NULL) { 1206 if (wc->w_pages[i] == NULL) { 1190 p_blkno++; 1207 p_blkno++; 1191 continue; 1208 continue; 1192 } 1209 } 1193 1210 1194 tmpret = ocfs2_prepare_page_f 1211 tmpret = ocfs2_prepare_page_for_write(inode, &p_blkno, wc, 1195 1212 wc->w_pages[i], cpos, 1196 1213 user_pos, user_len, 1197 1214 should_zero); 1198 if (tmpret) { 1215 if (tmpret) { 1199 mlog_errno(tmpret); 1216 mlog_errno(tmpret); 1200 if (ret == 0) 1217 if (ret == 0) 1201 ret = tmpret; 1218 ret = tmpret; 1202 } 1219 } 1203 } 1220 } 1204 1221 1205 /* 1222 /* 1206 * We only have cleanup to do in case 1223 * We only have cleanup to do in case of allocating write. 1207 */ 1224 */ 1208 if (ret && new) 1225 if (ret && new) 1209 ocfs2_write_failure(inode, wc 1226 ocfs2_write_failure(inode, wc, user_pos, user_len); 1210 1227 1211 out: 1228 out: 1212 1229 1213 return ret; 1230 return ret; 1214 } 1231 } 1215 1232 1216 static int ocfs2_write_cluster_by_desc(struct 1233 static int ocfs2_write_cluster_by_desc(struct address_space *mapping, 1217 struct 1234 struct ocfs2_alloc_context *data_ac, 1218 struct 1235 struct ocfs2_alloc_context *meta_ac, 1219 struct 1236 struct ocfs2_write_ctxt *wc, 1220 loff_t 1237 loff_t pos, unsigned len) 1221 { 1238 { 1222 int ret, i; 1239 int ret, i; 1223 loff_t cluster_off; 1240 loff_t cluster_off; 1224 unsigned int local_len = len; 1241 unsigned int local_len = len; 1225 struct ocfs2_write_cluster_desc *desc 1242 struct ocfs2_write_cluster_desc *desc; 1226 struct ocfs2_super *osb = OCFS2_SB(ma 1243 struct ocfs2_super *osb = OCFS2_SB(mapping->host->i_sb); 1227 1244 1228 for (i = 0; i < wc->w_clen; i++) { 1245 for (i = 0; i < wc->w_clen; i++) { 1229 desc = &wc->w_desc[i]; 1246 desc = &wc->w_desc[i]; 1230 1247 1231 /* 1248 /* 1232 * We have to make sure that 1249 * We have to make sure that the total write passed in 1233 * doesn't extend past a sing 1250 * doesn't extend past a single cluster. 
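/*
 * Walk the write one cluster-sized chunk at a time, trimming each chunk
 * so that it never crosses a cluster boundary, and hand each chunk with
 * its precomputed descriptor to ocfs2_write_cluster().
 */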
static int ocfs2_write_cluster_by_desc(struct address_space *mapping,
                       struct ocfs2_alloc_context *data_ac,
                       struct ocfs2_alloc_context *meta_ac,
                       struct ocfs2_write_ctxt *wc,
                       loff_t pos, unsigned len)
{
    int ret, i;
    loff_t cluster_off;
    unsigned int local_len = len;
    struct ocfs2_write_cluster_desc *desc;
    struct ocfs2_super *osb = OCFS2_SB(mapping->host->i_sb);

    for (i = 0; i < wc->w_clen; i++) {
        desc = &wc->w_desc[i];

        /*
         * We have to make sure that the total write passed in
         * doesn't extend past a single cluster.
         */
        local_len = len;
        cluster_off = pos & (osb->s_clustersize - 1);
        if ((cluster_off + local_len) > osb->s_clustersize)
            local_len = osb->s_clustersize - cluster_off;

        ret = ocfs2_write_cluster(mapping, &desc->c_phys,
                      desc->c_new,
                      desc->c_clear_unwritten,
                      desc->c_needs_zero,
                      data_ac, meta_ac,
                      wc, desc->c_cpos, pos, local_len);
        if (ret) {
            mlog_errno(ret);
            goto out;
        }

        len -= local_len;
        pos += local_len;
    }

    ret = 0;
out:
    return ret;
}

/*
 * ocfs2_write_end() wants to know which parts of the target page it
 * should complete the write on. It's easiest to compute them ahead of
 * time when a more complete view of the write is available.
 */
static void ocfs2_set_target_boundaries(struct ocfs2_super *osb,
                    struct ocfs2_write_ctxt *wc,
                    loff_t pos, unsigned len, int alloc)
{
    struct ocfs2_write_cluster_desc *desc;

    wc->w_target_from = pos & (PAGE_SIZE - 1);
    wc->w_target_to = wc->w_target_from + len;

    if (alloc == 0)
        return;

    /*
     * Allocating write - we may have different boundaries based
     * on page size and cluster size.
     *
     * NOTE: We can no longer compute one value from the other as
     * the actual write length and user provided length may be
     * different.
     */

    if (wc->w_large_pages) {
        /*
         * We only care about the 1st and last cluster within
         * our range and whether they should be zero'd or not. Either
         * value may be extended out to the start/end of a
         * newly allocated cluster.
         */
        desc = &wc->w_desc[0];
        if (desc->c_needs_zero)
            ocfs2_figure_cluster_boundaries(osb,
                            desc->c_cpos,
                            &wc->w_target_from,
                            NULL);

        desc = &wc->w_desc[wc->w_clen - 1];
        if (desc->c_needs_zero)
            ocfs2_figure_cluster_boundaries(osb,
                            desc->c_cpos,
                            NULL,
                            &wc->w_target_to);
    } else {
        wc->w_target_from = 0;
        wc->w_target_to = PAGE_SIZE;
    }
}

/*
 * Check if this extent is marked UNWRITTEN by direct io. If so, we don't
 * need to do the zeroing work, and must not clear UNWRITTEN since it will
 * be cleared by the direct io procedure.
 * If this is a new extent allocated by direct io, we should mark it in
 * the ip_unwritten_list.
 */
static int ocfs2_unwritten_check(struct inode *inode,
                 struct ocfs2_write_ctxt *wc,
                 struct ocfs2_write_cluster_desc *desc)
{
    struct ocfs2_inode_info *oi = OCFS2_I(inode);
    struct ocfs2_unwritten_extent *ue = NULL, *new = NULL;
    int ret = 0;

    if (!desc->c_needs_zero)
        return 0;

retry:
    spin_lock(&oi->ip_lock);
    /* No zeroing is needed here, whether the write is buffered or
     * direct: whoever first queued the cluster on the list is doing
     * the zeroing, and will clear UNWRITTEN once all the cluster io
     * has finished. */
    list_for_each_entry(ue, &oi->ip_unwritten_list, ue_ip_node) {
        if (desc->c_cpos == ue->ue_cpos) {
            BUG_ON(desc->c_new);
            desc->c_needs_zero = 0;
            desc->c_clear_unwritten = 0;
            goto unlock;
        }
    }

    if (wc->w_type != OCFS2_WRITE_DIRECT)
        goto unlock;

    if (new == NULL) {
        /* We cannot allocate under the spinlock, so drop it,
         * allocate, and rescan the list from the top. */
        spin_unlock(&oi->ip_lock);
        new = kmalloc(sizeof(struct ocfs2_unwritten_extent),
                  GFP_NOFS);
        if (new == NULL) {
            ret = -ENOMEM;
            goto out;
        }
        goto retry;
    }
    /* This direct write will do the zeroing. */
    new->ue_cpos = desc->c_cpos;
    new->ue_phys = desc->c_phys;
    desc->c_clear_unwritten = 0;
    list_add_tail(&new->ue_ip_node, &oi->ip_unwritten_list);
    list_add_tail(&new->ue_node, &wc->w_unwritten_list);
    wc->w_unwritten_count++;
    new = NULL;
unlock:
    spin_unlock(&oi->ip_lock);
out:
    kfree(new);
    return ret;
}

/*
 * Populate each single-cluster write descriptor in the write context
 * with information about the i/o to be done.
 *
 * Returns the number of clusters that will have to be allocated, as
 * well as a worst case estimate of the number of extent records that
 * would have to be created during a write to an unwritten region.
 */
static int ocfs2_populate_write_desc(struct inode *inode,
                     struct ocfs2_write_ctxt *wc,
                     unsigned int *clusters_to_alloc,
                     unsigned int *extents_to_split)
{
    int ret;
    struct ocfs2_write_cluster_desc *desc;
    unsigned int num_clusters = 0;
    unsigned int ext_flags = 0;
    u32 phys = 0;
    int i;

    *clusters_to_alloc = 0;
    *extents_to_split = 0;

    for (i = 0; i < wc->w_clen; i++) {
        desc = &wc->w_desc[i];
        desc->c_cpos = wc->w_cpos + i;

        if (num_clusters == 0) {
            /*
             * Need to look up the next extent record.
             */
            ret = ocfs2_get_clusters(inode, desc->c_cpos, &phys,
                         &num_clusters, &ext_flags);
            if (ret) {
                mlog_errno(ret);
                goto out;
            }

            /* We should have already CoWed the refcounted extent. */
            BUG_ON(ext_flags & OCFS2_EXT_REFCOUNTED);

            /*
             * Assume worst case - that we're writing in
             * the middle of the extent.
             *
             * We can assume that the write proceeds from
             * left to right, in which case the extent
             * insert code is smart enough to coalesce the
             * next splits into the previous records created.
             */
            if (ext_flags & OCFS2_EXT_UNWRITTEN)
                *extents_to_split = *extents_to_split + 2;
        } else if (phys) {
            /*
             * Only increment phys if it doesn't describe
             * a hole.
             */
            phys++;
        }

        /*
         * If w_first_new_cpos is < UINT_MAX, we have a non-sparse
         * file that got extended.  w_first_new_cpos tells us
         * where the newly allocated clusters are so we can
         * zero them.
         */
        if (desc->c_cpos >= wc->w_first_new_cpos) {
            BUG_ON(phys == 0);
            desc->c_needs_zero = 1;
        }

        desc->c_phys = phys;
        if (phys == 0) {
            desc->c_new = 1;
            desc->c_needs_zero = 1;
            desc->c_clear_unwritten = 1;
            *clusters_to_alloc = *clusters_to_alloc + 1;
        }

        if (ext_flags & OCFS2_EXT_UNWRITTEN) {
            desc->c_clear_unwritten = 1;
            desc->c_needs_zero = 1;
        }

        ret = ocfs2_unwritten_check(inode, wc, desc);
        if (ret) {
            mlog_errno(ret);
            goto out;
        }

        num_clusters--;
    }

    ret = 0;
out:
    return ret;
}

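/*
 * Set up a write to an inline-data inode: start a transaction, pin
 * page 0 (the only page inline data can occupy), get journal access on
 * the dinode, and make sure the page reflects the current inline data.
 */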
static int ocfs2_write_begin_inline(struct address_space *mapping,
                    struct inode *inode,
                    struct ocfs2_write_ctxt *wc)
{
    int ret;
    struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
    struct page *page;
    handle_t *handle;
    struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;

    handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
    if (IS_ERR(handle)) {
        ret = PTR_ERR(handle);
        mlog_errno(ret);
        goto out;
    }

    page = find_or_create_page(mapping, 0, GFP_NOFS);
    if (!page) {
        ocfs2_commit_trans(osb, handle);
        ret = -ENOMEM;
        mlog_errno(ret);
        goto out;
    }
    /*
     * If we don't set w_num_pages then this page won't get unlocked
     * and freed on cleanup of the write context.
     */
    wc->w_pages[0] = wc->w_target_page = page;
    wc->w_num_pages = 1;

    ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh,
                      OCFS2_JOURNAL_ACCESS_WRITE);
    if (ret) {
        ocfs2_commit_trans(osb, handle);

        mlog_errno(ret);
        goto out;
    }

    if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
        ocfs2_set_inode_data_inline(inode, di);

    if (!PageUptodate(page)) {
        ret = ocfs2_read_inline_data(inode, page, wc->w_di_bh);
        if (ret) {
            ocfs2_commit_trans(osb, handle);

            goto out;
        }
    }

    wc->w_handle = handle;
out:
    return ret;
}

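/*
 * Returns non-zero if a file of size new_size still fits entirely in
 * the dinode's inline-data area (id_count bytes).
 */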
int ocfs2_size_fits_inline_data(struct buffer_head *di_bh, u64 new_size)
{
    struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;

    if (new_size <= le16_to_cpu(di->id2.i_data.id_count))
        return 1;
    return 0;
}

static int ocfs2_try_to_write_inline_data(struct address_space *mapping,
                      struct inode *inode, loff_t pos,
                      unsigned len, struct page *mmap_page,
                      struct ocfs2_write_ctxt *wc)
{
    int ret, written = 0;
    loff_t end = pos + len;
    struct ocfs2_inode_info *oi = OCFS2_I(inode);
    struct ocfs2_dinode *di = NULL;

    trace_ocfs2_try_to_write_inline_data((unsigned long long)oi->ip_blkno,
                         len, (unsigned long long)pos,
                         oi->ip_dyn_features);

    /*
     * Handle inodes which already have inline data 1st.
     */
    if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
        if (mmap_page == NULL &&
            ocfs2_size_fits_inline_data(wc->w_di_bh, end))
            goto do_inline_write;

        /*
         * The write won't fit - we have to give this inode an
         * inline extent list now.
         */
        ret = ocfs2_convert_inline_data_to_extents(inode, wc->w_di_bh);
        if (ret)
            mlog_errno(ret);
        goto out;
    }

    /*
     * Check whether the inode can accept inline data.
     */
    if (oi->ip_clusters != 0 || i_size_read(inode) != 0)
        return 0;

    /*
     * Check whether the write can fit.
     */
    di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
    if (mmap_page ||
        end > ocfs2_max_inline_data_with_xattr(inode->i_sb, di))
        return 0;

do_inline_write:
    ret = ocfs2_write_begin_inline(mapping, inode, wc);
    if (ret) {
        mlog_errno(ret);
        goto out;
    }

    /*
     * This signals to the caller that the data can be written
     * inline.
     */
    written = 1;
out:
    return written ? written : ret;
}

/*
 * This function only does anything for file systems which can't
 * handle sparse files.
 *
 * What we want to do here is fill in any hole between the current end
 * of allocation and the end of our write. That way the rest of the
 * write path can treat it as a non-allocating write, which has no
 * special case code for sparse/nonsparse files.
 */
static int ocfs2_expand_nonsparse_inode(struct inode *inode,
                    struct buffer_head *di_bh,
                    loff_t pos, unsigned len,
                    struct ocfs2_write_ctxt *wc)
{
    int ret;
    loff_t newsize = pos + len;

    BUG_ON(ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)));

    if (newsize <= i_size_read(inode))
        return 0;

    ret = ocfs2_extend_no_holes(inode, di_bh, newsize, pos);
    if (ret)
        mlog_errno(ret);

    /* There is no wc when this is called from the direct io path. */
    if (wc)
        wc->w_first_new_cpos =
            ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode));

    return ret;
}

static int ocfs2_zero_tail(struct inode *inode, struct buffer_head *di_bh,
               loff_t pos)
{
    int ret = 0;

    BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)));
    if (pos > i_size_read(inode))
        ret = ocfs2_zero_extend(inode, di_bh, pos);

    return ret;
}

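/*
 * Core of the buffered, mmap and direct write_begin paths. The caller
 * must already hold the inode cluster lock and ip_alloc_sem. On success
 * the write context is returned through fsdata, usually with a running
 * transaction (the direct io path may skip the transaction entirely
 * when no allocation is needed), and ocfs2_write_end_nolock() finishes
 * the write.
 */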
int ocfs2_write_begin_nolock(struct address_space *mapping,
                 loff_t pos, unsigned len, ocfs2_write_type_t type,
                 struct page **pagep, void **fsdata,
                 struct buffer_head *di_bh, struct page *mmap_page)
{
    int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS;
    unsigned int clusters_to_alloc, extents_to_split, clusters_need = 0;
    struct ocfs2_write_ctxt *wc;
    struct inode *inode = mapping->host;
    struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
    struct ocfs2_dinode *di;
    struct ocfs2_alloc_context *data_ac = NULL;
    struct ocfs2_alloc_context *meta_ac = NULL;
    handle_t *handle;
    struct ocfs2_extent_tree et;
    int try_free = 1, ret1;

try_again:
    ret = ocfs2_alloc_write_ctxt(&wc, osb, pos, len, type, di_bh);
    if (ret) {
        mlog_errno(ret);
        return ret;
    }

    if (ocfs2_supports_inline_data(osb)) {
        ret = ocfs2_try_to_write_inline_data(mapping, inode, pos, len,
                             mmap_page, wc);
        if (ret == 1) {
            ret = 0;
            goto success;
        }
        if (ret < 0) {
            mlog_errno(ret);
            goto out;
        }
    }

    /* Direct io changes i_size late, so don't zero the tail here. */
    if (type != OCFS2_WRITE_DIRECT) {
        if (ocfs2_sparse_alloc(osb))
            ret = ocfs2_zero_tail(inode, di_bh, pos);
        else
            ret = ocfs2_expand_nonsparse_inode(inode, di_bh, pos,
                               len, wc);
        if (ret) {
            mlog_errno(ret);
            goto out;
        }
    }

    ret = ocfs2_check_range_for_refcount(inode, pos, len);
    if (ret < 0) {
        mlog_errno(ret);
        goto out;
    } else if (ret == 1) {
        clusters_need = wc->w_clen;
        ret = ocfs2_refcount_cow(inode, di_bh,
                     wc->w_cpos, wc->w_clen, UINT_MAX);
        if (ret) {
            mlog_errno(ret);
            goto out;
        }
    }

    ret = ocfs2_populate_write_desc(inode, wc, &clusters_to_alloc,
                    &extents_to_split);
    if (ret) {
        mlog_errno(ret);
        goto out;
    }
    clusters_need += clusters_to_alloc;

    di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;

    trace_ocfs2_write_begin_nolock(
            (unsigned long long)OCFS2_I(inode)->ip_blkno,
            (long long)i_size_read(inode),
            le32_to_cpu(di->i_clusters),
            pos, len, type, mmap_page,
            clusters_to_alloc, extents_to_split);

    /*
     * We set w_target_from, w_target_to here so that
     * ocfs2_write_end() knows which range in the target page to
     * write out. An allocation requires that we write the entire
     * cluster range.
     */
    if (clusters_to_alloc || extents_to_split) {
        /*
         * XXX: We are stretching the limits of
         * ocfs2_lock_allocators(). It greatly over-estimates
         * the work to be done.
         */
        ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode),
                          wc->w_di_bh);
        ret = ocfs2_lock_allocators(inode, &et,
                        clusters_to_alloc, extents_to_split,
                        &data_ac, &meta_ac);
        if (ret) {
            mlog_errno(ret);
            goto out;
        }

        if (data_ac)
            data_ac->ac_resv = &OCFS2_I(inode)->ip_la_data_resv;

        credits = ocfs2_calc_extend_credits(inode->i_sb,
                            &di->id2.i_list);
    } else if (type == OCFS2_WRITE_DIRECT)
        /* A direct write need not start a transaction if no extents
         * were allocated. */
        goto success;

    /*
     * We have to zero sparse allocated clusters, unwritten extent clusters,
     * and non-sparse clusters we just extended. For non-sparse writes,
     * we know zeros will only be needed in the first and/or last cluster.
     */
    if (wc->w_clen && (wc->w_desc[0].c_needs_zero ||
               wc->w_desc[wc->w_clen - 1].c_needs_zero))
        cluster_of_pages = 1;
    else
        cluster_of_pages = 0;

    ocfs2_set_target_boundaries(osb, wc, pos, len, cluster_of_pages);

    handle = ocfs2_start_trans(osb, credits);
    if (IS_ERR(handle)) {
        ret = PTR_ERR(handle);
        mlog_errno(ret);
        goto out;
    }

    wc->w_handle = handle;

    if (clusters_to_alloc) {
        ret = dquot_alloc_space_nodirty(inode,
            ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc));
        if (ret)
            goto out_commit;
    }

    ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh,
                      OCFS2_JOURNAL_ACCESS_WRITE);
    if (ret) {
        mlog_errno(ret);
        goto out_quota;
    }

    /*
     * Fill our page array first. That way we've grabbed enough so
     * that we can zero and flush if we error after adding the
     * extent.
     */
    ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, len,
                     cluster_of_pages, mmap_page);
    if (ret) {
        /*
         * ocfs2_grab_pages_for_write() returns -EAGAIN if it could
         * not lock the target page. In this case, we exit with no
         * error and no target page. This will trigger the caller,
         * page_mkwrite(), to re-try the operation.
         */
        if (type == OCFS2_WRITE_MMAP && ret == -EAGAIN) {
            BUG_ON(wc->w_target_page);
            ret = 0;
            goto out_quota;
        }

        mlog_errno(ret);
        goto out_quota;
    }

    ret = ocfs2_write_cluster_by_desc(mapping, data_ac, meta_ac, wc, pos,
                      len);
    if (ret) {
        mlog_errno(ret);
        goto out_quota;
    }

    if (data_ac)
        ocfs2_free_alloc_context(data_ac);
    if (meta_ac)
        ocfs2_free_alloc_context(meta_ac);

success:
    if (pagep)
        *pagep = wc->w_target_page;
    *fsdata = wc;
    return 0;
out_quota:
    if (clusters_to_alloc)
        dquot_free_space(inode,
            ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc));
out_commit:
    ocfs2_commit_trans(osb, handle);

out:
    /*
     * The mmapped page won't be unlocked in ocfs2_free_write_ctxt(),
     * even in case of error here like ENOSPC and ENOMEM. So, we need
     * to unlock the target page manually to prevent deadlocks when
     * retrying again on ENOSPC, or when returning non-VM_FAULT_LOCKED
     * to VM code.
     */
    if (wc->w_target_locked)
        unlock_page(mmap_page);

    ocfs2_free_write_ctxt(inode, wc);

    if (data_ac) {
        ocfs2_free_alloc_context(data_ac);
        data_ac = NULL;
    }
    if (meta_ac) {
        ocfs2_free_alloc_context(meta_ac);
        meta_ac = NULL;
    }

    if (ret == -ENOSPC && try_free) {
        /*
         * Try to free some truncate log so that we can have enough
         * clusters to allocate.
         */
        try_free = 0;

        ret1 = ocfs2_try_to_free_truncate_log(osb, clusters_need);
        if (ret1 == 1)
            goto try_again;

        if (ret1 < 0)
            mlog_errno(ret1);
    }

    return ret;
}

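/*
 * ->write_begin for the buffered io path: take the inode cluster lock
 * and ip_alloc_sem, then let ocfs2_write_begin_nolock() do the work.
 */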
static int ocfs2_write_begin(struct file *file, struct address_space *mapping,
                 loff_t pos, unsigned len,
                 struct page **pagep, void **fsdata)
{
    int ret;
    struct buffer_head *di_bh = NULL;
    struct inode *inode = mapping->host;

    ret = ocfs2_inode_lock(inode, &di_bh, 1);
    if (ret) {
        mlog_errno(ret);
        return ret;
    }

    /*
     * Take alloc sem here to prevent concurrent lookups. That way
     * the mapping, zeroing and tree manipulation within
     * ocfs2_write() will be safe against ->read_folio(). This
     * should also serve to lock out allocation from a shared
     * writeable region.
     */
    down_write(&OCFS2_I(inode)->ip_alloc_sem);

    ret = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_BUFFER,
                       pagep, fsdata, di_bh, NULL);
    if (ret) {
        mlog_errno(ret);
        goto out_fail;
    }

    brelse(di_bh);

    return 0;

out_fail:
    up_write(&OCFS2_I(inode)->ip_alloc_sem);

    brelse(di_bh);
    ocfs2_inode_unlock(inode, 1);

    return ret;
}

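/*
 * Complete a write to an inline-data inode: copy the written range of
 * the target page back into the dinode's inline-data area.
 */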
static void ocfs2_write_end_inline(struct inode *inode, loff_t pos,
                   unsigned len, unsigned *copied,
                   struct ocfs2_dinode *di,
                   struct ocfs2_write_ctxt *wc)
{
    void *kaddr;

    if (unlikely(*copied < len)) {
        if (!PageUptodate(wc->w_target_page)) {
            *copied = 0;
            return;
        }
    }

    kaddr = kmap_atomic(wc->w_target_page);
    memcpy(di->id2.i_data.id_data + pos, kaddr + pos, *copied);
    kunmap_atomic(kaddr);

    trace_ocfs2_write_end_inline(
         (unsigned long long)OCFS2_I(inode)->ip_blkno,
         (unsigned long long)pos, *copied,
         le16_to_cpu(di->id2.i_data.id_count),
         le16_to_cpu(di->i_dyn_features));
}

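/*
 * Complete the write started by ocfs2_write_begin_nolock(): zero any
 * unwritten parts of freshly allocated clusters, order and commit the
 * data buffers, update i_size and the timestamps, then tear down the
 * write context. Returns the number of bytes copied.
 */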
int ocfs2_write_end_nolock(struct address_space *mapping,
               loff_t pos, unsigned len, unsigned copied, void *fsdata)
{
    int i, ret;
    unsigned from, to, start = pos & (PAGE_SIZE - 1);
    struct inode *inode = mapping->host;
    struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
    struct ocfs2_write_ctxt *wc = fsdata;
    struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
    handle_t *handle = wc->w_handle;
    struct page *tmppage;

    BUG_ON(!list_empty(&wc->w_unwritten_list));

    if (handle) {
        ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode),
                wc->w_di_bh, OCFS2_JOURNAL_ACCESS_WRITE);
        if (ret) {
            copied = ret;
            mlog_errno(ret);
            goto out;
        }
    }

    if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
        ocfs2_write_end_inline(inode, pos, len, &copied, di, wc);
        goto out_write_size;
    }

    if (unlikely(copied < len) && wc->w_target_page) {
        loff_t new_isize;

        if (!PageUptodate(wc->w_target_page))
            copied = 0;

        new_isize = max_t(loff_t, i_size_read(inode), pos + copied);
        if (new_isize > page_offset(wc->w_target_page))
            ocfs2_zero_new_buffers(wc->w_target_page, start+copied,
                           start+len);
        else {
            /*
             * When the page is fully beyond the new isize (the
             * data copy failed), do not bother zeroing the page.
             * Invalidate it instead, so that writeback does not
             * get confused and put the page & buffer dirty bits
             * into an inconsistent state.
             */
            block_invalidate_folio(page_folio(wc->w_target_page),
                           0, PAGE_SIZE);
        }
    }
    if (wc->w_target_page)
        flush_dcache_page(wc->w_target_page);

    for(i = 0; i < wc->w_num_pages; i++) {
        tmppage = wc->w_pages[i];

        /* This is the direct io target page. */
        if (tmppage == NULL)
            continue;

        if (tmppage == wc->w_target_page) {
            from = wc->w_target_from;
            to = wc->w_target_to;

            BUG_ON(from > PAGE_SIZE ||
                   to > PAGE_SIZE ||
                   to < from);
        } else {
            /*
             * Pages adjacent to the target (if any) imply
             * a hole-filling write in which case we want
             * to flush their entire range.
             */
            from = 0;
            to = PAGE_SIZE;
        }

        if (page_has_buffers(tmppage)) {
            if (handle && ocfs2_should_order_data(inode)) {
                loff_t start_byte =
                    ((loff_t)tmppage->index << PAGE_SHIFT) +
                    from;
                loff_t length = to - from;
                ocfs2_jbd2_inode_add_write(handle, inode,
                               start_byte, length);
            }
            block_commit_write(tmppage, from, to);
        }
    }

out_write_size:
    /* Direct io does not update i_size here. */
    if (wc->w_type != OCFS2_WRITE_DIRECT) {
        pos += copied;
        if (pos > i_size_read(inode)) {
            i_size_write(inode, pos);
            mark_inode_dirty(inode);
        }
        inode->i_blocks = ocfs2_inode_sector_count(inode);
        di->i_size = cpu_to_le64((u64)i_size_read(inode));
        inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
        di->i_mtime = di->i_ctime = cpu_to_le64(inode_get_mtime_sec(inode));
        di->i_mtime_nsec = di->i_ctime_nsec =
                cpu_to_le32(inode_get_mtime_nsec(inode));
        if (handle)
            ocfs2_update_inode_fsync_trans(handle, inode, 1);
    }
    if (handle)
        ocfs2_journal_dirty(handle, wc->w_di_bh);

out:
    /*
     * Unlock pages before dealloc, since it needs to acquire the
     * j_trans_barrier lock, or it will cause a deadlock: the journal
     * commit thread holds this lock and will ask for the page lock
     * when flushing the data. Doing it here preserves the unlock order.
     */
    ocfs2_unlock_pages(wc);

    if (handle)
        ocfs2_commit_trans(osb, handle);

    ocfs2_run_deallocs(osb, &wc->w_dealloc);

    brelse(wc->w_di_bh);
    kfree(wc);

    return copied;
}

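/*
 * Buffered ->write_end: finish the write, then drop the locks taken in
 * ocfs2_write_begin().
 */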
static int ocfs2_write_end(struct file *file, struct address_space *mapping,
               loff_t pos, unsigned len, unsigned copied,
               struct page *page, void *fsdata)
{
    int ret;
    struct inode *inode = mapping->host;

    ret = ocfs2_write_end_nolock(mapping, pos, len, copied, fsdata);

    up_write(&OCFS2_I(inode)->ip_alloc_sem);
    ocfs2_inode_unlock(inode, 1);

    return ret;
}

struct ocfs2_dio_write_ctxt {
    struct list_head    dw_zero_list;
    unsigned        dw_zero_count;
    int         dw_orphaned;
    pid_t           dw_writer_pid;
};

static struct ocfs2_dio_write_ctxt *
ocfs2_dio_alloc_write_ctx(struct buffer_head *bh, int *alloc)
{
    struct ocfs2_dio_write_ctxt *dwc = NULL;

    if (bh->b_private)
        return bh->b_private;

    dwc = kmalloc(sizeof(struct ocfs2_dio_write_ctxt), GFP_NOFS);
    if (dwc == NULL)
        return NULL;
    INIT_LIST_HEAD(&dwc->dw_zero_list);
    dwc->dw_zero_count = 0;
    dwc->dw_orphaned = 0;
    dwc->dw_writer_pid = task_pid_nr(current);
    bh->b_private = dwc;
    *alloc = 1;

    return dwc;
}

static void ocfs2_dio_free_write_ctx(struct inode *inode,
                     struct ocfs2_dio_write_ctxt *dwc)
{
    ocfs2_free_unwritten_list(inode, &dwc->dw_zero_list);
    kfree(dwc);
}

/*
 * TODO: Make this into a generic get_blocks function.
 *
 * From do_direct_io in direct-io.c:
 *  "So what we do is to permit the ->get_blocks function to populate
 *   bh.b_size with the size of IO which is permitted at this offset and
 *   this i_blkbits."
 *
 * This function is called directly from get_more_blocks in direct-io.c.
 *
 * called like this: dio->get_blocks(dio->inode, fs_startblk,
 *                  fs_count, map_bh, dio->rw == WRITE);
 */
static int ocfs2_dio_wr_get_block(struct inode *inode, sector_t iblock,
                   struct buffer_head *bh_result, int create)
{
    struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
    struct ocfs2_inode_info *oi = OCFS2_I(inode);
    struct ocfs2_write_ctxt *wc;
    struct ocfs2_write_cluster_desc *desc = NULL;
    struct ocfs2_dio_write_ctxt *dwc = NULL;
    struct buffer_head *di_bh = NULL;
    u64 p_blkno;
    unsigned int i_blkbits = inode->i_sb->s_blocksize_bits;
    loff_t pos = iblock << i_blkbits;
    sector_t endblk = (i_size_read(inode) - 1) >> i_blkbits;
    unsigned len, total_len = bh_result->b_size;
    int ret = 0, first_get_block = 0;

    len = osb->s_clustersize - (pos & (osb->s_clustersize - 1));
    len = min(total_len, len);

    /*
     * bh_result->b_size is counted in get_more_blocks according to the
     * write "pos" and "end". We may need to map the range twice to
     * return different buffer states:
     * 1. area within the file size: don't set NEW;
     * 2. area beyond the file size: set NEW.
     *
     *         iblock    endblk
     * |--------|---------|---------|---------
     * |<-------area in file------->|
     */

    if ((iblock <= endblk) &&
        ((iblock + ((len - 1) >> i_blkbits)) > endblk))
        len = (endblk - iblock + 1) << i_blkbits;

    mlog(0, "get block of %lu at %llu:%u req %u\n",
            inode->i_ino, pos, len, total_len);

    /*
     * Because we may need to change the file size in
     * ocfs2_dio_end_io_write(), or add the inode to the orphan dir,
     * we cannot take the fast path when the file size will change.
     */
    if (pos + total_len <= i_size_read(inode)) {

        /* This is the fast path for rewrites. */
        ret = ocfs2_lock_get_block(inode, iblock, bh_result, create);
        if (buffer_mapped(bh_result) &&
            !buffer_new(bh_result) &&
            ret == 0)
            goto out;

        /* Clear state set by ocfs2_get_block. */
        bh_result->b_state = 0;
    }

    dwc = ocfs2_dio_alloc_write_ctx(bh_result, &first_get_block);
    if (unlikely(dwc == NULL)) {
        ret = -ENOMEM;
        mlog_errno(ret);
        goto out;
    }

    if (ocfs2_clusters_for_bytes(inode->i_sb, pos + total_len) >
        ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode)) &&
        !dwc->dw_orphaned) {
        /*
         * When we are going to allocate extents beyond the file
         * size, add the inode to the orphan dir first, so we can
         * reclaim that space if the system crashes during the write.
         */
        ret = ocfs2_add_inode_to_orphan(osb, inode);
        if (ret < 0) {
            mlog_errno(ret);
            goto out;
        }
        dwc->dw_orphaned = 1;
    }

    ret = ocfs2_inode_lock(inode, &di_bh, 1);
    if (ret) {
        mlog_errno(ret);
        goto out;
    }

    down_write(&oi->ip_alloc_sem);

    if (first_get_block) {
        if (ocfs2_sparse_alloc(osb))
            ret = ocfs2_zero_tail(inode, di_bh, pos);
        else
            ret = ocfs2_expand_nonsparse_inode(inode, di_bh, pos,
                               total_len, NULL);
        if (ret < 0) {
            mlog_errno(ret);
            goto unlock;
        }
    }

    ret = ocfs2_write_begin_nolock(inode->i_mapping, pos, len,
                       OCFS2_WRITE_DIRECT, NULL,
                       (void **)&wc, di_bh, NULL);
    if (ret) {
        mlog_errno(ret);
        goto unlock;
    }

    desc = &wc->w_desc[0];

    p_blkno = ocfs2_clusters_to_blocks(inode->i_sb, desc->c_phys);
    BUG_ON(p_blkno == 0);
    p_blkno += iblock & (u64)(ocfs2_clusters_to_blocks(inode->i_sb, 1) - 1);

    map_bh(bh_result, inode->i_sb, p_blkno);
    bh_result->b_size = len;
    if (desc->c_needs_zero)
        set_buffer_new(bh_result);

    if (iblock > endblk)
        set_buffer_new(bh_result);

    /* end_io may sleep, which must not happen in irq context, so defer
     * completion to the dio work queue. */
    set_buffer_defer_completion(bh_result);

    if (!list_empty(&wc->w_unwritten_list)) {
        struct ocfs2_unwritten_extent *ue = NULL;

        ue = list_first_entry(&wc->w_unwritten_list,
                      struct ocfs2_unwritten_extent,
                      ue_node);
        BUG_ON(ue->ue_cpos != desc->c_cpos);
        /* The physical address may still be 0; fill it in. */
        ue->ue_phys = desc->c_phys;

        list_splice_tail_init(&wc->w_unwritten_list, &dwc->dw_zero_list);
        dwc->dw_zero_count += wc->w_unwritten_count;
    }

    ret = ocfs2_write_end_nolock(inode->i_mapping, pos, len, len, wc);
    BUG_ON(ret != len);
    ret = 0;
unlock:
    up_write(&oi->ip_alloc_sem);
    ocfs2_inode_unlock(inode, 1);
    brelse(di_bh);
out:
    return ret;
}

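/*
 * Deferred dio write completion: mark the clusters we zeroed as written,
 * pull the inode back out of the orphan dir if we put it there, and
 * extend i_size if the write went past it. Runs from the dio completion
 * work queue.
 */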
2291 ssize_t bytes) 2293 { 2292 { 2294 struct ocfs2_cached_dealloc_ctxt deal 2293 struct ocfs2_cached_dealloc_ctxt dealloc; 2295 struct ocfs2_extent_tree et; 2294 struct ocfs2_extent_tree et; 2296 struct ocfs2_super *osb = OCFS2_SB(in 2295 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 2297 struct ocfs2_inode_info *oi = OCFS2_I 2296 struct ocfs2_inode_info *oi = OCFS2_I(inode); 2298 struct ocfs2_unwritten_extent *ue = N 2297 struct ocfs2_unwritten_extent *ue = NULL; 2299 struct buffer_head *di_bh = NULL; 2298 struct buffer_head *di_bh = NULL; 2300 struct ocfs2_dinode *di; 2299 struct ocfs2_dinode *di; 2301 struct ocfs2_alloc_context *data_ac = 2300 struct ocfs2_alloc_context *data_ac = NULL; 2302 struct ocfs2_alloc_context *meta_ac = 2301 struct ocfs2_alloc_context *meta_ac = NULL; 2303 handle_t *handle = NULL; 2302 handle_t *handle = NULL; 2304 loff_t end = offset + bytes; 2303 loff_t end = offset + bytes; 2305 int ret = 0, credits = 0; 2304 int ret = 0, credits = 0; 2306 2305 2307 ocfs2_init_dealloc_ctxt(&dealloc); 2306 ocfs2_init_dealloc_ctxt(&dealloc); 2308 2307 2309 /* We do clear unwritten, delete orph 2308 /* We do clear unwritten, delete orphan, change i_size here. If neither 2310 * of these happen, we can skip all t 2309 * of these happen, we can skip all this. */ 2311 if (list_empty(&dwc->dw_zero_list) && 2310 if (list_empty(&dwc->dw_zero_list) && 2312 end <= i_size_read(inode) && 2311 end <= i_size_read(inode) && 2313 !dwc->dw_orphaned) 2312 !dwc->dw_orphaned) 2314 goto out; 2313 goto out; 2315 2314 2316 ret = ocfs2_inode_lock(inode, &di_bh, 2315 ret = ocfs2_inode_lock(inode, &di_bh, 1); 2317 if (ret < 0) { 2316 if (ret < 0) { 2318 mlog_errno(ret); 2317 mlog_errno(ret); 2319 goto out; 2318 goto out; 2320 } 2319 } 2321 2320 2322 down_write(&oi->ip_alloc_sem); 2321 down_write(&oi->ip_alloc_sem); 2323 2322 2324 /* Delete orphan before acquire i_rws !! 2323 /* Delete orphan before acquire i_mutex. */ 2325 if (dwc->dw_orphaned) { 2324 if (dwc->dw_orphaned) { 2326 BUG_ON(dwc->dw_writer_pid != 2325 BUG_ON(dwc->dw_writer_pid != task_pid_nr(current)); 2327 2326 2328 end = end > i_size_read(inode 2327 end = end > i_size_read(inode) ? end : 0; 2329 2328 2330 ret = ocfs2_del_inode_from_or 2329 ret = ocfs2_del_inode_from_orphan(osb, inode, di_bh, 2331 !!end, end); 2330 !!end, end); 2332 if (ret < 0) 2331 if (ret < 0) 2333 mlog_errno(ret); 2332 mlog_errno(ret); 2334 } 2333 } 2335 2334 2336 di = (struct ocfs2_dinode *)di_bh->b_ !! 2335 di = (struct ocfs2_dinode *)di_bh; 2337 2336 2338 ocfs2_init_dinode_extent_tree(&et, IN 2337 ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh); 2339 2338 2340 /* Attach dealloc with extent tree in << 2341 * which are already unlinked from cu << 2342 * rotation and merging. 
	ret = ocfs2_inode_lock(inode, &di_bh, 1);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	down_write(&oi->ip_alloc_sem);

	/* Delete the orphan entry before acquiring i_rwsem. */
	if (dwc->dw_orphaned) {
		BUG_ON(dwc->dw_writer_pid != task_pid_nr(current));

		end = end > i_size_read(inode) ? end : 0;

		ret = ocfs2_del_inode_from_orphan(osb, inode, di_bh,
				!!end, end);
		if (ret < 0)
			mlog_errno(ret);
	}

	di = (struct ocfs2_dinode *)di_bh->b_data;

	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);

	/* Attach dealloc to the extent tree in case we may reuse extents
	 * which are already unlinked from the current extent tree due to
	 * extent rotation and merging.
	 */
	et.et_dealloc = &dealloc;

	ret = ocfs2_lock_allocators(inode, &et, 0, dwc->dw_zero_count*2,
				    &data_ac, &meta_ac);
	if (ret) {
		mlog_errno(ret);
		goto unlock;
	}

	credits = ocfs2_calc_extend_credits(inode->i_sb, &di->id2.i_list);

	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto unlock;
	}
	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto commit;
	}

	list_for_each_entry(ue, &dwc->dw_zero_list, ue_node) {
		ret = ocfs2_assure_trans_credits(handle, credits);
		if (ret < 0) {
			mlog_errno(ret);
			break;
		}
		ret = ocfs2_mark_extent_written(inode, &et, handle,
						ue->ue_cpos, 1,
						ue->ue_phys,
						meta_ac, &dealloc);
		if (ret < 0) {
			mlog_errno(ret);
			break;
		}
	}

	if (end > i_size_read(inode)) {
		ret = ocfs2_set_inode_size(handle, inode, di_bh, end);
		if (ret < 0)
			mlog_errno(ret);
	}
commit:
	ocfs2_commit_trans(osb, handle);
unlock:
	up_write(&oi->ip_alloc_sem);
	ocfs2_inode_unlock(inode, 1);
	brelse(di_bh);
out:
	if (data_ac)
		ocfs2_free_alloc_context(data_ac);
	if (meta_ac)
		ocfs2_free_alloc_context(meta_ac);
	ocfs2_run_deallocs(osb, &dealloc);
	ocfs2_dio_free_write_ctx(inode, dwc);

	return ret;
}

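/*
 * Note that ocfs2_dio_end_io_write() can block: it takes cluster locks
 * and runs a journal transaction. That is why ocfs2_dio_wr_get_block()
 * tags its buffers with set_buffer_defer_completion(), which lets the
 * dio core defer this completion to its workqueue instead of running it
 * from bio end_io (potentially irq) context.
 */
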
/*
 * ocfs2_dio_end_io is called by the dio core when a dio is finished. We're
 * particularly interested in the aio/dio case. We use the rw_lock DLM lock
 * to protect io on one node from truncation on another.
 */
static int ocfs2_dio_end_io(struct kiocb *iocb,
			    loff_t offset,
			    ssize_t bytes,
			    void *private)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	int level;
	int ret = 0;

	/* this io's submitter should not have unlocked this before we could */
	BUG_ON(!ocfs2_iocb_is_rw_locked(iocb));

	if (bytes <= 0)
		mlog_ratelimited(ML_ERROR, "Direct IO failed, bytes = %lld",
				 (long long)bytes);
	if (private) {
		if (bytes > 0)
			ret = ocfs2_dio_end_io_write(inode, private, offset,
						     bytes);
		else
			ocfs2_dio_free_write_ctx(inode, private);
	}

	ocfs2_iocb_clear_rw_locked(iocb);

	level = ocfs2_iocb_rw_locked_level(iocb);
	ocfs2_rw_unlock(inode, level);
	return ret;
}

static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	get_block_t *get_block;

	/*
	 * Fall back to buffered I/O if we see an inode without
	 * extents.
	 */
	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		return 0;

	/* Fall back to buffered I/O if we do not support append dio. */
	if (iocb->ki_pos + iter->count > i_size_read(inode) &&
	    !ocfs2_supports_append_dio(osb))
		return 0;

	if (iov_iter_rw(iter) == READ)
		get_block = ocfs2_lock_get_block;
	else
		get_block = ocfs2_dio_wr_get_block;

	return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
				    iter, get_block,
				    ocfs2_dio_end_io, 0);
}

const struct address_space_operations ocfs2_aops = {
	.dirty_folio		= block_dirty_folio,
	.read_folio		= ocfs2_read_folio,
	.readahead		= ocfs2_readahead,
	.writepages		= ocfs2_writepages,
	.write_begin		= ocfs2_write_begin,
	.write_end		= ocfs2_write_end,
	.bmap			= ocfs2_bmap,
	.direct_IO		= ocfs2_direct_IO,
	.invalidate_folio	= block_invalidate_folio,
	.release_folio		= ocfs2_release_folio,
	.migrate_folio		= buffer_migrate_folio,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_folio	= generic_error_remove_folio,
};
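
/*
 * ocfs2_aops is what the VFS and VM call into for this filesystem's
 * regular files: the folio-based callbacks above service buffered reads,
 * writeback and invalidation, while ->direct_IO carries the O_DIRECT
 * path built on the helpers in this file. It is installed on
 * i_mapping->a_ops by the inode setup code in inode.c.
 */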