// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ufs/inode.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 *  from
 *
 *  linux/fs/ext2/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie (sct@dcs.ed.ac.uk), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/uaccess.h>

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/iversion.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

static int ufs_block_to_path(struct inode *inode, sector_t i_block, unsigned offsets[4])
{
        struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi;
        int ptrs = uspi->s_apb;
        int ptrs_bits = uspi->s_apbshift;
        const long direct_blocks = UFS_NDADDR,
                indirect_blocks = ptrs,
                double_blocks = (1 << (ptrs_bits * 2));
        int n = 0;


        UFSD("ptrs=uspi->s_apb = %d,double_blocks=%ld \n",ptrs,double_blocks);
        if (i_block < direct_blocks) {
                offsets[n++] = i_block;
        } else if ((i_block -= direct_blocks) < indirect_blocks) {
                offsets[n++] = UFS_IND_BLOCK;
                offsets[n++] = i_block;
        } else if ((i_block -= indirect_blocks) < double_blocks) {
                offsets[n++] = UFS_DIND_BLOCK;
                offsets[n++] = i_block >> ptrs_bits;
                offsets[n++] = i_block & (ptrs - 1);
        } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
                offsets[n++] = UFS_TIND_BLOCK;
                offsets[n++] = i_block >> (ptrs_bits * 2);
                offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
                offsets[n++] = i_block & (ptrs - 1);
        } else {
                ufs_warning(inode->i_sb, "ufs_block_to_path", "block > big");
        }
        return n;
}
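
/*
 * Worked example (added commentary, illustrative numbers only): assume
 * UFS_NDADDR == 12 direct pointers and 1024 pointers per indirect block
 * (ptrs_bits == 10), as with 4K blocks and 32-bit UFS1 pointers.  Then
 * logical block 12 + 1024 + 5 falls in the double-indirect range: after
 * the two subtractions above i_block == 5, so the resulting path is
 * { UFS_DIND_BLOCK, 5 >> 10 == 0, 5 & 1023 == 5 } and depth n == 3.
 */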

typedef struct {
        void    *p;
        union {
                __fs32  key32;
                __fs64  key64;
        };
        struct buffer_head *bh;
} Indirect;

static inline int grow_chain32(struct ufs_inode_info *ufsi,
                               struct buffer_head *bh, __fs32 *v,
                               Indirect *from, Indirect *to)
{
        Indirect *p;
        unsigned seq;
        to->bh = bh;
        do {
                seq = read_seqbegin(&ufsi->meta_lock);
                to->key32 = *(__fs32 *)(to->p = v);
                for (p = from; p <= to && p->key32 == *(__fs32 *)p->p; p++)
                        ;
        } while (read_seqretry(&ufsi->meta_lock, seq));
        return (p > to);
}

static inline int grow_chain64(struct ufs_inode_info *ufsi,
                               struct buffer_head *bh, __fs64 *v,
                               Indirect *from, Indirect *to)
{
        Indirect *p;
        unsigned seq;
        to->bh = bh;
        do {
                seq = read_seqbegin(&ufsi->meta_lock);
                to->key64 = *(__fs64 *)(to->p = v);
                for (p = from; p <= to && p->key64 == *(__fs64 *)p->p; p++)
                        ;
        } while (read_seqretry(&ufsi->meta_lock, seq));
        return (p > to);
}
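
/*
 * Added commentary: grow_chain32()/grow_chain64() append one entry to the
 * Indirect chain and verify, within a meta_lock seqlock read section, that
 * every key collected so far still equals the pointer slot it was copied
 * from.  A zero return means a concurrent truncate changed some pointer;
 * the caller (ufs_frag_map() below) then drops its buffers and restarts
 * the whole walk from the "again:" label.
 */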

/*
 * Returns the location of the fragment from
 * the beginning of the filesystem.
 */

static u64 ufs_frag_map(struct inode *inode, unsigned offsets[4], int depth)
{
        struct ufs_inode_info *ufsi = UFS_I(inode);
        struct super_block *sb = inode->i_sb;
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        u64 mask = (u64) uspi->s_apbmask>>uspi->s_fpbshift;
        int shift = uspi->s_apbshift-uspi->s_fpbshift;
        Indirect chain[4], *q = chain;
        unsigned *p;
        unsigned flags = UFS_SB(sb)->s_flags;
        u64 res = 0;

        UFSD(": uspi->s_fpbshift = %d ,uspi->s_apbmask = %x, mask=%llx\n",
                uspi->s_fpbshift, uspi->s_apbmask,
                (unsigned long long)mask);

        if (depth == 0)
                goto no_block;

again:
        p = offsets;

        if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
                goto ufs2;

        if (!grow_chain32(ufsi, NULL, &ufsi->i_u1.i_data[*p++], chain, q))
                goto changed;
        if (!q->key32)
                goto no_block;
        while (--depth) {
                __fs32 *ptr;
                struct buffer_head *bh;
                unsigned n = *p++;

                bh = sb_bread(sb, uspi->s_sbbase +
                                  fs32_to_cpu(sb, q->key32) + (n>>shift));
                if (!bh)
                        goto no_block;
                ptr = (__fs32 *)bh->b_data + (n & mask);
                if (!grow_chain32(ufsi, bh, ptr, chain, ++q))
                        goto changed;
                if (!q->key32)
                        goto no_block;
        }
        res = fs32_to_cpu(sb, q->key32);
        goto found;

ufs2:
        if (!grow_chain64(ufsi, NULL, &ufsi->i_u1.u2_i_data[*p++], chain, q))
                goto changed;
        if (!q->key64)
                goto no_block;

        while (--depth) {
                __fs64 *ptr;
                struct buffer_head *bh;
                unsigned n = *p++;

                bh = sb_bread(sb, uspi->s_sbbase +
                                  fs64_to_cpu(sb, q->key64) + (n>>shift));
                if (!bh)
                        goto no_block;
                ptr = (__fs64 *)bh->b_data + (n & mask);
                if (!grow_chain64(ufsi, bh, ptr, chain, ++q))
                        goto changed;
                if (!q->key64)
                        goto no_block;
        }
        res = fs64_to_cpu(sb, q->key64);
found:
        res += uspi->s_sbbase;
no_block:
        while (q > chain) {
                brelse(q->bh);
                q--;
        }
        return res;

changed:
        while (q > chain) {
                brelse(q->bh);
                q--;
        }
        goto again;
}
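
/*
 * Illustrative numbers (added commentary, assumed geometry): with 4K
 * blocks split into 8 fragments (s_fpbshift == 3) and 1024 pointers per
 * block (s_apbshift == 10), shift == 7 and mask == 127.  Each offset n
 * from ufs_block_to_path() is thus split into an indirect-block fragment
 * (n >> shift) and a pointer slot inside it (n & mask), since sb_bread()
 * here reads one fragment-sized buffer rather than a whole block.
 */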

/*
 * Unpacking tails: we have a file with partial final block and
 * we had been asked to extend it.  If the fragment being written
 * is within the same block, we need to extend the tail just to cover
 * that fragment.  Otherwise the tail is extended to full block.
 *
 * Note that we might need to create a _new_ tail, but that will
 * be handled elsewhere; this is strictly for resizing old
 * ones.
 */
static bool
ufs_extend_tail(struct inode *inode, u64 writes_to,
                  int *err, struct page *locked_page)
{
        struct ufs_inode_info *ufsi = UFS_I(inode);
        struct super_block *sb = inode->i_sb;
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        unsigned lastfrag = ufsi->i_lastfrag;   /* it's a short file, so unsigned is enough */
        unsigned block = ufs_fragstoblks(lastfrag);
        unsigned new_size;
        void *p;
        u64 tmp;

        if (writes_to < (lastfrag | uspi->s_fpbmask))
                new_size = (writes_to & uspi->s_fpbmask) + 1;
        else
                new_size = uspi->s_fpb;

        p = ufs_get_direct_data_ptr(uspi, ufsi, block);
        tmp = ufs_new_fragments(inode, p, lastfrag, ufs_data_ptr_to_cpu(sb, p),
                                new_size - (lastfrag & uspi->s_fpbmask), err,
                                locked_page);
        return tmp != 0;
}
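
/*
 * Example of the sizing rule above (added commentary, assuming
 * s_fpb == 8): with i_lastfrag == 21 the tail occupies fragments 16..20.
 * A write to fragment 22 satisfies writes_to < (21 | 7) == 23, so the
 * tail grows just to new_size == (22 & 7) + 1 == 7 fragments; a write at
 * or past fragment 23 grows it to the full 8-fragment block instead.
 */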

/**
 * ufs_inode_getfrag() - allocate new fragment(s)
 * @inode: pointer to inode
 * @index: number of block pointer within the inode's array.
 * @new_fragment: number of new allocated fragment(s)
 * @err: we set it if something goes wrong
 * @new: we set it if we allocate new block
 * @locked_page: for ufs_new_fragments()
 */
static u64
ufs_inode_getfrag(struct inode *inode, unsigned index,
                  sector_t new_fragment, int *err,
                  int *new, struct page *locked_page)
{
        struct ufs_inode_info *ufsi = UFS_I(inode);
        struct super_block *sb = inode->i_sb;
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        u64 tmp, goal, lastfrag;
        unsigned nfrags = uspi->s_fpb;
        void *p;

        /* TODO : to be done for write support
        if ( (flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
                goto ufs2;
         */

        p = ufs_get_direct_data_ptr(uspi, ufsi, index);
        tmp = ufs_data_ptr_to_cpu(sb, p);
        if (tmp)
                goto out;

        lastfrag = ufsi->i_lastfrag;

        /* will that be a new tail? */
        if (new_fragment < UFS_NDIR_FRAGMENT && new_fragment >= lastfrag)
                nfrags = (new_fragment & uspi->s_fpbmask) + 1;

        goal = 0;
        if (index) {
                goal = ufs_data_ptr_to_cpu(sb,
                                 ufs_get_direct_data_ptr(uspi, ufsi, index - 1));
                if (goal)
                        goal += uspi->s_fpb;
        }
        tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment),
                                goal, nfrags, err, locked_page);

        if (!tmp) {
                *err = -ENOSPC;
                return 0;
        }

        if (new)
                *new = 1;
        inode_set_ctime_current(inode);
        if (IS_SYNC(inode))
                ufs_sync_inode (inode);
        mark_inode_dirty(inode);
out:
        return tmp + uspi->s_sbbase;

        /* This part : To be implemented ....
        Required only for writing, not required for READ-ONLY.
ufs2:

        u2_block = ufs_fragstoblks(fragment);
        u2_blockoff = ufs_fragnum(fragment);
        p = ufsi->i_u1.u2_i_data + block;
        goal = 0;

repeat2:
        tmp = fs32_to_cpu(sb, *p);
        lastfrag = ufsi->i_lastfrag;

        */
}
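
/*
 * Design note (added commentary): the "goal" above implements simple
 * goal-directed allocation - if the previous direct pointer is mapped,
 * ufs_new_fragments() is asked for space right behind it (goal + s_fpb),
 * so sequentially written files tend to stay physically contiguous.
 */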

/**
 * ufs_inode_getblock() - allocate new block
 * @inode: pointer to inode
 * @ind_block: block number of the indirect block
 * @index: number of pointer within the indirect block
 * @new_fragment: number of new allocated fragment
 *  (block will hold this fragment and also uspi->s_fpb-1)
 * @err: see ufs_inode_getfrag()
 * @new: see ufs_inode_getfrag()
 * @locked_page: see ufs_inode_getfrag()
 */
static u64
ufs_inode_getblock(struct inode *inode, u64 ind_block,
                  unsigned index, sector_t new_fragment, int *err,
                  int *new, struct page *locked_page)
{
        struct super_block *sb = inode->i_sb;
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        int shift = uspi->s_apbshift - uspi->s_fpbshift;
        u64 tmp = 0, goal;
        struct buffer_head *bh;
        void *p;

        if (!ind_block)
                return 0;

        bh = sb_bread(sb, ind_block + (index >> shift));
        if (unlikely(!bh)) {
                *err = -EIO;
                return 0;
        }

        index &= uspi->s_apbmask >> uspi->s_fpbshift;
        if (uspi->fs_magic == UFS2_MAGIC)
                p = (__fs64 *)bh->b_data + index;
        else
                p = (__fs32 *)bh->b_data + index;

        tmp = ufs_data_ptr_to_cpu(sb, p);
        if (tmp)
                goto out;

        if (index && (uspi->fs_magic == UFS2_MAGIC ?
                      (tmp = fs64_to_cpu(sb, ((__fs64 *)bh->b_data)[index-1])) :
                      (tmp = fs32_to_cpu(sb, ((__fs32 *)bh->b_data)[index-1]))))
                goal = tmp + uspi->s_fpb;
        else
                goal = bh->b_blocknr + uspi->s_fpb;
        tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), goal,
                                uspi->s_fpb, err, locked_page);
        if (!tmp)
                goto out;

        if (new)
                *new = 1;

        mark_buffer_dirty(bh);
        if (IS_SYNC(inode))
                sync_dirty_buffer(bh);
        inode_set_ctime_current(inode);
        mark_inode_dirty(inode);
out:
        brelse (bh);
        UFSD("EXIT\n");
        if (tmp)
                tmp += uspi->s_sbbase;
        return tmp;
}
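
/*
 * Addressing note (added commentary, assumed geometry of 8 fragments per
 * block): ind_block is a fragment number, so index >> shift picks which
 * fragment of the indirect block holds the wanted pointer, and the masked
 * index then selects the slot inside that one-fragment buffer - mirroring
 * the lookup arithmetic in ufs_frag_map() above.
 */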

/**
 * ufs_getfrag_block() - `get_block_t' function, interface between UFS and
 * read_folio, writepages and so on
 */

static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create)
{
        struct super_block *sb = inode->i_sb;
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        int err = 0, new = 0;
        unsigned offsets[4];
        int depth = ufs_block_to_path(inode, fragment >> uspi->s_fpbshift, offsets);
        u64 phys64 = 0;
        unsigned frag = fragment & uspi->s_fpbmask;

        phys64 = ufs_frag_map(inode, offsets, depth);
        if (!create)
                goto done;

        if (phys64) {
                if (fragment >= UFS_NDIR_FRAGMENT)
                        goto done;
                read_seqlock_excl(&UFS_I(inode)->meta_lock);
                if (fragment < UFS_I(inode)->i_lastfrag) {
                        read_sequnlock_excl(&UFS_I(inode)->meta_lock);
                        goto done;
                }
                read_sequnlock_excl(&UFS_I(inode)->meta_lock);
        }
        /* This code is entered only while writing ....? */

        mutex_lock(&UFS_I(inode)->truncate_mutex);

        UFSD("ENTER, ino %lu, fragment %llu\n", inode->i_ino, (unsigned long long)fragment);
        if (unlikely(!depth)) {
                ufs_warning(sb, "ufs_get_block", "block > big");
                err = -EIO;
                goto out;
        }

        if (UFS_I(inode)->i_lastfrag < UFS_NDIR_FRAGMENT) {
                unsigned lastfrag = UFS_I(inode)->i_lastfrag;
                unsigned tailfrags = lastfrag & uspi->s_fpbmask;
                if (tailfrags && fragment >= lastfrag) {
                        if (!ufs_extend_tail(inode, fragment,
                                             &err, bh_result->b_page))
                                goto out;
                }
        }

        if (depth == 1) {
                phys64 = ufs_inode_getfrag(inode, offsets[0], fragment,
                                           &err, &new, bh_result->b_page);
        } else {
                int i;
                phys64 = ufs_inode_getfrag(inode, offsets[0], fragment,
                                           &err, NULL, NULL);
                for (i = 1; i < depth - 1; i++)
                        phys64 = ufs_inode_getblock(inode, phys64, offsets[i],
                                                fragment, &err, NULL, NULL);
                phys64 = ufs_inode_getblock(inode, phys64, offsets[depth - 1],
                                        fragment, &err, &new, bh_result->b_page);
        }
out:
        if (phys64) {
                phys64 += frag;
                map_bh(bh_result, sb, phys64);
                if (new)
                        set_buffer_new(bh_result);
        }
        mutex_unlock(&UFS_I(inode)->truncate_mutex);
        return err;

done:
        if (phys64)
                map_bh(bh_result, sb, phys64 + frag);
        return 0;
}
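
/*
 * Allocation walk example (added commentary): for a fragment whose path
 * has depth == 3, the branch above first makes sure the double-indirect
 * block itself exists (ufs_inode_getfrag), then allocates the missing
 * indirect level and finally the data block, passing bh_result->b_page
 * only to the last step so that only the data block is tied to the page
 * being written.
 */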

static int ufs_writepages(struct address_space *mapping,
                struct writeback_control *wbc)
{
        return mpage_writepages(mapping, wbc, ufs_getfrag_block);
}

static int ufs_read_folio(struct file *file, struct folio *folio)
{
        return block_read_full_folio(folio, ufs_getfrag_block);
}

int ufs_prepare_chunk(struct page *page, loff_t pos, unsigned len)
{
        return __block_write_begin(page, pos, len, ufs_getfrag_block);
}

static void ufs_truncate_blocks(struct inode *);

static void ufs_write_failed(struct address_space *mapping, loff_t to)
{
        struct inode *inode = mapping->host;

        if (to > inode->i_size) {
                truncate_pagecache(inode, inode->i_size);
                ufs_truncate_blocks(inode);
        }
}

static int ufs_write_begin(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len,
                        struct page **pagep, void **fsdata)
{
        int ret;

        ret = block_write_begin(mapping, pos, len, pagep, ufs_getfrag_block);
        if (unlikely(ret))
                ufs_write_failed(mapping, pos + len);

        return ret;
}

static int ufs_write_end(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
                        struct page *page, void *fsdata)
{
        int ret;

        ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
        if (ret < len)
                ufs_write_failed(mapping, pos + len);
        return ret;
}

static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
{
        return generic_block_bmap(mapping,block,ufs_getfrag_block);
}

const struct address_space_operations ufs_aops = {
        .dirty_folio = block_dirty_folio,
        .invalidate_folio = block_invalidate_folio,
        .read_folio = ufs_read_folio,
        .writepages = ufs_writepages,
        .write_begin = ufs_write_begin,
        .write_end = ufs_write_end,
        .migrate_folio = buffer_migrate_folio,
        .bmap = ufs_bmap
};

static void ufs_set_inode_ops(struct inode *inode)
{
        if (S_ISREG(inode->i_mode)) {
                inode->i_op = &ufs_file_inode_operations;
                inode->i_fop = &ufs_file_operations;
                inode->i_mapping->a_ops = &ufs_aops;
        } else if (S_ISDIR(inode->i_mode)) {
                inode->i_op = &ufs_dir_inode_operations;
                inode->i_fop = &ufs_dir_operations;
                inode->i_mapping->a_ops = &ufs_aops;
        } else if (S_ISLNK(inode->i_mode)) {
                if (!inode->i_blocks) {
                        inode->i_link = (char *)UFS_I(inode)->i_u1.i_symlink;
                        inode->i_op = &simple_symlink_inode_operations;
                } else {
                        inode->i_mapping->a_ops = &ufs_aops;
                        inode->i_op = &page_symlink_inode_operations;
                        inode_nohighmem(inode);
                }
        } else
                init_special_inode(inode, inode->i_mode,
                                   ufs_get_inode_dev(inode->i_sb, UFS_I(inode)));
}
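
/*
 * Added note: the i_blocks test above distinguishes "fast" symlinks,
 * whose target string lives in the inode body itself (i_u1.i_symlink),
 * from ordinary ones that keep the target in a data block and therefore
 * go through ufs_aops like regular files.
 */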

static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
        struct ufs_inode_info *ufsi = UFS_I(inode);
        struct super_block *sb = inode->i_sb;
        umode_t mode;

        /*
         * Copy data to the in-core inode.
         */
        inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode);
        set_nlink(inode, fs16_to_cpu(sb, ufs_inode->ui_nlink));
        if (inode->i_nlink == 0)
                return -ESTALE;

        /*
         * Linux now has 32-bit uid and gid, so we can support EFT.
         */
        i_uid_write(inode, ufs_get_inode_uid(sb, ufs_inode));
        i_gid_write(inode, ufs_get_inode_gid(sb, ufs_inode));

        inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size);
        inode_set_atime(inode,
                        (signed)fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec),
                        0);
        inode_set_ctime(inode,
                        (signed)fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec),
                        0);
        inode_set_mtime(inode,
                        (signed)fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec),
                        0);
        inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks);
        inode->i_generation = fs32_to_cpu(sb, ufs_inode->ui_gen);
        ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags);
        ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
        ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);


        if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
                memcpy(ufsi->i_u1.i_data, &ufs_inode->ui_u2.ui_addr,
                       sizeof(ufs_inode->ui_u2.ui_addr));
        } else {
                memcpy(ufsi->i_u1.i_symlink, ufs_inode->ui_u2.ui_symlink,
                       sizeof(ufs_inode->ui_u2.ui_symlink) - 1);
                ufsi->i_u1.i_symlink[sizeof(ufs_inode->ui_u2.ui_symlink) - 1] = 0;
        }
        return 0;
}

static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
{
        struct ufs_inode_info *ufsi = UFS_I(inode);
        struct super_block *sb = inode->i_sb;
        umode_t mode;

        UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino);
        /*
         * Copy data to the in-core inode.
         */
        inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode);
        set_nlink(inode, fs16_to_cpu(sb, ufs2_inode->ui_nlink));
        if (inode->i_nlink == 0)
                return -ESTALE;

        /*
         * Linux now has 32-bit uid and gid, so we can support EFT.
         */
        i_uid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_uid));
        i_gid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_gid));

        inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size);
        inode_set_atime(inode, fs64_to_cpu(sb, ufs2_inode->ui_atime),
                        fs32_to_cpu(sb, ufs2_inode->ui_atimensec));
        inode_set_ctime(inode, fs64_to_cpu(sb, ufs2_inode->ui_ctime),
                        fs32_to_cpu(sb, ufs2_inode->ui_ctimensec));
        inode_set_mtime(inode, fs64_to_cpu(sb, ufs2_inode->ui_mtime),
                        fs32_to_cpu(sb, ufs2_inode->ui_mtimensec));
        inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks);
        inode->i_generation = fs32_to_cpu(sb, ufs2_inode->ui_gen);
        ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags);
        /*
        ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
        ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
        */

        if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
                memcpy(ufsi->i_u1.u2_i_data, &ufs2_inode->ui_u2.ui_addr,
                       sizeof(ufs2_inode->ui_u2.ui_addr));
        } else {
                memcpy(ufsi->i_u1.i_symlink, ufs2_inode->ui_u2.ui_symlink,
                       sizeof(ufs2_inode->ui_u2.ui_symlink) - 1);
                ufsi->i_u1.i_symlink[sizeof(ufs2_inode->ui_u2.ui_symlink) - 1] = 0;
        }
        return 0;
}

struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
{
        struct ufs_inode_info *ufsi;
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        struct buffer_head * bh;
        struct inode *inode;
        int err = -EIO;

        UFSD("ENTER, ino %lu\n", ino);

        if (ino < UFS_ROOTINO || ino > (uspi->s_ncg * uspi->s_ipg)) {
                ufs_warning(sb, "ufs_read_inode", "bad inode number (%lu)\n",
                            ino);
                return ERR_PTR(-EIO);
        }

        inode = iget_locked(sb, ino);
        if (!inode)
                return ERR_PTR(-ENOMEM);
        if (!(inode->i_state & I_NEW))
                return inode;

        ufsi = UFS_I(inode);

        bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
        if (!bh) {
                ufs_warning(sb, "ufs_read_inode", "unable to read inode %lu\n",
                            inode->i_ino);
                goto bad_inode;
        }
        if ((UFS_SB(sb)->s_flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
                struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;

                err = ufs2_read_inode(inode,
                                      ufs2_inode + ufs_inotofsbo(inode->i_ino));
        } else {
                struct ufs_inode *ufs_inode = (struct ufs_inode *)bh->b_data;

                err = ufs1_read_inode(inode,
                                      ufs_inode + ufs_inotofsbo(inode->i_ino));
        }
        brelse(bh);
        if (err)
                goto bad_inode;

        inode_inc_iversion(inode);
        ufsi->i_lastfrag =
                (inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
        ufsi->i_dir_start_lookup = 0;
        ufsi->i_osync = 0;

        ufs_set_inode_ops(inode);

        UFSD("EXIT\n");
        unlock_new_inode(inode);
        return inode;

bad_inode:
        iget_failed(inode);
        return ERR_PTR(err);
}

static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
        struct super_block *sb = inode->i_sb;
        struct ufs_inode_info *ufsi = UFS_I(inode);

        ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
        ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);

        ufs_set_inode_uid(sb, ufs_inode, i_uid_read(inode));
        ufs_set_inode_gid(sb, ufs_inode, i_gid_read(inode));

        ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
        ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb,
                                                 inode_get_atime_sec(inode));
        ufs_inode->ui_atime.tv_usec = 0;
        ufs_inode->ui_ctime.tv_sec = cpu_to_fs32(sb,
                                                 inode_get_ctime_sec(inode));
        ufs_inode->ui_ctime.tv_usec = 0;
        ufs_inode->ui_mtime.tv_sec = cpu_to_fs32(sb,
                                                 inode_get_mtime_sec(inode));
        ufs_inode->ui_mtime.tv_usec = 0;
        ufs_inode->ui_blocks = cpu_to_fs32(sb, inode->i_blocks);
        ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
        ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);

        if ((UFS_SB(sb)->s_flags & UFS_UID_MASK) == UFS_UID_EFT) {
                ufs_inode->ui_u3.ui_sun.ui_shadow = cpu_to_fs32(sb, ufsi->i_shadow);
                ufs_inode->ui_u3.ui_sun.ui_oeftflag = cpu_to_fs32(sb, ufsi->i_oeftflag);
        }

        if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
                /* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
                ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0];
        } else if (inode->i_blocks) {
                memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.i_data,
                       sizeof(ufs_inode->ui_u2.ui_addr));
        }
        else {
                memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
                       sizeof(ufs_inode->ui_u2.ui_symlink));
        }

        if (!inode->i_nlink)
                memset (ufs_inode, 0, sizeof(struct ufs_inode));
}

static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode)
{
        struct super_block *sb = inode->i_sb;
        struct ufs_inode_info *ufsi = UFS_I(inode);

        UFSD("ENTER\n");
        ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
        ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);

        ufs_inode->ui_uid = cpu_to_fs32(sb, i_uid_read(inode));
        ufs_inode->ui_gid = cpu_to_fs32(sb, i_gid_read(inode));

        ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
        ufs_inode->ui_atime = cpu_to_fs64(sb, inode_get_atime_sec(inode));
        ufs_inode->ui_atimensec = cpu_to_fs32(sb,
                                              inode_get_atime_nsec(inode));
        ufs_inode->ui_ctime = cpu_to_fs64(sb, inode_get_ctime_sec(inode));
        ufs_inode->ui_ctimensec = cpu_to_fs32(sb,
                                              inode_get_ctime_nsec(inode));
        ufs_inode->ui_mtime = cpu_to_fs64(sb, inode_get_mtime_sec(inode));
        ufs_inode->ui_mtimensec = cpu_to_fs32(sb,
                                              inode_get_mtime_nsec(inode));

        ufs_inode->ui_blocks = cpu_to_fs64(sb, inode->i_blocks);
        ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
        ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);

        if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
                /* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
                ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.u2_i_data[0];
        } else if (inode->i_blocks) {
                memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.u2_i_data,
                       sizeof(ufs_inode->ui_u2.ui_addr));
        } else {
                memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
                       sizeof(ufs_inode->ui_u2.ui_symlink));
        }

        if (!inode->i_nlink)
                memset (ufs_inode, 0, sizeof(struct ufs2_inode));
        UFSD("EXIT\n");
}

static int ufs_update_inode(struct inode * inode, int do_sync)
{
        struct super_block *sb = inode->i_sb;
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        struct buffer_head * bh;

        UFSD("ENTER, ino %lu\n", inode->i_ino);

        if (inode->i_ino < UFS_ROOTINO ||
            inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
                ufs_warning (sb, "ufs_read_inode", "bad inode number (%lu)\n", inode->i_ino);
                return -1;
        }

        bh = sb_bread(sb, ufs_inotofsba(inode->i_ino));
        if (!bh) {
                ufs_warning (sb, "ufs_read_inode", "unable to read inode %lu\n", inode->i_ino);
                return -1;
        }
        if (uspi->fs_magic == UFS2_MAGIC) {
                struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;

                ufs2_update_inode(inode,
                                  ufs2_inode + ufs_inotofsbo(inode->i_ino));
        } else {
                struct ufs_inode *ufs_inode = (struct ufs_inode *) bh->b_data;

                ufs1_update_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino));
        }

        mark_buffer_dirty(bh);
        if (do_sync)
                sync_dirty_buffer(bh);
        brelse (bh);

        UFSD("EXIT\n");
        return 0;
}

int ufs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
        return ufs_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}

int ufs_sync_inode (struct inode *inode)
{
        return ufs_update_inode (inode, 1);
}

void ufs_evict_inode(struct inode * inode)
{
        int want_delete = 0;

        if (!inode->i_nlink && !is_bad_inode(inode))
                want_delete = 1;

        truncate_inode_pages_final(&inode->i_data);
        if (want_delete) {
                inode->i_size = 0;
                if (inode->i_blocks &&
                    (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
                     S_ISLNK(inode->i_mode)))
                        ufs_truncate_blocks(inode);
                ufs_update_inode(inode, inode_needs_sync(inode));
        }

        invalidate_inode_buffers(inode);
        clear_inode(inode);

        if (want_delete)
                ufs_free_inode(inode);
}

struct to_free {
        struct inode *inode;
        u64 to;
        unsigned count;
};

static inline void free_data(struct to_free *ctx, u64 from, unsigned count)
{
        if (ctx->count && ctx->to != from) {
                ufs_free_blocks(ctx->inode, ctx->to - ctx->count, ctx->count);
                ctx->count = 0;
        }
        ctx->count += count;
        ctx->to = from + count;
}
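
/*
 * Coalescing example (added commentary, illustrative numbers):
 * free_data(ctx, 100, 8) followed by free_data(ctx, 108, 8) merges into
 * one pending run of 16 fragments, because the second "from" equals the
 * saved ctx->to; the final free_data(ctx, 0, 0) flush then issues a
 * single ufs_free_blocks() call covering fragments 100..115.
 */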

#define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift)

static void ufs_trunc_direct(struct inode *inode)
{
        struct ufs_inode_info *ufsi = UFS_I(inode);
        struct super_block * sb;
        struct ufs_sb_private_info * uspi;
        void *p;
        u64 frag1, frag2, frag3, frag4, block1, block2;
        struct to_free ctx = {.inode = inode};
        unsigned i, tmp;

        UFSD("ENTER: ino %lu\n", inode->i_ino);

        sb = inode->i_sb;
        uspi = UFS_SB(sb)->s_uspi;

        frag1 = DIRECT_FRAGMENT;
        frag4 = min_t(u64, UFS_NDIR_FRAGMENT, ufsi->i_lastfrag);
        frag2 = ((frag1 & uspi->s_fpbmask) ? ((frag1 | uspi->s_fpbmask) + 1) : frag1);
        frag3 = frag4 & ~uspi->s_fpbmask;
        block1 = block2 = 0;
        if (frag2 > frag3) {
                frag2 = frag4;
                frag3 = frag4 = 0;
        } else if (frag2 < frag3) {
                block1 = ufs_fragstoblks (frag2);
                block2 = ufs_fragstoblks (frag3);
        }

        UFSD("ino %lu, frag1 %llu, frag2 %llu, block1 %llu, block2 %llu,"
             " frag3 %llu, frag4 %llu\n", inode->i_ino,
             (unsigned long long)frag1, (unsigned long long)frag2,
             (unsigned long long)block1, (unsigned long long)block2,
             (unsigned long long)frag3, (unsigned long long)frag4);

        if (frag1 >= frag2)
                goto next1;

        /*
         * Free first free fragments
         */
        p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag1));
        tmp = ufs_data_ptr_to_cpu(sb, p);
        if (!tmp )
                ufs_panic (sb, "ufs_trunc_direct", "internal error");
        frag2 -= frag1;
        frag1 = ufs_fragnum (frag1);

        ufs_free_fragments(inode, tmp + frag1, frag2);

next1:
        /*
         * Free whole blocks
         */
        for (i = block1 ; i < block2; i++) {
                p = ufs_get_direct_data_ptr(uspi, ufsi, i);
                tmp = ufs_data_ptr_to_cpu(sb, p);
                if (!tmp)
                        continue;
                write_seqlock(&ufsi->meta_lock);
                ufs_data_ptr_clear(uspi, p);
                write_sequnlock(&ufsi->meta_lock);

                free_data(&ctx, tmp, uspi->s_fpb);
        }

        free_data(&ctx, 0, 0);

        if (frag3 >= frag4)
                goto next3;

        /*
         * Free last free fragments
         */
        p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag3));
        tmp = ufs_data_ptr_to_cpu(sb, p);
        if (!tmp )
                ufs_panic(sb, "ufs_truncate_direct", "internal error");
        frag4 = ufs_fragnum (frag4);
        write_seqlock(&ufsi->meta_lock);
        ufs_data_ptr_clear(uspi, p);
        write_sequnlock(&ufsi->meta_lock);

        ufs_free_fragments (inode, tmp, frag4);
next3:

        UFSD("EXIT: ino %lu\n", inode->i_ino);
}

static void free_full_branch(struct inode *inode, u64 ind_block, int depth)
{
        struct super_block *sb = inode->i_sb;
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        struct ufs_buffer_head *ubh = ubh_bread(sb, ind_block, uspi->s_bsize);
        unsigned i;

        if (!ubh)
                return;

        if (--depth) {
                for (i = 0; i < uspi->s_apb; i++) {
                        void *p = ubh_get_data_ptr(uspi, ubh, i);
                        u64 block = ufs_data_ptr_to_cpu(sb, p);
                        if (block)
                                free_full_branch(inode, block, depth);
                }
        } else {
                struct to_free ctx = {.inode = inode};

                for (i = 0; i < uspi->s_apb; i++) {
                        void *p = ubh_get_data_ptr(uspi, ubh, i);
                        u64 block = ufs_data_ptr_to_cpu(sb, p);
                        if (block)
                                free_data(&ctx, block, uspi->s_fpb);
                }
                free_data(&ctx, 0, 0);
        }

        ubh_bforget(ubh);
        ufs_free_blocks(inode, ind_block, uspi->s_fpb);
}

static void free_branch_tail(struct inode *inode, unsigned from, struct ufs_buffer_head *ubh, int depth)
{
        struct super_block *sb = inode->i_sb;
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        unsigned i;

        if (--depth) {
                for (i = from; i < uspi->s_apb ; i++) {
                        void *p = ubh_get_data_ptr(uspi, ubh, i);
                        u64 block = ufs_data_ptr_to_cpu(sb, p);
                        if (block) {
                                write_seqlock(&UFS_I(inode)->meta_lock);
                                ufs_data_ptr_clear(uspi, p);
                                write_sequnlock(&UFS_I(inode)->meta_lock);
                                ubh_mark_buffer_dirty(ubh);
                                free_full_branch(inode, block, depth);
                        }
                }
        } else {
                struct to_free ctx = {.inode = inode};

                for (i = from; i < uspi->s_apb; i++) {
                        void *p = ubh_get_data_ptr(uspi, ubh, i);
                        u64 block = ufs_data_ptr_to_cpu(sb, p);
                        if (block) {
                                write_seqlock(&UFS_I(inode)->meta_lock);
                                ufs_data_ptr_clear(uspi, p);
                                write_sequnlock(&UFS_I(inode)->meta_lock);
                                ubh_mark_buffer_dirty(ubh);
                                free_data(&ctx, block, uspi->s_fpb);
                        }
                }
                free_data(&ctx, 0, 0);
        }
        if (IS_SYNC(inode) && ubh_buffer_dirty(ubh))
                ubh_sync_block(ubh);
        ubh_brelse(ubh);
}
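
/*
 * Added commentary: free_full_branch() drops a subtree whose top pointer
 * has already been cleared, so it can bforget the indirect buffer and free
 * the block itself; free_branch_tail() instead trims an indirect block
 * that stays partially in use, which is why each cleared slot is written
 * back (ubh_mark_buffer_dirty) and synced for IS_SYNC inodes.
 */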
static void ufs_truncate_blocks(struct inode *inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned offsets[4];
	int depth;
	int depth2;
	unsigned i;
	struct ufs_buffer_head *ubh[3];
	void *p;
	u64 block;

	if (inode->i_size) {
		sector_t last = (inode->i_size - 1) >> uspi->s_bshift;
		depth = ufs_block_to_path(inode, last, offsets);
		if (!depth)
			return;
	} else {
		depth = 1;
	}

	for (depth2 = depth - 1; depth2; depth2--)
		if (offsets[depth2] != uspi->s_apb - 1)
			break;

	mutex_lock(&ufsi->truncate_mutex);
	if (depth == 1) {
		ufs_trunc_direct(inode);
		offsets[0] = UFS_IND_BLOCK;
	} else {
		/* get the blocks that should be partially emptied */
		p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]++);
		for (i = 0; i < depth2; i++) {
			block = ufs_data_ptr_to_cpu(sb, p);
			if (!block)
				break;
			ubh[i] = ubh_bread(sb, block, uspi->s_bsize);
			if (!ubh[i]) {
				write_seqlock(&ufsi->meta_lock);
				ufs_data_ptr_clear(uspi, p);
				write_sequnlock(&ufsi->meta_lock);
				break;
			}
			p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]++);
		}
		while (i--)
			free_branch_tail(inode, offsets[i + 1], ubh[i], depth - i - 1);
	}
	for (i = offsets[0]; i <= UFS_TIND_BLOCK; i++) {
		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
		block = ufs_data_ptr_to_cpu(sb, p);
		if (block) {
			write_seqlock(&ufsi->meta_lock);
			ufs_data_ptr_clear(uspi, p);
			write_sequnlock(&ufsi->meta_lock);
			free_full_branch(inode, block, i - UFS_IND_BLOCK + 1);
		}
	}
	read_seqlock_excl(&ufsi->meta_lock);
	ufsi->i_lastfrag = DIRECT_FRAGMENT;
	read_sequnlock_excl(&ufsi->meta_lock);
	mark_inode_dirty(inode);
	mutex_unlock(&ufsi->truncate_mutex);
}
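/*
 * Change the file size to @size.  The final fragment for the new size
 * is allocated first, so a failure there leaves the old file intact;
 * only then is the tail of the last page zeroed, i_size updated, and
 * everything beyond it freed.
 */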
%llu\n", 1200 inode->i_ino, (unsigned long lon 1200 inode->i_ino, (unsigned long long)size, 1201 (unsigned long long)i_size_read( 1201 (unsigned long long)i_size_read(inode)); 1202 1202 1203 if (!(S_ISREG(inode->i_mode) || S_ISD 1203 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 1204 S_ISLNK(inode->i_mode))) 1204 S_ISLNK(inode->i_mode))) 1205 return -EINVAL; 1205 return -EINVAL; 1206 if (IS_APPEND(inode) || IS_IMMUTABLE( 1206 if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) 1207 return -EPERM; 1207 return -EPERM; 1208 1208 1209 err = ufs_alloc_lastblock(inode, size 1209 err = ufs_alloc_lastblock(inode, size); 1210 1210 1211 if (err) 1211 if (err) 1212 goto out; 1212 goto out; 1213 1213 1214 block_truncate_page(inode->i_mapping, 1214 block_truncate_page(inode->i_mapping, size, ufs_getfrag_block); 1215 1215 1216 truncate_setsize(inode, size); 1216 truncate_setsize(inode, size); 1217 1217 1218 ufs_truncate_blocks(inode); 1218 ufs_truncate_blocks(inode); 1219 inode_set_mtime_to_ts(inode, inode_se 1219 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); 1220 mark_inode_dirty(inode); 1220 mark_inode_dirty(inode); 1221 out: 1221 out: 1222 UFSD("EXIT: err %d\n", err); 1222 UFSD("EXIT: err %d\n", err); 1223 return err; 1223 return err; 1224 } 1224 } 1225 1225 1226 int ufs_setattr(struct mnt_idmap *idmap, stru 1226 int ufs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, 1227 struct iattr *attr) 1227 struct iattr *attr) 1228 { 1228 { 1229 struct inode *inode = d_inode(dentry) 1229 struct inode *inode = d_inode(dentry); 1230 unsigned int ia_valid = attr->ia_vali 1230 unsigned int ia_valid = attr->ia_valid; 1231 int error; 1231 int error; 1232 1232 1233 error = setattr_prepare(&nop_mnt_idma 1233 error = setattr_prepare(&nop_mnt_idmap, dentry, attr); 1234 if (error) 1234 if (error) 1235 return error; 1235 return error; 1236 1236 1237 if (ia_valid & ATTR_SIZE && attr->ia_ 1237 if (ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) { 1238 error = ufs_truncate(inode, a 1238 error = ufs_truncate(inode, attr->ia_size); 1239 if (error) 1239 if (error) 1240 return error; 1240 return error; 1241 } 1241 } 1242 1242 1243 setattr_copy(&nop_mnt_idmap, inode, a 1243 setattr_copy(&nop_mnt_idmap, inode, attr); 1244 mark_inode_dirty(inode); 1244 mark_inode_dirty(inode); 1245 return 0; 1245 return 0; 1246 } 1246 } 1247 1247 1248 const struct inode_operations ufs_file_inode_ 1248 const struct inode_operations ufs_file_inode_operations = { 1249 .setattr = ufs_setattr, 1249 .setattr = ufs_setattr, 1250 }; 1250 }; 1251 1251