// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * dcache.c
 *
 * dentry cache handling code
 *
 * Copyright (C) 2002, 2004 Oracle. All rights reserved.
 */

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/namei.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "dcache.h"
#include "dlmglue.h"
#include "file.h"
#include "inode.h"
#include "ocfs2_trace.h"

void ocfs2_dentry_attach_gen(struct dentry *dentry)
{
	unsigned long gen =
		OCFS2_I(d_inode(dentry->d_parent))->ip_dir_lock_gen;
	BUG_ON(d_inode(dentry));
	dentry->d_fsdata = (void *)gen;
}


static int ocfs2_dentry_revalidate(struct dentry *dentry, unsigned int flags)
{
	struct inode *inode;
	int ret = 0;    /* if all else fails, just return false */
	struct ocfs2_super *osb;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	inode = d_inode(dentry);
	osb = OCFS2_SB(dentry->d_sb);

	trace_ocfs2_dentry_revalidate(dentry, dentry->d_name.len,
				      dentry->d_name.name);

	/* For a negative dentry -
	 * check the generation number of the parent and compare with the
	 * one stored in the inode.
	 */
	if (inode == NULL) {
		unsigned long gen = (unsigned long) dentry->d_fsdata;
		unsigned long pgen;
		spin_lock(&dentry->d_lock);
		pgen = OCFS2_I(d_inode(dentry->d_parent))->ip_dir_lock_gen;
		spin_unlock(&dentry->d_lock);
		trace_ocfs2_dentry_revalidate_negative(dentry->d_name.len,
						       dentry->d_name.name,
						       pgen, gen);
		if (gen != pgen)
			goto bail;
		goto valid;
	}

	BUG_ON(!osb);

	if (inode == osb->root_inode || is_bad_inode(inode))
		goto bail;

	spin_lock(&OCFS2_I(inode)->ip_lock);
	/* did we or someone else delete this inode? */
	if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) {
		spin_unlock(&OCFS2_I(inode)->ip_lock);
		trace_ocfs2_dentry_revalidate_delete(
				(unsigned long long)OCFS2_I(inode)->ip_blkno);
		goto bail;
	}
	spin_unlock(&OCFS2_I(inode)->ip_lock);

	/*
	 * We don't need a cluster lock to test this because once an
	 * inode nlink hits zero, it never goes back.
	 */
	if (inode->i_nlink == 0) {
		trace_ocfs2_dentry_revalidate_orphaned(
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			S_ISDIR(inode->i_mode));
		goto bail;
	}

	/*
	 * If the last lookup failed to create dentry lock, let us
	 * redo it.
	 */
	if (!dentry->d_fsdata) {
		trace_ocfs2_dentry_revalidate_nofsdata(
				(unsigned long long)OCFS2_I(inode)->ip_blkno);
		goto bail;
	}

valid:
	ret = 1;

bail:
	trace_ocfs2_dentry_revalidate_ret(ret);
	return ret;
}

static int ocfs2_match_dentry(struct dentry *dentry,
			      u64 parent_blkno,
			      int skip_unhashed)
{
	struct inode *parent;

	/*
	 * ocfs2_lookup() does a d_splice_alias() _before_ attaching
	 * to the lock data, so we skip those here, otherwise
	 * ocfs2_dentry_attach_lock() will get its original dentry
	 * back.
	 */
	if (!dentry->d_fsdata)
		return 0;

	if (skip_unhashed && d_unhashed(dentry))
		return 0;

	parent = d_inode(dentry->d_parent);
	/* Name is in a different directory. */
	if (OCFS2_I(parent)->ip_blkno != parent_blkno)
		return 0;

	return 1;
}

/*
 * Walk the inode alias list, and find a dentry which has a given
 * parent. ocfs2_dentry_attach_lock() wants to find _any_ alias as it
 * is looking for a dentry_lock reference. The downconvert thread is
 * looking to unhash aliases, so we allow it to skip any that already
 * have that property.
 */
struct dentry *ocfs2_find_local_alias(struct inode *inode,
				      u64 parent_blkno,
				      int skip_unhashed)
{
	struct dentry *dentry;

	spin_lock(&inode->i_lock);
	hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
		spin_lock(&dentry->d_lock);
		if (ocfs2_match_dentry(dentry, parent_blkno, skip_unhashed)) {
			trace_ocfs2_find_local_alias(dentry->d_name.len,
						     dentry->d_name.name);

			dget_dlock(dentry);
			spin_unlock(&dentry->d_lock);
			spin_unlock(&inode->i_lock);
			return dentry;
		}
		spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&inode->i_lock);
	return NULL;
}

DEFINE_SPINLOCK(dentry_attach_lock);

/*
 * Attach this dentry to a cluster lock.
 *
 * Dentry locks cover all links in a given directory to a particular
 * inode. We do this so that ocfs2 can build a lock name which all
 * nodes in the cluster can agree on at all times. Shoving full names
 * in the cluster lock won't work due to size restrictions. Covering
 * links inside of a directory is a good compromise because it still
 * allows us to use the parent directory lock to synchronize
 * operations.
 *
 * Call this function with the parent dir semaphore and the parent dir
 * cluster lock held.
 *
 * The dir semaphore will protect us from having to worry about
 * concurrent processes on our node trying to attach a lock at the
 * same time.
 *
 * The dir cluster lock (held at either PR or EX mode) protects us
 * from unlink and rename on other nodes.
 *
 * A dput() can happen asynchronously due to pruning, so we cover
 * attaching and detaching the dentry lock with a
 * dentry_attach_lock.
 *
 * A node which has done lookup on a name retains a protected read
 * lock until final dput. If the user requests an unlink or rename,
 * the protected read is upgraded to an exclusive lock. Other nodes
 * who have seen the dentry will then be informed that they need to
 * downgrade their lock, which will involve d_delete on the
 * dentry. This happens in ocfs2_dentry_convert_worker().
 */
int ocfs2_dentry_attach_lock(struct dentry *dentry,
			     struct inode *inode,
			     u64 parent_blkno)
{
	int ret;
	struct dentry *alias;
	struct ocfs2_dentry_lock *dl = dentry->d_fsdata;

	trace_ocfs2_dentry_attach_lock(dentry->d_name.len, dentry->d_name.name,
				       (unsigned long long)parent_blkno, dl);

	/*
	 * Negative dentry. We ignore these for now.
	 *
	 * XXX: Could we improve ocfs2_dentry_revalidate() by
	 * tracking these?
	 */
	if (!inode)
		return 0;

	if (d_really_is_negative(dentry) && dentry->d_fsdata) {
		/* Converting a negative dentry to positive.
		   Clear dentry->d_fsdata */
		dentry->d_fsdata = dl = NULL;
	}

	if (dl) {
		mlog_bug_on_msg(dl->dl_parent_blkno != parent_blkno,
				" \"%pd\": old parent: %llu, new: %llu\n",
				dentry,
				(unsigned long long)parent_blkno,
				(unsigned long long)dl->dl_parent_blkno);
		return 0;
	}

	alias = ocfs2_find_local_alias(inode, parent_blkno, 0);
	if (alias) {
		/*
		 * Great, an alias exists, which means we must have a
		 * dentry lock already. We can just grab the lock off
		 * the alias and add it to the list.
		 *
		 * We're depending here on the fact that this dentry
		 * was found and exists in the dcache and so must have
		 * a reference to the dentry_lock because we can't
		 * race creates. Final dput() cannot happen on it
		 * since we have it pinned, so our reference is safe.
		 */
		dl = alias->d_fsdata;
		mlog_bug_on_msg(!dl, "parent %llu, ino %llu\n",
				(unsigned long long)parent_blkno,
				(unsigned long long)OCFS2_I(inode)->ip_blkno);

		mlog_bug_on_msg(dl->dl_parent_blkno != parent_blkno,
				" \"%pd\": old parent: %llu, new: %llu\n",
				dentry,
				(unsigned long long)parent_blkno,
				(unsigned long long)dl->dl_parent_blkno);

		trace_ocfs2_dentry_attach_lock_found(dl->dl_lockres.l_name,
				(unsigned long long)parent_blkno,
				(unsigned long long)OCFS2_I(inode)->ip_blkno);

		goto out_attach;
	}

	/*
	 * There are no other aliases
	 */
	dl = kmalloc(sizeof(*dl), GFP_NOFS);
	if (!dl) {
		ret = -ENOMEM;
		mlog_errno(ret);
		return ret;
	}

	dl->dl_count = 0;
	/*
	 * Does this have to happen below, for all attaches, in case
	 * the struct inode gets blown away by the downconvert thread?
	 */
	dl->dl_inode = igrab(inode);
	dl->dl_parent_blkno = parent_blkno;
	ocfs2_dentry_lock_res_init(dl, parent_blkno, inode);

out_attach:
	spin_lock(&dentry_attach_lock);
	if (unlikely(dentry->d_fsdata && !alias)) {
		/* d_fsdata is set by a racing thread which is doing
		 * the same thing as this thread is doing. Let the racing
		 * thread go ahead and just return here.
		 */
		spin_unlock(&dentry_attach_lock);
		iput(dl->dl_inode);
		ocfs2_lock_res_free(&dl->dl_lockres);
		kfree(dl);
		return 0;
	}

	dentry->d_fsdata = dl;
	dl->dl_count++;
	spin_unlock(&dentry_attach_lock);

	/*
	 * This actually gets us our PRMODE level lock. From now on,
	 * we'll have a notification if one of these names is
	 * destroyed on another node.
	 */
	ret = ocfs2_dentry_lock(dentry, 0);
	if (!ret)
		ocfs2_dentry_unlock(dentry, 0);
	else
		mlog_errno(ret);

	/*
	 * In case of error, manually free the allocation and do the iput().
	 * We need to do this because error here means no d_instantiate(),
	 * which means iput() will not be called during dput(dentry).
	 */
	if (ret < 0 && !alias) {
		ocfs2_lock_res_free(&dl->dl_lockres);
		BUG_ON(dl->dl_count != 1);
		spin_lock(&dentry_attach_lock);
		dentry->d_fsdata = NULL;
		spin_unlock(&dentry_attach_lock);
		kfree(dl);
		iput(inode);
	}

	dput(alias);

	return ret;
}

/*
 * ocfs2_dentry_iput() and friends.
 *
 * At this point, our particular dentry is detached from the inode's
 * alias list, so there's no way that the locking code can find it.
 *
 * The interesting stuff happens when we determine that our lock needs
 * to go away because this is the last subdir alias in the
 * system. This function needs to handle a couple things:
 *
 * 1) Synchronizing lock shutdown with the downconvert threads. This
 *    is already handled for us via the lockres release drop function
 *    called in ocfs2_release_dentry_lock()
 *
 * 2) A race may occur when we're doing our lock shutdown and
 *    another process wants to create a new dentry lock. Right now we
 *    let them race, which means that for a very short while, this
 *    node might have two locks on a lock resource. This should not be
 *    a problem though because one of them is in the process of being
 *    thrown out.
 */
static void ocfs2_drop_dentry_lock(struct ocfs2_super *osb,
				   struct ocfs2_dentry_lock *dl)
{
	iput(dl->dl_inode);
	ocfs2_simple_drop_lockres(osb, &dl->dl_lockres);
	ocfs2_lock_res_free(&dl->dl_lockres);
	kfree(dl);
}

void ocfs2_dentry_lock_put(struct ocfs2_super *osb,
			   struct ocfs2_dentry_lock *dl)
{
	int unlock = 0;

	BUG_ON(dl->dl_count == 0);

	spin_lock(&dentry_attach_lock);
	dl->dl_count--;
	unlock = !dl->dl_count;
	spin_unlock(&dentry_attach_lock);

	if (unlock)
		ocfs2_drop_dentry_lock(osb, dl);
}

static void ocfs2_dentry_iput(struct dentry *dentry, struct inode *inode)
{
	struct ocfs2_dentry_lock *dl = dentry->d_fsdata;

	if (!dl) {
		/*
		 * No dentry lock is ok if we're disconnected or
		 * unhashed.
		 */
		if (!(dentry->d_flags & DCACHE_DISCONNECTED) &&
		    !d_unhashed(dentry)) {
			unsigned long long ino = 0ULL;
			if (inode)
				ino = (unsigned long long)OCFS2_I(inode)->ip_blkno;
			mlog(ML_ERROR, "Dentry is missing cluster lock. "
			     "inode: %llu, d_flags: 0x%x, d_name: %pd\n",
			     ino, dentry->d_flags, dentry);
		}

		goto out;
	}

	mlog_bug_on_msg(dl->dl_count == 0, "dentry: %pd, count: %u\n",
			dentry, dl->dl_count);

	ocfs2_dentry_lock_put(OCFS2_SB(dentry->d_sb), dl);

out:
	iput(inode);
}

/*
 * d_move(), but keep the locks in sync.
 *
 * When we are done, "dentry" will have the parent dir and name of
 * "target", which will be thrown away.
 *
 * We manually update the lock of "dentry" if need be.
 *
 * "target" doesn't have its dentry lock touched - we allow the later
 * dput() to handle this for us.
 *
 * This is called during ocfs2_rename(), while holding parent
 * directory locks. The dentries have already been deleted on other
 * nodes via ocfs2_remote_dentry_delete().
 *
 * Normally, the VFS handles the d_move() for the file system, after
 * the ->rename() callback. OCFS2 wants to handle this internally, so
 * the new lock can be created atomically with respect to the cluster.
 */
void ocfs2_dentry_move(struct dentry *dentry, struct dentry *target,
		       struct inode *old_dir, struct inode *new_dir)
{
	int ret;
	struct ocfs2_super *osb = OCFS2_SB(old_dir->i_sb);
	struct inode *inode = d_inode(dentry);

	/*
	 * Move within the same directory, so the actual lock info won't
	 * change.
	 *
	 * XXX: Is there any advantage to dropping the lock here?
	 */
	if (old_dir == new_dir)
		goto out_move;

	ocfs2_dentry_lock_put(osb, dentry->d_fsdata);

	dentry->d_fsdata = NULL;
	ret = ocfs2_dentry_attach_lock(dentry, inode, OCFS2_I(new_dir)->ip_blkno);
	if (ret)
		mlog_errno(ret);

out_move:
	d_move(dentry, target);
}

const struct dentry_operations ocfs2_dentry_ops = {
	.d_revalidate		= ocfs2_dentry_revalidate,
	.d_iput			= ocfs2_dentry_iput,
};
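/*
 * Usage note: a minimal sketch of how these operations get hooked up.
 * ocfs2 installs them as the superblock-wide default from
 * ocfs2_fill_super() (fs/ocfs2/super.c), roughly:
 *
 *	sb->s_d_op = &ocfs2_dentry_ops;
 *
 * With s_d_op set, the VFS attaches ocfs2_dentry_ops to every dentry
 * allocated under the superblock, so the d_revalidate and d_iput
 * callbacks above run without any per-dentry d_set_d_op() calls.
 */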