/*
 * Device operations for the pnfs client.
 *
 * Copyright (c) 2002
 * The Regents of the University of Michigan
 * All Rights Reserved
 *
 * Dean Hildebrand <dhildebz@umich.edu>
 * Garth Goodson <Garth.Goodson@netapp.com>
 *
 * Permission is granted to use, copy, create derivative works, and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the University of Michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization. If
 * the above copyright notice or any other identification of the
 * University of Michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * This software is provided as is, without representation or warranty
 * of any kind either express or implied, including without limitation
 * the implied warranties of merchantability, fitness for a particular
 * purpose, or noninfringement.  The Regents of the University of
 * Michigan shall not be liable for any damages, including special,
 * indirect, incidental, or consequential damages, with respect to any
 * claim arising out of or in connection with the use of the software,
 * even if it has been or is hereafter advised of the possibility of
 * such damages.
 */

#include <linux/export.h>
#include <linux/nfs_fs.h>
#include "nfs4session.h"
#include "internal.h"
#include "pnfs.h"

#include "nfs4trace.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS

/*
 * Device ID RCU cache. A device ID is unique per server and layout type.
 */
#define NFS4_DEVICE_ID_HASH_BITS	5
#define NFS4_DEVICE_ID_HASH_SIZE	(1 << NFS4_DEVICE_ID_HASH_BITS)
#define NFS4_DEVICE_ID_HASH_MASK	(NFS4_DEVICE_ID_HASH_SIZE - 1)

static struct hlist_head nfs4_deviceid_cache[NFS4_DEVICE_ID_HASH_SIZE];
static DEFINE_SPINLOCK(nfs4_deviceid_lock);

#ifdef NFS_DEBUG
void
nfs4_print_deviceid(const struct nfs4_deviceid *id)
{
	u32 *p = (u32 *)id;

	dprintk("%s: device id= [%x%x%x%x]\n", __func__,
		p[0], p[1], p[2], p[3]);
}
EXPORT_SYMBOL_GPL(nfs4_print_deviceid);
#endif

static inline u32
nfs4_deviceid_hash(const struct nfs4_deviceid *id)
{
	unsigned char *cptr = (unsigned char *)id->data;
	unsigned int nbytes = NFS4_DEVICEID4_SIZE;
	u32 x = 0;

	while (nbytes--) {
		x *= 37;
		x += *cptr++;
	}
	return x & NFS4_DEVICE_ID_HASH_MASK;
}

static struct nfs4_deviceid_node *
_lookup_deviceid(const struct pnfs_layoutdriver_type *ld,
		 const struct nfs_client *clp, const struct nfs4_deviceid *id,
		 long hash)
{
	struct nfs4_deviceid_node *d;

	hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[hash], node)
		if (d->ld == ld && d->nfs_client == clp &&
		    !memcmp(&d->deviceid, id, sizeof(*id))) {
			if (atomic_read(&d->ref))
				return d;
			else
				continue;
		}
	return NULL;
}

static struct nfs4_deviceid_node *
nfs4_get_device_info(struct nfs_server *server,
		const struct nfs4_deviceid *dev_id,
		const struct cred *cred, gfp_t gfp_flags)
{
	struct nfs4_deviceid_node *d = NULL;
	struct pnfs_device *pdev = NULL;
	struct page **pages = NULL;
	u32 max_resp_sz;
	int max_pages;
	int rc, i;

	/*
	 * Use the session max response size as the basis for setting
	 * GETDEVICEINFO's maxcount
	 */
	max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
	max_pages = nfs_page_array_len(0, max_resp_sz);
	dprintk("%s: server %p max_resp_sz %u max_pages %d\n",
		__func__, server, max_resp_sz, max_pages);

	pdev = kzalloc(sizeof(*pdev), gfp_flags);
	if (!pdev)
		return NULL;

	pages = kcalloc(max_pages, sizeof(struct page *), gfp_flags);
	if (!pages)
		goto out_free_pdev;

	for (i = 0; i < max_pages; i++) {
		pages[i] = alloc_page(gfp_flags);
		if (!pages[i])
			goto out_free_pages;
	}

	memcpy(&pdev->dev_id, dev_id, sizeof(*dev_id));
	pdev->layout_type = server->pnfs_curr_ld->id;
	pdev->pages = pages;
	pdev->pgbase = 0;
	pdev->pglen = max_resp_sz;
	pdev->mincount = 0;
	pdev->maxcount = max_resp_sz - nfs41_maxgetdevinfo_overhead;

	rc = nfs4_proc_getdeviceinfo(server, pdev, cred);
	dprintk("%s getdevice info returns %d\n", __func__, rc);
	if (rc)
		goto out_free_pages;

	/*
	 * Found new device, need to decode it and then add it to the
	 * list of known devices for this mountpoint.
	 */
	d = server->pnfs_curr_ld->alloc_deviceid_node(server, pdev,
			gfp_flags);
	if (d && pdev->nocache)
		set_bit(NFS_DEVICEID_NOCACHE, &d->flags);

out_free_pages:
	while (--i >= 0)
		__free_page(pages[i]);
	kfree(pages);
out_free_pdev:
	kfree(pdev);
	dprintk("<-- %s d %p\n", __func__, d);
	return d;
}

/*
 * Lookup a deviceid in cache and get a reference count on it if found
 *
 * @clp nfs_client associated with deviceid
 * @id deviceid to look up
 */
static struct nfs4_deviceid_node *
__nfs4_find_get_deviceid(struct nfs_server *server,
		const struct nfs4_deviceid *id, long hash)
{
	struct nfs4_deviceid_node *d;

	rcu_read_lock();
	d = _lookup_deviceid(server->pnfs_curr_ld, server->nfs_client, id,
			hash);
	if (d != NULL && !atomic_inc_not_zero(&d->ref))
		d = NULL;
	rcu_read_unlock();
	return d;
}

struct nfs4_deviceid_node *
nfs4_find_get_deviceid(struct nfs_server *server,
		const struct nfs4_deviceid *id, const struct cred *cred,
		gfp_t gfp_mask)
{
	long hash = nfs4_deviceid_hash(id);
	struct nfs4_deviceid_node *d, *new;

	d = __nfs4_find_get_deviceid(server, id, hash);
	if (d)
		goto found;

	new = nfs4_get_device_info(server, id, cred, gfp_mask);
	if (!new) {
		trace_nfs4_find_deviceid(server, id, -ENOENT);
		return new;
	}

	spin_lock(&nfs4_deviceid_lock);
	d = __nfs4_find_get_deviceid(server, id, hash);
	if (d) {
		spin_unlock(&nfs4_deviceid_lock);
		server->pnfs_curr_ld->free_deviceid_node(new);
	} else {
		atomic_inc(&new->ref);
		hlist_add_head_rcu(&new->node, &nfs4_deviceid_cache[hash]);
		spin_unlock(&nfs4_deviceid_lock);
		d = new;
	}
found:
	trace_nfs4_find_deviceid(server, id, 0);
	return d;
}
EXPORT_SYMBOL_GPL(nfs4_find_get_deviceid);

/*
 * Remove a deviceid from cache
 *
 * @clp nfs_client associated with deviceid
 * @id the deviceid to unhash
 *
 * @ret the unhashed node, if found and dereferenced to zero, NULL otherwise.
 */
void
nfs4_delete_deviceid(const struct pnfs_layoutdriver_type *ld,
			 const struct nfs_client *clp, const struct nfs4_deviceid *id)
{
	struct nfs4_deviceid_node *d;

	spin_lock(&nfs4_deviceid_lock);
	rcu_read_lock();
	d = _lookup_deviceid(ld, clp, id, nfs4_deviceid_hash(id));
	rcu_read_unlock();
	if (!d) {
		spin_unlock(&nfs4_deviceid_lock);
		return;
	}
	hlist_del_init_rcu(&d->node);
	clear_bit(NFS_DEVICEID_NOCACHE, &d->flags);
	spin_unlock(&nfs4_deviceid_lock);

	/* balance the initial ref set in pnfs_insert_deviceid */
	nfs4_put_deviceid_node(d);
}
EXPORT_SYMBOL_GPL(nfs4_delete_deviceid);

void
nfs4_init_deviceid_node(struct nfs4_deviceid_node *d, struct nfs_server *server,
			const struct nfs4_deviceid *id)
{
	INIT_HLIST_NODE(&d->node);
	INIT_HLIST_NODE(&d->tmpnode);
	d->ld = server->pnfs_curr_ld;
	d->nfs_client = server->nfs_client;
	d->flags = 0;
	d->deviceid = *id;
	atomic_set(&d->ref, 1);
}
EXPORT_SYMBOL_GPL(nfs4_init_deviceid_node);

/*
 * Dereference a deviceid node and delete it when its reference count drops
 * to zero.
 *
 * @d deviceid node to put
 *
 * return true iff the node was deleted
 * Note that the test for d->ref == 0 is sufficient to establish
 * that the node is no longer hashed in the global device id cache.
 */
bool
nfs4_put_deviceid_node(struct nfs4_deviceid_node *d)
{
	if (test_bit(NFS_DEVICEID_NOCACHE, &d->flags)) {
		if (atomic_add_unless(&d->ref, -1, 2))
			return false;
		nfs4_delete_deviceid(d->ld, d->nfs_client, &d->deviceid);
	}
	if (!atomic_dec_and_test(&d->ref))
		return false;
	trace_nfs4_deviceid_free(d->nfs_client, &d->deviceid);
	d->ld->free_deviceid_node(d);
	return true;
}
EXPORT_SYMBOL_GPL(nfs4_put_deviceid_node);

void
nfs4_mark_deviceid_available(struct nfs4_deviceid_node *node)
{
	if (test_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags)) {
		clear_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags);
		smp_mb__after_atomic();
	}
}
EXPORT_SYMBOL_GPL(nfs4_mark_deviceid_available);

void
nfs4_mark_deviceid_unavailable(struct nfs4_deviceid_node *node)
{
	node->timestamp_unavailable = jiffies;
	smp_mb__before_atomic();
	set_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags);
	smp_mb__after_atomic();
}
EXPORT_SYMBOL_GPL(nfs4_mark_deviceid_unavailable);

bool
nfs4_test_deviceid_unavailable(struct nfs4_deviceid_node *node)
{
	if (test_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags)) {
		unsigned long start, end;

		end = jiffies;
		start = end - PNFS_DEVICE_RETRY_TIMEOUT;
		if (time_in_range(node->timestamp_unavailable, start, end))
			return true;
		clear_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags);
		smp_mb__after_atomic();
	}
	return false;
}
EXPORT_SYMBOL_GPL(nfs4_test_deviceid_unavailable);

static void
_deviceid_purge_client(const struct nfs_client *clp, long hash)
{
	struct nfs4_deviceid_node *d;
	HLIST_HEAD(tmp);

	spin_lock(&nfs4_deviceid_lock);
	rcu_read_lock();
	hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[hash], node)
		if (d->nfs_client == clp && atomic_read(&d->ref)) {
			hlist_del_init_rcu(&d->node);
			hlist_add_head(&d->tmpnode, &tmp);
			clear_bit(NFS_DEVICEID_NOCACHE, &d->flags);
		}
	rcu_read_unlock();
	spin_unlock(&nfs4_deviceid_lock);

	if (hlist_empty(&tmp))
		return;

	while (!hlist_empty(&tmp)) {
		d = hlist_entry(tmp.first, struct nfs4_deviceid_node, tmpnode);
		hlist_del(&d->tmpnode);
		nfs4_put_deviceid_node(d);
	}
}

void
nfs4_deviceid_purge_client(const struct nfs_client *clp)
{
	long h;

	if (!(clp->cl_exchange_flags & EXCHGID4_FLAG_USE_PNFS_MDS))
		return;
	for (h = 0; h < NFS4_DEVICE_ID_HASH_SIZE; h++)
		_deviceid_purge_client(clp, h);
}

/*
 * Stop use of all deviceids associated with an nfs_client
 */
void
nfs4_deviceid_mark_client_invalid(struct nfs_client *clp)
{
	struct nfs4_deviceid_node *d;
	int i;

	rcu_read_lock();
	for (i = 0; i < NFS4_DEVICE_ID_HASH_SIZE; i++) {
		hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[i], node)
			if (d->nfs_client == clp)
				set_bit(NFS_DEVICEID_INVALID, &d->flags);
	}
	rcu_read_unlock();
}
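/*
 * Illustrative sketch, not part of pnfs_dev.c: roughly how a pNFS layout
 * driver is expected to plug into the device ID cache above.  The driver
 * embeds struct nfs4_deviceid_node in its own per-device structure, fills
 * it in from its ->alloc_deviceid_node() callback (invoked from
 * nfs4_get_device_info() once the GETDEVICEINFO reply is in pdev->pages),
 * and frees it from ->free_deviceid_node() when nfs4_put_deviceid_node()
 * drops the last reference.  All "example_" names below are hypothetical;
 * real drivers (filelayout, flexfiles, blocklayout) decode the reply and
 * carry considerably more state and error handling.
 */
struct example_deviceid_node {
	struct nfs4_deviceid_node node;	/* embedded so container_of() works */
	/* driver-private data-server/address state would live here */
};

static struct nfs4_deviceid_node *
example_alloc_deviceid_node(struct nfs_server *server,
		struct pnfs_device *pdev, gfp_t gfp_flags)
{
	struct example_deviceid_node *ed;

	ed = kzalloc(sizeof(*ed), gfp_flags);
	if (!ed)
		return NULL;
	/* XDR decoding of the device address in pdev->pages is omitted */
	nfs4_init_deviceid_node(&ed->node, server, &pdev->dev_id);
	return &ed->node;
}

static void
example_free_deviceid_node(struct nfs4_deviceid_node *d)
{
	kfree(container_of(d, struct example_deviceid_node, node));
}

/*
 * With .alloc_deviceid_node and .free_deviceid_node wired up in the
 * driver's struct pnfs_layoutdriver_type, layout decoding would then
 * resolve device IDs with something like the following (again only a
 * sketch; the server, cred and gfp arguments come from the caller):
 *
 *	d = nfs4_find_get_deviceid(server, &id, cred, gfp_flags);
 *	if (!d)
 *		return -ENODEV;
 *	... use container_of(d, struct example_deviceid_node, node) ...
 *	nfs4_put_deviceid_node(d);
 */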