1 /* 1 /* 2 * Device operations for the pnfs client. 2 * Device operations for the pnfs client. 3 * 3 * 4 * Copyright (c) 2002 4 * Copyright (c) 2002 5 * The Regents of the University of Michigan 5 * The Regents of the University of Michigan 6 * All Rights Reserved 6 * All Rights Reserved 7 * 7 * 8 * Dean Hildebrand <dhildebz@umich.edu> 8 * Dean Hildebrand <dhildebz@umich.edu> 9 * Garth Goodson <Garth.Goodson@netapp.com> 9 * Garth Goodson <Garth.Goodson@netapp.com> 10 * 10 * 11 * Permission is granted to use, copy, create 11 * Permission is granted to use, copy, create derivative works, and 12 * redistribute this software and such deriva 12 * redistribute this software and such derivative works for any purpose, 13 * so long as the name of the University of M 13 * so long as the name of the University of Michigan is not used in 14 * any advertising or publicity pertaining to 14 * any advertising or publicity pertaining to the use or distribution 15 * of this software without specific, written 15 * of this software without specific, written prior authorization. If 16 * the above copyright notice or any other id 16 * the above copyright notice or any other identification of the 17 * University of Michigan is included in any 17 * University of Michigan is included in any copy of any portion of 18 * this software, then the disclaimer below m 18 * this software, then the disclaimer below must also be included. 19 * 19 * 20 * This software is provided as is, without r 20 * This software is provided as is, without representation or warranty 21 * of any kind either express or implied, inc 21 * of any kind either express or implied, including without limitation 22 * the implied warranties of merchantability, 22 * the implied warranties of merchantability, fitness for a particular 23 * purpose, or noninfringement. The Regents 23 * purpose, or noninfringement. 
The Regents of the University of 24 * Michigan shall not be liable for any damag 24 * Michigan shall not be liable for any damages, including special, 25 * indirect, incidental, or consequential dam 25 * indirect, incidental, or consequential damages, with respect to any 26 * claim arising out of or in connection with 26 * claim arising out of or in connection with the use of the software, 27 * even if it has been or is hereafter advise 27 * even if it has been or is hereafter advised of the possibility of 28 * such damages. 28 * such damages. 29 */ 29 */ 30 30 31 #include <linux/export.h> 31 #include <linux/export.h> 32 #include <linux/nfs_fs.h> 32 #include <linux/nfs_fs.h> 33 #include "nfs4session.h" 33 #include "nfs4session.h" 34 #include "internal.h" 34 #include "internal.h" 35 #include "pnfs.h" 35 #include "pnfs.h" 36 36 37 #include "nfs4trace.h" 37 #include "nfs4trace.h" 38 38 39 #define NFSDBG_FACILITY NFSDBG_PNFS 39 #define NFSDBG_FACILITY NFSDBG_PNFS 40 40 41 /* 41 /* 42 * Device ID RCU cache. A device ID is unique 42 * Device ID RCU cache. A device ID is unique per server and layout type. 
 */
#define NFS4_DEVICE_ID_HASH_BITS	5
#define NFS4_DEVICE_ID_HASH_SIZE	(1 << NFS4_DEVICE_ID_HASH_BITS)
#define NFS4_DEVICE_ID_HASH_MASK	(NFS4_DEVICE_ID_HASH_SIZE - 1)


/*
 * Global deviceid cache.  Readers walk the hash chains under
 * rcu_read_lock(); all list modifications are serialised by
 * nfs4_deviceid_lock.
 */
static struct hlist_head nfs4_deviceid_cache[NFS4_DEVICE_ID_HASH_SIZE];
static DEFINE_SPINLOCK(nfs4_deviceid_lock);

#ifdef NFS_DEBUG
/* Debug helper: dump a deviceid as four 32-bit hex words. */
void
nfs4_print_deviceid(const struct nfs4_deviceid *id)
{
	u32 *p = (u32 *)id;

	dprintk("%s: device id= [%x%x%x%x]\n", __func__,
		p[0], p[1], p[2], p[3]);
}
EXPORT_SYMBOL_GPL(nfs4_print_deviceid);
#endif

/*
 * Hash the raw deviceid bytes with a simple x = x*37 + byte scheme and
 * reduce the result to a bucket index for nfs4_deviceid_cache[].
 */
static inline u32
nfs4_deviceid_hash(const struct nfs4_deviceid *id)
{
	unsigned char *cptr = (unsigned char *)id->data;
	unsigned int nbytes = NFS4_DEVICEID4_SIZE;
	u32 x = 0;

	while (nbytes--) {
		x *= 37;
		x += *cptr++;
	}
	return x & NFS4_DEVICE_ID_HASH_MASK;
}

/*
 * Search bucket @hash for a node matching (@ld, @clp, @id).
 *
 * Caller must hold rcu_read_lock().  Nodes whose refcount has already
 * dropped to zero are dying and about to be unhashed, so they are
 * skipped rather than returned.  No reference is taken here; callers
 * that need one must use atomic_inc_not_zero() on d->ref themselves.
 */
static struct nfs4_deviceid_node *
_lookup_deviceid(const struct pnfs_layoutdriver_type *ld,
		const struct nfs_client *clp, const struct nfs4_deviceid *id,
		long hash)
{
	struct nfs4_deviceid_node *d;

	hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[hash], node)
		if (d->ld == ld && d->nfs_client == clp &&
		    !memcmp(&d->deviceid, id, sizeof(*id))) {
			if (atomic_read(&d->ref))
				return d;
			else
				continue;
		}
	return NULL;
}

/*
 * Fetch the device description from the server via GETDEVICEINFO and
 * let the layout driver decode the reply into a new deviceid node.
 *
 * Returns the new (not yet hashed) node, or NULL on any failure.  The
 * temporary pnfs_device descriptor and the reply pages are released on
 * all paths before returning.
 */
static struct nfs4_deviceid_node *
nfs4_get_device_info(struct nfs_server *server,
		const struct nfs4_deviceid *dev_id,
		const struct cred *cred, gfp_t gfp_flags)
{
	struct nfs4_deviceid_node *d = NULL;
	struct pnfs_device *pdev = NULL;
	struct page **pages = NULL;
	u32 max_resp_sz;
	int max_pages;
	int rc, i;

	/*
	 * Use the session max response size as the basis for setting
	 * GETDEVICEINFO's maxcount
	 */
	max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
	max_pages = nfs_page_array_len(0, max_resp_sz);
	dprintk("%s: server %p max_resp_sz %u max_pages %d\n",
		__func__, server, max_resp_sz, max_pages);

	pdev = kzalloc(sizeof(*pdev), gfp_flags);
	if (!pdev)
		return NULL;

	pages = kcalloc(max_pages, sizeof(struct page *), gfp_flags);
	if (!pages)
		goto out_free_pdev;

	for (i = 0; i < max_pages; i++) {
		pages[i] = alloc_page(gfp_flags);
		if (!pages[i])
			goto out_free_pages;
	}

	memcpy(&pdev->dev_id, dev_id, sizeof(*dev_id));
	pdev->layout_type = server->pnfs_curr_ld->id;
	pdev->pages = pages;
	pdev->pgbase = 0;
	pdev->pglen = max_resp_sz;
	pdev->mincount = 0;
	pdev->maxcount = max_resp_sz - nfs41_maxgetdevinfo_overhead;

	rc = nfs4_proc_getdeviceinfo(server, pdev, cred);
	dprintk("%s getdevice info returns %d\n", __func__, rc);
	if (rc)
		goto out_free_pages;

	/*
	 * Found new device, need to decode it and then add it to the
	 * list of known devices for this mountpoint.
	 */
	d = server->pnfs_curr_ld->alloc_deviceid_node(server, pdev,
			gfp_flags);
	if (d && pdev->nocache)
		set_bit(NFS_DEVICEID_NOCACHE, &d->flags);

out_free_pages:
	/* On the success path i == max_pages, so this frees every page. */
	while (--i >= 0)
		__free_page(pages[i]);
	kfree(pages);
out_free_pdev:
	kfree(pdev);
	dprintk("<-- %s d %p\n", __func__, d);
	return d;
}

/*
 * Lookup a deviceid in cache and get a reference count on it if found
 *
 * @clp nfs_client associated with deviceid
 * @id deviceid to look up
 */
static struct nfs4_deviceid_node *
__nfs4_find_get_deviceid(struct nfs_server *server,
		const struct nfs4_deviceid *id, long hash)
{
	struct nfs4_deviceid_node *d;

	rcu_read_lock();
	d = _lookup_deviceid(server->pnfs_curr_ld, server->nfs_client, id,
			hash);
	/* Refcount already zero means the node is dying: treat as a miss. */
	if (d != NULL && !atomic_inc_not_zero(&d->ref))
		d = NULL;
	rcu_read_unlock();
	return d;
}

/*
 * Find a referenced deviceid node for @id; on a cache miss issue
 * GETDEVICEINFO and insert the result.  The lookup is repeated under
 * nfs4_deviceid_lock so that if a concurrent insertion wins the race,
 * the freshly fetched node is discarded and the cached one returned.
 *
 * Returns a node whose reference the caller must drop with
 * nfs4_put_deviceid_node(), or NULL if the device cannot be obtained.
 */
struct nfs4_deviceid_node *
nfs4_find_get_deviceid(struct nfs_server *server,
		const struct nfs4_deviceid *id, const struct cred *cred,
		gfp_t gfp_mask)
{
	long hash = nfs4_deviceid_hash(id);
	struct nfs4_deviceid_node *d, *new;

	d = __nfs4_find_get_deviceid(server, id, hash);
	if (d)
		goto found;

	new = nfs4_get_device_info(server, id, cred, gfp_mask);
	if (!new) {
		trace_nfs4_find_deviceid(server, id, -ENOENT);
		return new;
	}

	spin_lock(&nfs4_deviceid_lock);
	d = __nfs4_find_get_deviceid(server, id, hash);
	if (d) {
		/* Lost the insertion race: drop our copy, keep the winner's. */
		spin_unlock(&nfs4_deviceid_lock);
		server->pnfs_curr_ld->free_deviceid_node(new);
	} else {
		/*
		 * Take a second reference for the hash table; the node's
		 * initial reference (set in nfs4_init_deviceid_node) is the
		 * one handed back to the caller.
		 */
		atomic_inc(&new->ref);
		hlist_add_head_rcu(&new->node, &nfs4_deviceid_cache[hash]);
		spin_unlock(&nfs4_deviceid_lock);
		d = new;
	}
found:
	trace_nfs4_find_deviceid(server, id, 0);
	return d;
}
EXPORT_SYMBOL_GPL(nfs4_find_get_deviceid);

/*
 * Remove a deviceid from cache
 *
 * @clp nfs_client associated with deviceid
 * @id the deviceid to unhash
 *
 * @ret the unhashed node, if found and dereferenced to zero, NULL otherwise.
 */
void
nfs4_delete_deviceid(const struct pnfs_layoutdriver_type *ld,
			 const struct nfs_client *clp, const struct nfs4_deviceid *id)
{
	struct nfs4_deviceid_node *d;

	spin_lock(&nfs4_deviceid_lock);
	/* _lookup_deviceid() walks an RCU list, so take the read lock too. */
	rcu_read_lock();
	d = _lookup_deviceid(ld, clp, id, nfs4_deviceid_hash(id));
	rcu_read_unlock();
	if (!d) {
		spin_unlock(&nfs4_deviceid_lock);
		return;
	}
	/*
	 * Unhash the node (concurrent RCU readers may still see it) and
	 * clear NOCACHE so that the final put below frees it directly
	 * instead of re-entering this function.
	 */
	hlist_del_init_rcu(&d->node);
	clear_bit(NFS_DEVICEID_NOCACHE, &d->flags);
	spin_unlock(&nfs4_deviceid_lock);

	/* balance the initial ref set in pnfs_insert_deviceid */
	nfs4_put_deviceid_node(d);
}
EXPORT_SYMBOL_GPL(nfs4_delete_deviceid);

/*
 * Initialise a layout-driver-allocated deviceid node before it is
 * published in the cache: record the owning layout driver and client,
 * copy the deviceid, and set the initial reference count to 1.
 */
void
nfs4_init_deviceid_node(struct nfs4_deviceid_node *d, struct nfs_server *server,
			const struct nfs4_deviceid *id)
{
	INIT_HLIST_NODE(&d->node);
	INIT_HLIST_NODE(&d->tmpnode);
	d->ld = server->pnfs_curr_ld;
	d->nfs_client = server->nfs_client;
	d->flags = 0;
	d->deviceid = *id;
	atomic_set(&d->ref, 1);
}
EXPORT_SYMBOL_GPL(nfs4_init_deviceid_node);

/*
 * Dereference a deviceid node and delete it when its reference count drops
 * to zero.
 *
 * @d deviceid node to put
 *
 * return true iff the node was deleted
 * Note that since the test for d->ref == 0 is sufficient to establish
 * that the node is no longer hashed in the global device id cache.
 */
bool
nfs4_put_deviceid_node(struct nfs4_deviceid_node *d)
{
	if (test_bit(NFS_DEVICEID_NOCACHE, &d->flags)) {
		/*
		 * NOCACHE nodes are unhashed as soon as the last "user"
		 * reference goes away: if the count is above 2 just drop
		 * one reference; otherwise unhash the node.  The delete
		 * path clears NOCACHE and calls back here once to drop
		 * the cache's own reference.
		 */
		if (atomic_add_unless(&d->ref, -1, 2))
			return false;
		nfs4_delete_deviceid(d->ld, d->nfs_client, &d->deviceid);
	}
	if (!atomic_dec_and_test(&d->ref))
		return false;
	trace_nfs4_deviceid_free(d->nfs_client, &d->deviceid);
	d->ld->free_deviceid_node(d);
	return true;
}
EXPORT_SYMBOL_GPL(nfs4_put_deviceid_node);

/*
 * Clear the "unavailable" marker on @node, making the deviceid usable
 * again.  Pairs with nfs4_mark_deviceid_unavailable().
 */
void
nfs4_mark_deviceid_available(struct nfs4_deviceid_node *node)
{
	if (test_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags)) {
		clear_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags);
		smp_mb__after_atomic();
	}
}
EXPORT_SYMBOL_GPL(nfs4_mark_deviceid_available);

/*
 * Mark @node unusable and timestamp the failure so that
 * nfs4_test_deviceid_unavailable() can expire the marker after
 * PNFS_DEVICE_RETRY_TIMEOUT.
 */
void
nfs4_mark_deviceid_unavailable(struct nfs4_deviceid_node *node)
{
	/* Publish the timestamp before the flag becomes visible. */
	node->timestamp_unavailable = jiffies;
	smp_mb__before_atomic();
	set_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags);
	smp_mb__after_atomic();
}
EXPORT_SYMBOL_GPL(nfs4_mark_deviceid_unavailable);

/*
 * Return true if @node is still marked unavailable, i.e. the flag is
 * set and was raised within the last PNFS_DEVICE_RETRY_TIMEOUT jiffies.
 * A stale marker is cleared here so the device gets retried.
 */
bool
nfs4_test_deviceid_unavailable(struct nfs4_deviceid_node *node)
{
	if (test_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags)) {
		unsigned long start, end;

		end = jiffies;
		start = end - PNFS_DEVICE_RETRY_TIMEOUT;
		if (time_in_range(node->timestamp_unavailable, start, end))
			return true;
		/* Retry window has elapsed: allow the device again. */
		clear_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags);
		smp_mb__after_atomic();
	}
	return false;
}
EXPORT_SYMBOL_GPL(nfs4_test_deviceid_unavailable);

/*
 * Unhash all of @clp's live deviceids in bucket @hash.  The nodes are
 * collected on a private list through their tmpnode link while the
 * locks are held, then their cache references are dropped outside the
 * locks (nfs4_put_deviceid_node() may free the node).
 */
static void
_deviceid_purge_client(const struct nfs_client *clp, long hash)
{
	struct nfs4_deviceid_node *d;
	HLIST_HEAD(tmp);

	spin_lock(&nfs4_deviceid_lock);
	rcu_read_lock();
	hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[hash], node)
		if (d->nfs_client == clp && atomic_read(&d->ref)) {
			hlist_del_init_rcu(&d->node);
			hlist_add_head(&d->tmpnode, &tmp);
			clear_bit(NFS_DEVICEID_NOCACHE, &d->flags);
		}
	rcu_read_unlock();
	spin_unlock(&nfs4_deviceid_lock);

	if (hlist_empty(&tmp))
		return;

	while (!hlist_empty(&tmp)) {
		d = hlist_entry(tmp.first, struct nfs4_deviceid_node, tmpnode);
		hlist_del(&d->tmpnode);
		nfs4_put_deviceid_node(d);
	}
}

/*
 * Purge every cached deviceid belonging to @clp.  A no-op unless the
 * client's EXCHANGE_ID flags show it is acting as a pNFS metadata
 * server client.
 */
void
nfs4_deviceid_purge_client(const struct nfs_client *clp)
{
	long h;

	if (!(clp->cl_exchange_flags & EXCHGID4_FLAG_USE_PNFS_MDS))
		return;
	for (h = 0; h < NFS4_DEVICE_ID_HASH_SIZE; h++)
		_deviceid_purge_client(clp, h);
}

/*
 * Stop use of all deviceids associated with an nfs_client
 */
void
nfs4_deviceid_mark_client_invalid(struct nfs_client *clp)
{
	struct nfs4_deviceid_node *d;
	int i;

	/* Flag every cached node for @clp; the nodes stay hashed. */
	rcu_read_lock();
	for (i = 0; i < NFS4_DEVICE_ID_HASH_SIZE; i++) {
		hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[i], node)
			if (d->nfs_client == clp)
				set_bit(NFS_DEVICEID_INVALID, &d->flags);
	}
	rcu_read_unlock();
}
Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.