/*
 * Device operations for the pnfs client.
 *
 * Copyright (c) 2002
 * The Regents of the University of Michigan
 * All Rights Reserved
 *
 * Dean Hildebrand <dhildebz@umich.edu>
 * Garth Goodson <Garth.Goodson@netapp.com>
 *
 * Permission is granted to use, copy, create derivative works, and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the University of Michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization. If
 * the above copyright notice or any other identification of the
 * University of Michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * This software is provided as is, without representation or warranty
 * of any kind either express or implied, including without limitation
 * the implied warranties of merchantability, fitness for a particular
 * purpose, or noninfringement. The Regents of the University of
 * Michigan shall not be liable for any damages, including special,
 * indirect, incidental, or consequential damages, with respect to any
 * claim arising out of or in connection with the use of the software,
 * even if it has been or is hereafter advised of the possibility of
 * such damages.
 */

#include <linux/export.h>
#include <linux/nfs_fs.h>
#include "nfs4session.h"
#include "internal.h"
#include "pnfs.h"

#include "nfs4trace.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS

/*
 * Device ID RCU cache. A device ID is unique per server and layout type.
 */
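/*
 * Lookups walk the per-bucket hlists under rcu_read_lock(); insertions and
 * removals are serialized by nfs4_deviceid_lock.  Each hashed node carries
 * one reference owned by the cache itself, dropped when it is unhashed.
 */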
#define NFS4_DEVICE_ID_HASH_BITS	5
#define NFS4_DEVICE_ID_HASH_SIZE	(1 << NFS4_DEVICE_ID_HASH_BITS)
#define NFS4_DEVICE_ID_HASH_MASK	(NFS4_DEVICE_ID_HASH_SIZE - 1)


static struct hlist_head nfs4_deviceid_cache[NFS4_DEVICE_ID_HASH_SIZE];
static DEFINE_SPINLOCK(nfs4_deviceid_lock);

#ifdef NFS_DEBUG
void
nfs4_print_deviceid(const struct nfs4_deviceid *id)
{
	u32 *p = (u32 *)id;

	dprintk("%s: device id= [%x%x%x%x]\n", __func__,
		p[0], p[1], p[2], p[3]);
}
EXPORT_SYMBOL_GPL(nfs4_print_deviceid);
#endif

static inline u32
nfs4_deviceid_hash(const struct nfs4_deviceid *id)
{
	unsigned char *cptr = (unsigned char *)id->data;
	unsigned int nbytes = NFS4_DEVICEID4_SIZE;
	u32 x = 0;

	while (nbytes--) {
		x *= 37;
		x += *cptr++;
	}
	return x & NFS4_DEVICE_ID_HASH_MASK;
}

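/*
 * Must be called under rcu_read_lock().  Nodes whose reference count has
 * already dropped to zero are being removed from the cache and are skipped.
 */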
static struct nfs4_deviceid_node *
_lookup_deviceid(const struct pnfs_layoutdriver_type *ld,
		 const struct nfs_client *clp, const struct nfs4_deviceid *id,
		 long hash)
{
	struct nfs4_deviceid_node *d;

	hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[hash], node)
		if (d->ld == ld && d->nfs_client == clp &&
		    !memcmp(&d->deviceid, id, sizeof(*id))) {
			if (atomic_read(&d->ref))
				return d;
			else
				continue;
		}
	return NULL;
}

static struct nfs4_deviceid_node *
nfs4_get_device_info(struct nfs_server *server,
		const struct nfs4_deviceid *dev_id,
		const struct cred *cred, gfp_t gfp_flags)
{
	struct nfs4_deviceid_node *d = NULL;
	struct pnfs_device *pdev = NULL;
	struct page **pages = NULL;
	u32 max_resp_sz;
	int max_pages;
	int rc, i;

	/*
	 * Use the session max response size as the basis for setting
	 * GETDEVICEINFO's maxcount
	 */
	max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
	max_pages = nfs_page_array_len(0, max_resp_sz);
	dprintk("%s: server %p max_resp_sz %u max_pages %d\n",
		__func__, server, max_resp_sz, max_pages);

	pdev = kzalloc(sizeof(*pdev), gfp_flags);
	if (!pdev)
		return NULL;

	pages = kcalloc(max_pages, sizeof(struct page *), gfp_flags);
	if (!pages)
		goto out_free_pdev;

	for (i = 0; i < max_pages; i++) {
		pages[i] = alloc_page(gfp_flags);
		if (!pages[i])
			goto out_free_pages;
	}

	memcpy(&pdev->dev_id, dev_id, sizeof(*dev_id));
	pdev->layout_type = server->pnfs_curr_ld->id;
	pdev->pages = pages;
	pdev->pgbase = 0;
	pdev->pglen = max_resp_sz;
	pdev->mincount = 0;
	pdev->maxcount = max_resp_sz - nfs41_maxgetdevinfo_overhead;

	rc = nfs4_proc_getdeviceinfo(server, pdev, cred);
	dprintk("%s getdevice info returns %d\n", __func__, rc);
	if (rc)
		goto out_free_pages;

	/*
	 * Found new device, need to decode it and then add it to the
	 * list of known devices for this mountpoint.
	 */
	d = server->pnfs_curr_ld->alloc_deviceid_node(server, pdev,
			gfp_flags);
	if (d && pdev->nocache)
		set_bit(NFS_DEVICEID_NOCACHE, &d->flags);

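/*
 * The reply pages and the pnfs_device descriptor are only needed while the
 * GETDEVICEINFO reply is decoded, so release them on both the success and
 * the error paths.  Counting i back down also copes with a partially
 * allocated page array.
 */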
out_free_pages:
	while (--i >= 0)
		__free_page(pages[i]);
	kfree(pages);
out_free_pdev:
	kfree(pdev);
	dprintk("<-- %s d %p\n", __func__, d);
	return d;
}

/*
 * Lookup a deviceid in cache and get a reference count on it if found
 *
 * @clp nfs_client associated with deviceid
 * @id deviceid to look up
 */
static struct nfs4_deviceid_node *
__nfs4_find_get_deviceid(struct nfs_server *server,
		const struct nfs4_deviceid *id, long hash)
{
	struct nfs4_deviceid_node *d;

	rcu_read_lock();
	d = _lookup_deviceid(server->pnfs_curr_ld, server->nfs_client, id,
			hash);
	if (d != NULL && !atomic_inc_not_zero(&d->ref))
		d = NULL;
	rcu_read_unlock();
	return d;
}

struct nfs4_deviceid_node *
nfs4_find_get_deviceid(struct nfs_server *server,
		const struct nfs4_deviceid *id, const struct cred *cred,
		gfp_t gfp_mask)
{
	long hash = nfs4_deviceid_hash(id);
	struct nfs4_deviceid_node *d, *new;

	d = __nfs4_find_get_deviceid(server, id, hash);
	if (d)
		goto found;

	new = nfs4_get_device_info(server, id, cred, gfp_mask);
	if (!new) {
		trace_nfs4_find_deviceid(server, id, -ENOENT);
		return new;
	}

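	/*
	 * nfs4_get_device_info() ran without nfs4_deviceid_lock held, so a
	 * concurrent lookup may already have inserted this deviceid;
	 * re-check under the lock and discard our copy if it lost the race.
	 */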
	spin_lock(&nfs4_deviceid_lock);
	d = __nfs4_find_get_deviceid(server, id, hash);
	if (d) {
		spin_unlock(&nfs4_deviceid_lock);
		server->pnfs_curr_ld->free_deviceid_node(new);
	} else {
		atomic_inc(&new->ref);
		hlist_add_head_rcu(&new->node, &nfs4_deviceid_cache[hash]);
		spin_unlock(&nfs4_deviceid_lock);
		d = new;
	}
found:
	trace_nfs4_find_deviceid(server, id, 0);
	return d;
}
EXPORT_SYMBOL_GPL(nfs4_find_get_deviceid);

/*
 * Remove a deviceid from cache
 *
 * @clp nfs_client associated with deviceid
 * @id the deviceid to unhash
 *
 * The deviceid node, if found, is unhashed and the reference held by the
 * cache is dropped.
 */
void
nfs4_delete_deviceid(const struct pnfs_layoutdriver_type *ld,
			 const struct nfs_client *clp, const struct nfs4_deviceid *id)
{
	struct nfs4_deviceid_node *d;

	spin_lock(&nfs4_deviceid_lock);
	rcu_read_lock();
	d = _lookup_deviceid(ld, clp, id, nfs4_deviceid_hash(id));
	rcu_read_unlock();
	if (!d) {
		spin_unlock(&nfs4_deviceid_lock);
		return;
	}
	hlist_del_init_rcu(&d->node);
	clear_bit(NFS_DEVICEID_NOCACHE, &d->flags);
	spin_unlock(&nfs4_deviceid_lock);

	/* balance the initial ref set in pnfs_insert_deviceid */
	nfs4_put_deviceid_node(d);
}
EXPORT_SYMBOL_GPL(nfs4_delete_deviceid);

void
nfs4_init_deviceid_node(struct nfs4_deviceid_node *d, struct nfs_server *server,
			const struct nfs4_deviceid *id)
{
	INIT_HLIST_NODE(&d->node);
	INIT_HLIST_NODE(&d->tmpnode);
	d->ld = server->pnfs_curr_ld;
	d->nfs_client = server->nfs_client;
	d->flags = 0;
	d->deviceid = *id;
	atomic_set(&d->ref, 1);
}
EXPORT_SYMBOL_GPL(nfs4_init_deviceid_node);

/*
 * Dereference a deviceid node and delete it when its reference count drops
 * to zero.
 *
 * @d deviceid node to put
 *
 * Returns true iff the node was deleted.
 * Note that the test for d->ref == 0 is sufficient to establish that the
 * node is no longer hashed in the global device id cache.
 */
bool
nfs4_put_deviceid_node(struct nfs4_deviceid_node *d)
{
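	/*
	 * An uncached (NFS_DEVICEID_NOCACHE) node is unhashed as soon as the
	 * last reference besides the cache's own is going away: a count of 2
	 * here means only this caller and the hash table still hold the node.
	 */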
	if (test_bit(NFS_DEVICEID_NOCACHE, &d->flags)) {
		if (atomic_add_unless(&d->ref, -1, 2))
			return false;
		nfs4_delete_deviceid(d->ld, d->nfs_client, &d->deviceid);
	}
	if (!atomic_dec_and_test(&d->ref))
		return false;
	trace_nfs4_deviceid_free(d->nfs_client, &d->deviceid);
	d->ld->free_deviceid_node(d);
	return true;
}
EXPORT_SYMBOL_GPL(nfs4_put_deviceid_node);

void
nfs4_mark_deviceid_available(struct nfs4_deviceid_node *node)
{
	if (test_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags)) {
		clear_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags);
		smp_mb__after_atomic();
	}
}
EXPORT_SYMBOL_GPL(nfs4_mark_deviceid_available);

void
nfs4_mark_deviceid_unavailable(struct nfs4_deviceid_node *node)
{
	node->timestamp_unavailable = jiffies;
	smp_mb__before_atomic();
	set_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags);
	smp_mb__after_atomic();
}
EXPORT_SYMBOL_GPL(nfs4_mark_deviceid_unavailable);

bool
nfs4_test_deviceid_unavailable(struct nfs4_deviceid_node *node)
{
	if (test_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags)) {
		unsigned long start, end;

		end = jiffies;
		start = end - PNFS_DEVICE_RETRY_TIMEOUT;
		if (time_in_range(node->timestamp_unavailable, start, end))
			return true;
		clear_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags);
		smp_mb__after_atomic();
	}
	return false;
}
EXPORT_SYMBOL_GPL(nfs4_test_deviceid_unavailable);

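/*
 * Unhash every deviceid in one hash chain that belongs to @clp, then drop
 * the cache's reference to each of those nodes outside of the locks.
 */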
static void
_deviceid_purge_client(const struct nfs_client *clp, long hash)
{
	struct nfs4_deviceid_node *d;
	HLIST_HEAD(tmp);

	spin_lock(&nfs4_deviceid_lock);
	rcu_read_lock();
	hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[hash], node)
		if (d->nfs_client == clp && atomic_read(&d->ref)) {
			hlist_del_init_rcu(&d->node);
			hlist_add_head(&d->tmpnode, &tmp);
			clear_bit(NFS_DEVICEID_NOCACHE, &d->flags);
		}
	rcu_read_unlock();
	spin_unlock(&nfs4_deviceid_lock);

	if (hlist_empty(&tmp))
		return;

	while (!hlist_empty(&tmp)) {
		d = hlist_entry(tmp.first, struct nfs4_deviceid_node, tmpnode);
		hlist_del(&d->tmpnode);
		nfs4_put_deviceid_node(d);
	}
}

void
nfs4_deviceid_purge_client(const struct nfs_client *clp)
{
	long h;

	if (!(clp->cl_exchange_flags & EXCHGID4_FLAG_USE_PNFS_MDS))
		return;
	for (h = 0; h < NFS4_DEVICE_ID_HASH_SIZE; h++)
		_deviceid_purge_client(clp, h);
}

/*
 * Stop use of all deviceids associated with an nfs_client
 */
void
nfs4_deviceid_mark_client_invalid(struct nfs_client *clp)
{
	struct nfs4_deviceid_node *d;
	int i;

	rcu_read_lock();
	for (i = 0; i < NFS4_DEVICE_ID_HASH_SIZE; i++) {
		hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[i], node)
			if (d->nfs_client == clp)
				set_bit(NFS_DEVICEID_INVALID, &d->flags);
	}
	rcu_read_unlock();
}