Linux/net/sunrpc/xprtrdma/frwr_ops.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015, 2017 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR).
 *
 * FRWR features ordered asynchronous registration and invalidation
 * of arbitrarily-sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA Read or Write using a FAST_REG
 * Work Request (frwr_map). When the RDMA operation is finished, this
 * Memory Region is invalidated using a LOCAL_INV Work Request
 * (frwr_unmap_async and frwr_unmap_sync).
 *
 * Typically FAST_REG Work Requests are not signaled, and neither are
 * RDMA Send Work Requests (with the exception of signaling occasionally
 * to prevent provider work queue overflows). This greatly reduces HCA
 * interrupt workload.
 */
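
/* Illustrative sketch of the Work Requests one RPC typically
 * generates, derived from the functions below:
 *
 *      frwr_send():        FAST_REG(MRn) -> ... -> FAST_REG(MR1) -> SEND
 *          (the server then performs RDMA Reads/Writes on the MRs)
 *      frwr_unmap_async(): LOCAL_INV(MR1) -> ... -> LOCAL_INV(MRn)
 *
 * Each LOCAL_INV is posted signaled, but only the final one's
 * completion handler completes or wakes the waiting RPC.
 */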

/* Transport recovery
 *
 * frwr_map and frwr_unmap_* cannot run at the same time the transport
 * connect worker is running. The connect worker holds the transport
 * send lock, just as ->send_request does. This prevents frwr_map and
 * the connect worker from running concurrently. When a connection is
 * closed, the Receive completion queue is drained before allowing
 * the connect worker to get control. This prevents frwr_unmap and the
 * connect worker from running concurrently.
 *
 * When the underlying transport disconnects, MRs that are in flight
 * are flushed and are likely unusable. Thus all MRs are destroyed.
 * New MRs are created on demand.
 */

#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

static void frwr_cid_init(struct rpcrdma_ep *ep,
                          struct rpcrdma_mr *mr)
{
        struct rpc_rdma_cid *cid = &mr->mr_cid;

        cid->ci_queue_id = ep->re_attr.send_cq->res.id;
        cid->ci_completion_id = mr->mr_ibmr->res.id;
}

static void frwr_mr_unmap(struct rpcrdma_mr *mr)
{
        if (mr->mr_device) {
                trace_xprtrdma_mr_unmap(mr);
                ib_dma_unmap_sg(mr->mr_device, mr->mr_sg, mr->mr_nents,
                                mr->mr_dir);
                mr->mr_device = NULL;
        }
}

/**
 * frwr_mr_release - Destroy one MR
 * @mr: MR allocated by frwr_mr_init
 *
 */
void frwr_mr_release(struct rpcrdma_mr *mr)
{
        int rc;

        frwr_mr_unmap(mr);

        rc = ib_dereg_mr(mr->mr_ibmr);
        if (rc)
                trace_xprtrdma_frwr_dereg(mr, rc);
        kfree(mr->mr_sg);
        kfree(mr);
}

static void frwr_mr_put(struct rpcrdma_mr *mr)
{
        frwr_mr_unmap(mr);

        /* The MR is returned to the req's MR free list instead
         * of to the xprt's MR free list. No spinlock is needed.
         */
        rpcrdma_mr_push(mr, &mr->mr_req->rl_free_mrs);
}

/**
 * frwr_reset - Place MRs back on @req's free list
 * @req: request to reset
 *
 * Used after a failed marshal. For FRWR, this means the MRs
 * don't have to be fully released and recreated.
 *
 * NB: This is safe only as long as none of @req's MRs are
 * involved with an ongoing asynchronous FAST_REG or LOCAL_INV
 * Work Request.
 */
void frwr_reset(struct rpcrdma_req *req)
{
        struct rpcrdma_mr *mr;

        while ((mr = rpcrdma_mr_pop(&req->rl_registered)))
                frwr_mr_put(mr);
}

/**
 * frwr_mr_init - Initialize one MR
 * @r_xprt: controlling transport instance
 * @mr: generic MR to prepare for FRWR
 *
 * Returns zero if successful. Otherwise a negative errno
 * is returned.
 */
int frwr_mr_init(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr)
{
        struct rpcrdma_ep *ep = r_xprt->rx_ep;
        unsigned int depth = ep->re_max_fr_depth;
        struct scatterlist *sg;
        struct ib_mr *frmr;

        sg = kcalloc_node(depth, sizeof(*sg), XPRTRDMA_GFP_FLAGS,
                          ibdev_to_node(ep->re_id->device));
        if (!sg)
                return -ENOMEM;

        frmr = ib_alloc_mr(ep->re_pd, ep->re_mrtype, depth);
        if (IS_ERR(frmr))
                goto out_mr_err;

        mr->mr_xprt = r_xprt;
        mr->mr_ibmr = frmr;
        mr->mr_device = NULL;
        INIT_LIST_HEAD(&mr->mr_list);
        init_completion(&mr->mr_linv_done);
        frwr_cid_init(ep, mr);

        sg_init_table(sg, depth);
        mr->mr_sg = sg;
        return 0;

out_mr_err:
        kfree(sg);
        trace_xprtrdma_frwr_alloc(mr, PTR_ERR(frmr));
        return PTR_ERR(frmr);
}
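
/* Illustrative caller sketch. The mr allocation shown here is
 * hypothetical; in this transport the expected caller is
 * rpcrdma_mrs_create() in verbs.c:
 *
 *      mr = kzalloc(sizeof(*mr), XPRTRDMA_GFP_FLAGS);
 *      if (!mr)
 *              return;
 *      if (frwr_mr_init(r_xprt, mr)) {
 *              kfree(mr);
 *              return;
 *      }
 *      // mr->mr_ibmr and mr->mr_sg are now ready for frwr_map()
 */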

/**
 * frwr_query_device - Prepare a transport for use with FRWR
 * @ep: endpoint to fill in
 * @device: RDMA device to query
 *
 * On success, sets:
 *      ep->re_attr
 *      ep->re_max_requests
 *      ep->re_max_rdma_segs
 *      ep->re_max_fr_depth
 *      ep->re_mrtype
 *
 * Return values:
 *   On success, returns zero.
 *   %-EINVAL - the device does not support FRWR memory registration
 *   %-ENOMEM - the device is not sufficiently capable for NFS/RDMA
 */
int frwr_query_device(struct rpcrdma_ep *ep, const struct ib_device *device)
{
        const struct ib_device_attr *attrs = &device->attrs;
        int max_qp_wr, depth, delta;
        unsigned int max_sge;

        if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) ||
            attrs->max_fast_reg_page_list_len == 0) {
                pr_err("rpcrdma: 'frwr' mode is not supported by device %s\n",
                       device->name);
                return -EINVAL;
        }

        max_sge = min_t(unsigned int, attrs->max_send_sge,
                        RPCRDMA_MAX_SEND_SGES);
        if (max_sge < RPCRDMA_MIN_SEND_SGES) {
                pr_err("rpcrdma: HCA provides only %u send SGEs\n", max_sge);
                return -ENOMEM;
        }
        ep->re_attr.cap.max_send_sge = max_sge;
        ep->re_attr.cap.max_recv_sge = 1;

        ep->re_mrtype = IB_MR_TYPE_MEM_REG;
        if (attrs->kernel_cap_flags & IBK_SG_GAPS_REG)
                ep->re_mrtype = IB_MR_TYPE_SG_GAPS;

        /* Quirk: Some devices advertise a large max_fast_reg_page_list_len
         * capability, but perform optimally when the MRs are not larger
         * than a page.
         */
        if (attrs->max_sge_rd > RPCRDMA_MAX_HDR_SEGS)
                ep->re_max_fr_depth = attrs->max_sge_rd;
        else
                ep->re_max_fr_depth = attrs->max_fast_reg_page_list_len;
        if (ep->re_max_fr_depth > RPCRDMA_MAX_DATA_SEGS)
                ep->re_max_fr_depth = RPCRDMA_MAX_DATA_SEGS;
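
        /* Example with illustrative numbers: a device advertising
         * max_fast_reg_page_list_len = 512 (and a small max_sge_rd)
         * is clamped to re_max_fr_depth = RPCRDMA_MAX_DATA_SEGS,
         * for instance 256, assuming that constant works out to
         * 1MB worth of 4KB pages.
         */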

        /* Add room for frwr register and invalidate WRs.
         * 1. FRWR reg WR for head
         * 2. FRWR invalidate WR for head
         * 3. N FRWR reg WRs for pagelist
         * 4. N FRWR invalidate WRs for pagelist
         * 5. FRWR reg WR for tail
         * 6. FRWR invalidate WR for tail
         * 7. The RDMA_SEND WR
         */
        depth = 7;

        /* Calculate N if the device max FRWR depth is smaller than
         * RPCRDMA_MAX_DATA_SEGS.
         */
        if (ep->re_max_fr_depth < RPCRDMA_MAX_DATA_SEGS) {
                delta = RPCRDMA_MAX_DATA_SEGS - ep->re_max_fr_depth;
                do {
                        depth += 2; /* FRWR reg + invalidate */
                        delta -= ep->re_max_fr_depth;
                } while (delta > 0);
        }
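
        /* Worked example (illustrative values): with
         * RPCRDMA_MAX_DATA_SEGS = 256 and re_max_fr_depth = 100:
         *
         *      delta = 156
         *      pass 1: depth = 9,  delta = 56
         *      pass 2: depth = 11, delta = -44  (loop exits)
         *
         * so one RPC may consume up to 11 Send Queue entries.
         */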

        max_qp_wr = attrs->max_qp_wr;
        max_qp_wr -= RPCRDMA_BACKWARD_WRS;
        max_qp_wr -= 1;
        if (max_qp_wr < RPCRDMA_MIN_SLOT_TABLE)
                return -ENOMEM;
        if (ep->re_max_requests > max_qp_wr)
                ep->re_max_requests = max_qp_wr;
        ep->re_attr.cap.max_send_wr = ep->re_max_requests * depth;
        if (ep->re_attr.cap.max_send_wr > max_qp_wr) {
                ep->re_max_requests = max_qp_wr / depth;
                if (!ep->re_max_requests)
                        return -ENOMEM;
                ep->re_attr.cap.max_send_wr = ep->re_max_requests * depth;
        }
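
        /* Example (illustrative values): with re_max_requests = 128
         * and depth = 7, max_send_wr starts at 896. If the adjusted
         * max_qp_wr is only 500, re_max_requests drops to
         * 500 / 7 = 71 and max_send_wr becomes 497 before the
         * backchannel and drain entries are added below.
         */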
        ep->re_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
        ep->re_attr.cap.max_send_wr += 1; /* for ib_drain_sq */
        ep->re_attr.cap.max_recv_wr = ep->re_max_requests;
        ep->re_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
        ep->re_attr.cap.max_recv_wr += RPCRDMA_MAX_RECV_BATCH;
        ep->re_attr.cap.max_recv_wr += 1; /* for ib_drain_rq */

        ep->re_max_rdma_segs =
                DIV_ROUND_UP(RPCRDMA_MAX_DATA_SEGS, ep->re_max_fr_depth);
        /* Reply chunks require segments for head and tail buffers */
        ep->re_max_rdma_segs += 2;
        if (ep->re_max_rdma_segs > RPCRDMA_MAX_HDR_SEGS)
                ep->re_max_rdma_segs = RPCRDMA_MAX_HDR_SEGS;
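
        /* Continuing the illustrative example: DIV_ROUND_UP(256, 100)
         * gives 3 chunk segments, plus 2 for head and tail, so
         * re_max_rdma_segs = 5.
         */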

        /* Ensure the underlying device is capable of conveying the
         * largest r/wsize NFS will ask for. This guarantees that
         * failing over from one RDMA device to another will not
         * break NFS I/O.
         */
        if ((ep->re_max_rdma_segs * ep->re_max_fr_depth) < RPCRDMA_MAX_SEGS)
                return -ENOMEM;

        return 0;
}

/**
 * frwr_map - Register a memory region
 * @r_xprt: controlling transport
 * @seg: memory region co-ordinates
 * @nsegs: number of segments remaining
 * @writing: true when RDMA Write will be used
 * @xid: XID of RPC using the registered memory
 * @mr: MR to fill in
 *
 * Prepare a REG_MR Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 *
 * Returns the next segment or a negative errno pointer.
 * On success, @mr is filled in.
 */
struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
                                struct rpcrdma_mr_seg *seg,
                                int nsegs, bool writing, __be32 xid,
                                struct rpcrdma_mr *mr)
{
        struct rpcrdma_ep *ep = r_xprt->rx_ep;
        struct ib_reg_wr *reg_wr;
        int i, n, dma_nents;
        struct ib_mr *ibmr;
        u8 key;

        if (nsegs > ep->re_max_fr_depth)
                nsegs = ep->re_max_fr_depth;
        for (i = 0; i < nsegs;) {
                sg_set_page(&mr->mr_sg[i], seg->mr_page,
                            seg->mr_len, seg->mr_offset);

                ++seg;
                ++i;
                if (ep->re_mrtype == IB_MR_TYPE_SG_GAPS)
                        continue;
                if ((i < nsegs && seg->mr_offset) ||
                    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
                        break;
        }
        mr->mr_dir = rpcrdma_data_dir(writing);
        mr->mr_nents = i;

        dma_nents = ib_dma_map_sg(ep->re_id->device, mr->mr_sg, mr->mr_nents,
                                  mr->mr_dir);
        if (!dma_nents)
                goto out_dmamap_err;
        mr->mr_device = ep->re_id->device;

        ibmr = mr->mr_ibmr;
        n = ib_map_mr_sg(ibmr, mr->mr_sg, dma_nents, NULL, PAGE_SIZE);
        if (n != dma_nents)
                goto out_mapmr_err;

        ibmr->iova &= 0x00000000ffffffff;
        ibmr->iova |= ((u64)be32_to_cpu(xid)) << 32;
        key = (u8)(ibmr->rkey & 0x000000FF);
        ib_update_fast_reg_key(ibmr, ++key);
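
        /* Example (illustrative): for xid 0x0000002a and a prior
         * rkey of 0x01020304, the iova's upper 32 bits become
         * 0x0000002a and the updated rkey is 0x01020305. Cycling
         * the low-order key byte on each registration makes a
         * stale rkey from an earlier RPC unlikely to still match.
         */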

        reg_wr = &mr->mr_regwr;
        reg_wr->mr = ibmr;
        reg_wr->key = ibmr->rkey;
        reg_wr->access = writing ?
                         IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
                         IB_ACCESS_REMOTE_READ;

        mr->mr_handle = ibmr->rkey;
        mr->mr_length = ibmr->length;
        mr->mr_offset = ibmr->iova;
        trace_xprtrdma_mr_map(mr);

        return seg;

out_dmamap_err:
        trace_xprtrdma_frwr_sgerr(mr, i);
        return ERR_PTR(-EIO);

out_mapmr_err:
        trace_xprtrdma_frwr_maperr(mr, n);
        return ERR_PTR(-EIO);
}

/**
 * frwr_wc_fastreg - Invoked by RDMA provider for a flushed FastReg WC
 * @cq: completion queue
 * @wc: WCE for a completed FastReg WR
 *
 * Each flushed MR gets destroyed after the QP has drained.
 */
static void frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
{
        struct ib_cqe *cqe = wc->wr_cqe;
        struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe);

        /* WARNING: Only wr_cqe and status are reliable at this point */
        trace_xprtrdma_wc_fastreg(wc, &mr->mr_cid);

        rpcrdma_flush_disconnect(cq->cq_context, wc);
}

/**
 * frwr_send - post Send WRs containing the RPC Call message
 * @r_xprt: controlling transport instance
 * @req: prepared RPC Call
 *
 * For FRWR, chain any FastReg WRs to the Send WR. Only a
 * single ib_post_send call is needed to register memory
 * and then post the Send WR.
 *
 * Returns the return code from ib_post_send.
 *
 * Caller must hold the transport send lock to ensure that the
 * pointers to the transport's rdma_cm_id and QP are stable.
 */
int frwr_send(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
        struct ib_send_wr *post_wr, *send_wr = &req->rl_wr;
        struct rpcrdma_ep *ep = r_xprt->rx_ep;
        struct rpcrdma_mr *mr;
        unsigned int num_wrs;
        int ret;

        num_wrs = 1;
        post_wr = send_wr;
        list_for_each_entry(mr, &req->rl_registered, mr_list) {
                trace_xprtrdma_mr_fastreg(mr);

                mr->mr_cqe.done = frwr_wc_fastreg;
                mr->mr_regwr.wr.next = post_wr;
                mr->mr_regwr.wr.wr_cqe = &mr->mr_cqe;
                mr->mr_regwr.wr.num_sge = 0;
                mr->mr_regwr.wr.opcode = IB_WR_REG_MR;
                mr->mr_regwr.wr.send_flags = 0;
                post_wr = &mr->mr_regwr.wr;
                ++num_wrs;
        }
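
        /* The loop above prepends each FAST_REG WR, so the chain
         * posted below looks like (illustrative, two MRs):
         *
         *      FAST_REG(MR2) -> FAST_REG(MR1) -> SEND
         *
         * The QP executes WRs in order, so both registrations are
         * performed before the Send that references them.
         */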

        if ((kref_read(&req->rl_kref) > 1) || num_wrs > ep->re_send_count) {
                send_wr->send_flags |= IB_SEND_SIGNALED;
                ep->re_send_count = min_t(unsigned int, ep->re_send_batch,
                                          num_wrs - ep->re_send_count);
        } else {
                send_wr->send_flags &= ~IB_SEND_SIGNALED;
                ep->re_send_count -= num_wrs;
        }
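
        /* Pacing example (illustrative): re_send_count is the budget
         * of unsignaled WRs remaining. Once a chain would exceed it,
         * that Send is posted IB_SEND_SIGNALED and the budget is
         * replenished from re_send_batch, so completions arrive
         * occasionally rather than once per RPC.
         */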

        trace_xprtrdma_post_send(req);
        ret = ib_post_send(ep->re_id->qp, post_wr, NULL);
        if (ret)
                trace_xprtrdma_post_send_err(r_xprt, req, ret);
        return ret;
}

/**
 * frwr_reminv - handle a remotely invalidated mr on the @mrs list
 * @rep: Received reply
 * @mrs: list of MRs to check
 *
 */
void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
{
        struct rpcrdma_mr *mr;

        list_for_each_entry(mr, mrs, mr_list)
                if (mr->mr_handle == rep->rr_inv_rkey) {
                        list_del_init(&mr->mr_list);
                        trace_xprtrdma_mr_reminv(mr);
                        frwr_mr_put(mr);
                        break;  /* only one invalidated MR per RPC */
                }
}

static void frwr_mr_done(struct ib_wc *wc, struct rpcrdma_mr *mr)
{
        if (likely(wc->status == IB_WC_SUCCESS))
                frwr_mr_put(mr);
}

/**
 * frwr_wc_localinv - Invoked by RDMA provider for a LOCAL_INV WC
 * @cq: completion queue
 * @wc: WCE for a completed LocalInv WR
 *
 */
static void frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
{
        struct ib_cqe *cqe = wc->wr_cqe;
        struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe);

        /* WARNING: Only wr_cqe and status are reliable at this point */
        trace_xprtrdma_wc_li(wc, &mr->mr_cid);
        frwr_mr_done(wc, mr);

        rpcrdma_flush_disconnect(cq->cq_context, wc);
}

/**
 * frwr_wc_localinv_wake - Invoked by RDMA provider for a LOCAL_INV WC
 * @cq: completion queue
 * @wc: WCE for a completed LocalInv WR
 *
 * Awaken anyone waiting for an MR to finish being fenced.
 */
static void frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
{
        struct ib_cqe *cqe = wc->wr_cqe;
        struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe);

        /* WARNING: Only wr_cqe and status are reliable at this point */
        trace_xprtrdma_wc_li_wake(wc, &mr->mr_cid);
        frwr_mr_done(wc, mr);
        complete(&mr->mr_linv_done);

        rpcrdma_flush_disconnect(cq->cq_context, wc);
}

/**
 * frwr_unmap_sync - invalidate memory regions that were registered for @req
 * @r_xprt: controlling transport instance
 * @req: rpcrdma_req with a non-empty list of MRs to process
 *
 * Sleeps until it is safe for the host CPU to access the previously mapped
 * memory regions. This guarantees that registered MRs are properly fenced
 * from the server before the RPC consumer accesses the data in them. It
 * also ensures proper Send flow control: waking the next RPC waits until
 * this RPC has relinquished all its Send Queue entries.
 */
void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
        struct ib_send_wr *first, **prev, *last;
        struct rpcrdma_ep *ep = r_xprt->rx_ep;
        const struct ib_send_wr *bad_wr;
        struct rpcrdma_mr *mr;
        int rc;

        /* ORDER: Invalidate all of the MRs first
         *
         * Chain the LOCAL_INV Work Requests and post them with
         * a single ib_post_send() call.
         */
        prev = &first;
        mr = rpcrdma_mr_pop(&req->rl_registered);
        do {
                trace_xprtrdma_mr_localinv(mr);
                r_xprt->rx_stats.local_inv_needed++;

                last = &mr->mr_invwr;
                last->next = NULL;
                last->wr_cqe = &mr->mr_cqe;
                last->sg_list = NULL;
                last->num_sge = 0;
                last->opcode = IB_WR_LOCAL_INV;
                last->send_flags = IB_SEND_SIGNALED;
                last->ex.invalidate_rkey = mr->mr_handle;

                last->wr_cqe->done = frwr_wc_localinv;

                *prev = last;
                prev = &last->next;
        } while ((mr = rpcrdma_mr_pop(&req->rl_registered)));
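
        /* The chain built above preserves pop order (illustrative):
         *
         *      LOCAL_INV(MR1) -> LOCAL_INV(MR2) -> ... -> LOCAL_INV(MRn)
         *
         * "last" points at the final WR; it is re-aimed below at the
         * handler that wakes this function's waiter.
         */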

        mr = container_of(last, struct rpcrdma_mr, mr_invwr);

        /* Strong send queue ordering guarantees that when the
         * last WR in the chain completes, all WRs in the chain
         * are complete.
         */
        last->wr_cqe->done = frwr_wc_localinv_wake;
        reinit_completion(&mr->mr_linv_done);

        /* Transport disconnect drains the receive CQ before it
         * replaces the QP. The RPC reply handler won't call us
         * unless re_id->qp is a valid pointer.
         */
        bad_wr = NULL;
        rc = ib_post_send(ep->re_id->qp, first, &bad_wr);

        /* The final LOCAL_INV WR in the chain is supposed to
         * do the wake. If it was never posted, the wake will
         * not happen, so don't wait in that case.
         */
        if (bad_wr != first)
                wait_for_completion(&mr->mr_linv_done);
        if (!rc)
                return;

        /* On error, the MRs get destroyed once the QP has drained. */
        trace_xprtrdma_post_linv_err(req, rc);

        /* Force a connection loss to ensure complete recovery.
         */
        rpcrdma_force_disconnect(ep);
}

/**
 * frwr_wc_localinv_done - Invoked by RDMA provider for a signaled LOCAL_INV WC
 * @cq: completion queue
 * @wc: WCE for a completed LocalInv WR
 *
 */
static void frwr_wc_localinv_done(struct ib_cq *cq, struct ib_wc *wc)
{
        struct ib_cqe *cqe = wc->wr_cqe;
        struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe);
        struct rpcrdma_rep *rep;

        /* WARNING: Only wr_cqe and status are reliable at this point */
        trace_xprtrdma_wc_li_done(wc, &mr->mr_cid);

        /* Ensure that @rep is generated before the MR is released */
        rep = mr->mr_req->rl_reply;
        smp_rmb();

        if (wc->status != IB_WC_SUCCESS) {
                if (rep)
                        rpcrdma_unpin_rqst(rep);
                rpcrdma_flush_disconnect(cq->cq_context, wc);
                return;
        }
        frwr_mr_put(mr);
        rpcrdma_complete_rqst(rep);
}

/**
 * frwr_unmap_async - invalidate memory regions that were registered for @req
 * @r_xprt: controlling transport instance
 * @req: rpcrdma_req with a non-empty list of MRs to process
 *
 * This guarantees that registered MRs are properly fenced from the
 * server before the RPC consumer accesses the data in them. It also
 * ensures proper Send flow control: waking the next RPC waits until
 * this RPC has relinquished all its Send Queue entries.
 */
void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
        struct ib_send_wr *first, *last, **prev;
        struct rpcrdma_ep *ep = r_xprt->rx_ep;
        struct rpcrdma_mr *mr;
        int rc;

        /* Chain the LOCAL_INV Work Requests and post them with
         * a single ib_post_send() call.
         */
        prev = &first;
        mr = rpcrdma_mr_pop(&req->rl_registered);
        do {
                trace_xprtrdma_mr_localinv(mr);
                r_xprt->rx_stats.local_inv_needed++;

                last = &mr->mr_invwr;
                last->next = NULL;
                last->wr_cqe = &mr->mr_cqe;
                last->sg_list = NULL;
                last->num_sge = 0;
                last->opcode = IB_WR_LOCAL_INV;
                last->send_flags = IB_SEND_SIGNALED;
                last->ex.invalidate_rkey = mr->mr_handle;

                last->wr_cqe->done = frwr_wc_localinv;

                *prev = last;
                prev = &last->next;
        } while ((mr = rpcrdma_mr_pop(&req->rl_registered)));

        /* Strong send queue ordering guarantees that when the
         * last WR in the chain completes, all WRs in the chain
         * are complete. The last completion will wake up the
         * RPC waiter.
         */
        last->wr_cqe->done = frwr_wc_localinv_done;

        /* Transport disconnect drains the receive CQ before it
         * replaces the QP. The RPC reply handler won't call us
         * unless re_id->qp is a valid pointer.
         */
        rc = ib_post_send(ep->re_id->qp, first, NULL);
        if (!rc)
                return;

        /* On error, the MRs get destroyed once the QP has drained. */
        trace_xprtrdma_post_linv_err(req, rc);

        /* The final LOCAL_INV WR in the chain is supposed to
         * do the wake. If it was never posted, the wake does
         * not happen. Unpin the rqst in preparation for its
         * retransmission.
         */
        rpcrdma_unpin_rqst(req->rl_reply);

        /* Force a connection loss to ensure complete recovery.
         */
        rpcrdma_force_disconnect(ep);
}

/**
 * frwr_wp_create - Create an MR for padding Write chunks
 * @r_xprt: transport resources to use
 *
 * Return 0 on success, negative errno on failure.
 */
int frwr_wp_create(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_ep *ep = r_xprt->rx_ep;
        struct rpcrdma_mr_seg seg;
        struct rpcrdma_mr *mr;

        mr = rpcrdma_mr_get(r_xprt);
        if (!mr)
                return -EAGAIN;
        mr->mr_req = NULL;
        ep->re_write_pad_mr = mr;

        seg.mr_len = XDR_UNIT;
        seg.mr_page = virt_to_page(ep->re_write_pad);
        seg.mr_offset = offset_in_page(ep->re_write_pad);
        if (IS_ERR(frwr_map(r_xprt, &seg, 1, true, xdr_zero, mr)))
                return -EIO;
        trace_xprtrdma_mr_fastreg(mr);

        mr->mr_cqe.done = frwr_wc_fastreg;
        mr->mr_regwr.wr.next = NULL;
        mr->mr_regwr.wr.wr_cqe = &mr->mr_cqe;
        mr->mr_regwr.wr.num_sge = 0;
        mr->mr_regwr.wr.opcode = IB_WR_REG_MR;
        mr->mr_regwr.wr.send_flags = 0;

        return ib_post_send(ep->re_id->qp, &mr->mr_regwr.wr, NULL);
}
