
TOMOYO Linux Cross Reference
Linux/fs/smb/client/transport.c


  1 // SPDX-License-Identifier: LGPL-2.1
  2 /*
  3  *
  4  *   Copyright (C) International Business Machines  Corp., 2002,2008
  5  *   Author(s): Steve French (sfrench@us.ibm.com)
  6  *   Jeremy Allison (jra@samba.org) 2006.
  7  *
  8  */
  9 
 10 #include <linux/fs.h>
 11 #include <linux/list.h>
 12 #include <linux/gfp.h>
 13 #include <linux/wait.h>
 14 #include <linux/net.h>
 15 #include <linux/delay.h>
 16 #include <linux/freezer.h>
 17 #include <linux/tcp.h>
 18 #include <linux/bvec.h>
 19 #include <linux/highmem.h>
 20 #include <linux/uaccess.h>
 21 #include <linux/processor.h>
 22 #include <linux/mempool.h>
 23 #include <linux/sched/signal.h>
 24 #include <linux/task_io_accounting_ops.h>
 25 #include "cifspdu.h"
 26 #include "cifsglob.h"
 27 #include "cifsproto.h"
 28 #include "cifs_debug.h"
 29 #include "smb2proto.h"
 30 #include "smbdirect.h"
 31 
 32 /* Max number of iovectors we can use off the stack when sending requests. */
 33 #define CIFS_MAX_IOV_SIZE 8
 34 
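/*
 * cifs_wake_up_task - default mid callback for synchronous requests
 * @mid: mid entry whose response has been finalized
 *
 * Marks a received response as ready and wakes the task (stashed in
 * mid->callback_data by alloc_mid() below) sleeping in wait_for_response().
 */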
 35 void
 36 cifs_wake_up_task(struct mid_q_entry *mid)
 37 {
 38         if (mid->mid_state == MID_RESPONSE_RECEIVED)
 39                 mid->mid_state = MID_RESPONSE_READY;
 40         wake_up_process(mid->callback_data);
 41 }
 42 
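/*
 * alloc_mid - allocate and initialize a mid_q_entry for a request
 * @smb_buffer: request header the multiplex id is read from
 * @server: connection the request will be sent on
 *
 * The entry is refcounted; drop references with release_mid(). Note that
 * mempool_alloc() with GFP_NOFS waits for memory rather than failing,
 * which is why the result is not NULL-checked.
 */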
 43 static struct mid_q_entry *
 44 alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
 45 {
 46         struct mid_q_entry *temp;
 47 
 48         if (server == NULL) {
 49                 cifs_dbg(VFS, "%s: null TCP session\n", __func__);
 50                 return NULL;
 51         }
 52 
 53         temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
 54         memset(temp, 0, sizeof(struct mid_q_entry));
 55         kref_init(&temp->refcount);
 56         temp->mid = get_mid(smb_buffer);
 57         temp->pid = current->pid;
 58         temp->command = cpu_to_le16(smb_buffer->Command);
 59         cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
  60         /* jiffies are easier to use here; note that the time the mid */
  61         /* is allocated can be earlier than the time it is sent */
 62         temp->when_alloc = jiffies;
 63         temp->server = server;
 64 
 65         /*
 66          * The default is for the mid to be synchronous, so the
 67          * default callback just wakes up the current task.
 68          */
 69         get_task_struct(current);
 70         temp->creator = current;
 71         temp->callback = cifs_wake_up_task;
 72         temp->callback_data = current;
 73 
 74         atomic_inc(&mid_count);
 75         temp->mid_state = MID_REQUEST_ALLOCATED;
 76         return temp;
 77 }
 78 
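/*
 * __release_mid - free a mid once its last reference is dropped
 * @refcount: embedded kref of the mid_q_entry
 *
 * Called via kref_put() when the refcount hits zero. Lets the server ops
 * handle responses whose waiter was cancelled, updates the per-command
 * statistics when CONFIG_CIFS_STATS2 is enabled, frees the response
 * buffer and returns the entry to the mempool.
 */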
 79 void __release_mid(struct kref *refcount)
 80 {
 81         struct mid_q_entry *midEntry =
 82                         container_of(refcount, struct mid_q_entry, refcount);
 83 #ifdef CONFIG_CIFS_STATS2
 84         __le16 command = midEntry->server->vals->lock_cmd;
 85         __u16 smb_cmd = le16_to_cpu(midEntry->command);
 86         unsigned long now;
 87         unsigned long roundtrip_time;
 88 #endif
 89         struct TCP_Server_Info *server = midEntry->server;
 90 
 91         if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
 92             (midEntry->mid_state == MID_RESPONSE_RECEIVED ||
 93              midEntry->mid_state == MID_RESPONSE_READY) &&
 94             server->ops->handle_cancelled_mid)
 95                 server->ops->handle_cancelled_mid(midEntry, server);
 96 
 97         midEntry->mid_state = MID_FREE;
 98         atomic_dec(&mid_count);
 99         if (midEntry->large_buf)
100                 cifs_buf_release(midEntry->resp_buf);
101         else
102                 cifs_small_buf_release(midEntry->resp_buf);
103 #ifdef CONFIG_CIFS_STATS2
104         now = jiffies;
105         if (now < midEntry->when_alloc)
106                 cifs_server_dbg(VFS, "Invalid mid allocation time\n");
107         roundtrip_time = now - midEntry->when_alloc;
108 
109         if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
110                 if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
111                         server->slowest_cmd[smb_cmd] = roundtrip_time;
112                         server->fastest_cmd[smb_cmd] = roundtrip_time;
113                 } else {
114                         if (server->slowest_cmd[smb_cmd] < roundtrip_time)
115                                 server->slowest_cmd[smb_cmd] = roundtrip_time;
116                         else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
117                                 server->fastest_cmd[smb_cmd] = roundtrip_time;
118                 }
119                 cifs_stats_inc(&server->num_cmds[smb_cmd]);
120                 server->time_per_cmd[smb_cmd] += roundtrip_time;
121         }
122         /*
 123          * Commands taking longer than one second (the default) can indicate
 124          * that something is wrong, unless the link is quite slow or the
 125          * server is very busy. This calculation is unlikely to wrap as long
 126          * as slow_rsp_threshold is not set far above the recommended maximum
 127          * (32767, i.e. about 9 hours), and it is harmless even if wrong,
 128          * since it only affects debug counters - so the calculation is left
 129          * as a simple comparison rather than doing multiple conversions and
 130          * overflow checks.
131          */
132         if ((slow_rsp_threshold != 0) &&
133             time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
134             (midEntry->command != command)) {
135                 /*
136                  * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
137                  * NB: le16_to_cpu returns unsigned so can not be negative below
138                  */
139                 if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
140                         cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);
141 
142                 trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
143                                midEntry->when_sent, midEntry->when_received);
144                 if (cifsFYI & CIFS_TIMER) {
 145                         pr_debug("slow rsp: cmd %d mid %llu\n",
 146                                  smb_cmd, midEntry->mid);
147                         cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
148                                   now - midEntry->when_alloc,
149                                   now - midEntry->when_sent,
150                                   now - midEntry->when_received);
151                 }
152         }
153 #endif
154         put_task_struct(midEntry->creator);
155 
156         mempool_free(midEntry, cifs_mid_poolp);
157 }
158 
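/*
 * delete_mid - unlink a mid from pending_mid_q and drop a reference
 * @mid: mid entry to remove
 *
 * Safe to call even if the entry was already marked MID_DELETED by the
 * demultiplex thread; the list removal happens only once under mid_lock.
 */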
159 void
160 delete_mid(struct mid_q_entry *mid)
161 {
162         spin_lock(&mid->server->mid_lock);
163         if (!(mid->mid_flags & MID_DELETED)) {
164                 list_del_init(&mid->qhead);
165                 mid->mid_flags |= MID_DELETED;
166         }
167         spin_unlock(&mid->server->mid_lock);
168 
169         release_mid(mid);
170 }
171 
172 /*
173  * smb_send_kvec - send an array of kvecs to the server
174  * @server:     Server to send the data to
175  * @smb_msg:    Message to send
176  * @sent:       amount of data sent on socket is stored here
177  *
178  * Our basic "send data to server" function. Should be called with srv_mutex
179  * held. The caller is responsible for handling the results.
180  */
181 static int
182 smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
183               size_t *sent)
184 {
185         int rc = 0;
186         int retries = 0;
187         struct socket *ssocket = server->ssocket;
188 
189         *sent = 0;
190 
191         if (server->noblocksnd)
 192                 smb_msg->msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
193         else
194                 smb_msg->msg_flags = MSG_NOSIGNAL;
195 
196         while (msg_data_left(smb_msg)) {
197                 /*
 198                  * For a blocking send we try 3 times, since each attempt
 199                  * can block for 5 seconds. For a nonblocking send we have
 200                  * to try more times, but wait increasing amounts of time
 201                  * to allow the socket to clear. The overall time we wait
 202                  * in either case to send on the socket is about 15
 203                  * seconds. Similarly, in SendReceive[2] we wait up to 15
 204                  * seconds for the server to send a response back for
 205                  * most types of requests (except SMB writes past the end
 206                  * of file, which can be slow, and blocking lock
 207                  * operations). NFS waits slightly longer than CIFS, but
 208                  * that can make it take longer for nonresponsive servers
 209                  * to be detected, and 15 seconds is more than enough
 210                  * time for modern networks to send a packet. In most
 211                  * cases, if we fail to send after the retries we kill
 212                  * the socket and reconnect, which may clear the
 213                  * network problem.
214                  */
215                 rc = sock_sendmsg(ssocket, smb_msg);
216                 if (rc == -EAGAIN) {
217                         retries++;
218                         if (retries >= 14 ||
219                             (!server->noblocksnd && (retries > 2))) {
220                                 cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
221                                          ssocket);
222                                 return -EAGAIN;
223                         }
224                         msleep(1 << retries);
225                         continue;
226                 }
227 
228                 if (rc < 0)
229                         return rc;
230 
231                 if (rc == 0) {
 232                         /* should never happen; letting the socket clear
 233                          * before retrying is our only obvious option here */
234                         cifs_server_dbg(VFS, "tcp sent no data\n");
235                         msleep(500);
236                         continue;
237                 }
238 
239                 /* send was at least partially successful */
240                 *sent += rc;
241                 retries = 0; /* in case we get ENOSPC on the next send */
242         }
243         return 0;
244 }
245 
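/*
 * smb_rqst_len - number of bytes a request will occupy on the wire
 * @server: connection the request is intended for
 * @rqst: request to measure
 *
 * For SMB2+, rq_iov[0] holds only the 4-byte RFC1002 length field, which
 * is transmitted separately, so it is excluded from the total here.
 */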
246 unsigned long
247 smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
248 {
249         unsigned int i;
250         struct kvec *iov;
251         int nvec;
252         unsigned long buflen = 0;
253 
254         if (!is_smb1(server) && rqst->rq_nvec >= 2 &&
255             rqst->rq_iov[0].iov_len == 4) {
256                 iov = &rqst->rq_iov[1];
257                 nvec = rqst->rq_nvec - 1;
258         } else {
259                 iov = rqst->rq_iov;
260                 nvec = rqst->rq_nvec;
261         }
262 
263         /* total up iov array first */
264         for (i = 0; i < nvec; i++)
265                 buflen += iov[i].iov_len;
266 
267         buflen += iov_iter_count(&rqst->rq_iter);
268         return buflen;
269 }
270 
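/*
 * __smb_send_rqst - transmit one or more requests on the socket
 * @server: connection to send on
 * @num_rqst: number of requests in the @rqst array
 * @rqst: array of requests to send
 *
 * Sends the RFC1002 length marker (for SMB2+) followed by each request's
 * iovecs and data iterator, with all signals blocked so an interrupted
 * syscall cannot cause a partial send. A partial send for any other
 * reason tears down the socket to force a reconnect. Like
 * smb_send_kvec(), this should be called with srv_mutex held.
 */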
271 static int
272 __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
273                 struct smb_rqst *rqst)
274 {
275         int rc;
276         struct kvec *iov;
277         int n_vec;
278         unsigned int send_length = 0;
279         unsigned int i, j;
280         sigset_t mask, oldmask;
281         size_t total_len = 0, sent, size;
282         struct socket *ssocket = server->ssocket;
283         struct msghdr smb_msg = {};
284         __be32 rfc1002_marker;
285 
286         cifs_in_send_inc(server);
287         if (cifs_rdma_enabled(server)) {
288                 /* return -EAGAIN when connecting or reconnecting */
289                 rc = -EAGAIN;
290                 if (server->smbd_conn)
291                         rc = smbd_send(server, num_rqst, rqst);
292                 goto smbd_done;
293         }
294 
295         rc = -EAGAIN;
296         if (ssocket == NULL)
297                 goto out;
298 
299         rc = -ERESTARTSYS;
300         if (fatal_signal_pending(current)) {
301                 cifs_dbg(FYI, "signal pending before send request\n");
302                 goto out;
303         }
304 
305         rc = 0;
306         /* cork the socket */
307         tcp_sock_set_cork(ssocket->sk, true);
308 
309         for (j = 0; j < num_rqst; j++)
310                 send_length += smb_rqst_len(server, &rqst[j]);
311         rfc1002_marker = cpu_to_be32(send_length);
312 
313         /*
 314          * We should not allow signals to interrupt the network send because
 315          * any partial send will cause session reconnects, increasing the
 316          * latency of system calls and overloading the server with
 317          * unnecessary requests.
318          */
319 
320         sigfillset(&mask);
321         sigprocmask(SIG_BLOCK, &mask, &oldmask);
322 
 323         /* Generate an RFC1002 marker for SMB2+ */
324         if (!is_smb1(server)) {
325                 struct kvec hiov = {
326                         .iov_base = &rfc1002_marker,
327                         .iov_len  = 4
328                 };
329                 iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, &hiov, 1, 4);
330                 rc = smb_send_kvec(server, &smb_msg, &sent);
331                 if (rc < 0)
332                         goto unmask;
333 
334                 total_len += sent;
335                 send_length += 4;
336         }
337 
338         cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);
339 
340         for (j = 0; j < num_rqst; j++) {
341                 iov = rqst[j].rq_iov;
342                 n_vec = rqst[j].rq_nvec;
343 
344                 size = 0;
345                 for (i = 0; i < n_vec; i++) {
346                         dump_smb(iov[i].iov_base, iov[i].iov_len);
347                         size += iov[i].iov_len;
348                 }
349 
350                 iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, iov, n_vec, size);
351 
352                 rc = smb_send_kvec(server, &smb_msg, &sent);
353                 if (rc < 0)
354                         goto unmask;
355 
356                 total_len += sent;
357 
358                 if (iov_iter_count(&rqst[j].rq_iter) > 0) {
359                         smb_msg.msg_iter = rqst[j].rq_iter;
360                         rc = smb_send_kvec(server, &smb_msg, &sent);
361                         if (rc < 0)
362                                 break;
363                         total_len += sent;
364                 }
365 
 366         }
367 
368 unmask:
369         sigprocmask(SIG_SETMASK, &oldmask, NULL);
370 
371         /*
 372          * If a signal is pending but we have already sent the whole packet
 373          * to the server, return success so that the corresponding mid entry
 374          * is kept in the pending requests queue and the client can still
 375          * handle the server's response.
 376          *
 377          * If only part of the packet has been sent, there is no need to
 378          * hide the interrupt because the session will be reconnected anyway,
 379          * so there won't be any response from the server to handle.
380          */
381 
382         if (signal_pending(current) && (total_len != send_length)) {
383                 cifs_dbg(FYI, "signal is pending after attempt to send\n");
384                 rc = -ERESTARTSYS;
385         }
386 
387         /* uncork it */
388         tcp_sock_set_cork(ssocket->sk, false);
389 
390         if ((total_len > 0) && (total_len != send_length)) {
391                 cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
392                          send_length, total_len);
393                 /*
394                  * If we have only sent part of an SMB then the next SMB could
395                  * be taken as the remainder of this one. We need to kill the
396                  * socket so the server throws away the partial SMB
397                  */
398                 cifs_signal_cifsd_for_reconnect(server, false);
399                 trace_smb3_partial_send_reconnect(server->CurrentMid,
400                                                   server->conn_id, server->hostname);
401         }
402 smbd_done:
403         /*
404          * there's hardly any use for the layers above to know the
405          * actual error code here. All they should do at this point is
406          * to retry the connection and hope it goes away.
407          */
408         if (rc < 0 && rc != -EINTR && rc != -EAGAIN) {
409                 cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
410                          rc);
411                 rc = -ECONNABORTED;
412                 cifs_signal_cifsd_for_reconnect(server, false);
413         } else if (rc > 0)
414                 rc = 0;
415 out:
416         cifs_in_send_dec(server);
417         return rc;
418 }
419 
420 struct send_req_vars {
421         struct smb2_transform_hdr tr_hdr;
422         struct smb_rqst rqst[MAX_COMPOUND];
423         struct kvec iov;
424 };
425 
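/*
 * smb_send_rqst - optionally transform (encrypt) a request, then send it
 * @server: connection to send on
 * @num_rqst: number of requests in @rqst
 * @rqst: array of requests to send
 * @flags: CIFS_TRANSFORM_REQ requests encryption
 *
 * For transform requests, a smb2_transform_hdr iovec is prepended as a
 * new rqst[0] and server->ops->init_transform_rq builds the encrypted
 * chain before it is handed to __smb_send_rqst().
 */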
426 static int
427 smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
428               struct smb_rqst *rqst, int flags)
429 {
430         struct send_req_vars *vars;
431         struct smb_rqst *cur_rqst;
432         struct kvec *iov;
433         int rc;
434 
435         if (!(flags & CIFS_TRANSFORM_REQ))
436                 return __smb_send_rqst(server, num_rqst, rqst);
437 
438         if (WARN_ON_ONCE(num_rqst > MAX_COMPOUND - 1))
439                 return -EIO;
440 
441         if (!server->ops->init_transform_rq) {
442                 cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
443                 return -EIO;
444         }
445 
446         vars = kzalloc(sizeof(*vars), GFP_NOFS);
447         if (!vars)
448                 return -ENOMEM;
449         cur_rqst = vars->rqst;
450         iov = &vars->iov;
451 
452         iov->iov_base = &vars->tr_hdr;
453         iov->iov_len = sizeof(vars->tr_hdr);
454         cur_rqst[0].rq_iov = iov;
455         cur_rqst[0].rq_nvec = 1;
456 
457         rc = server->ops->init_transform_rq(server, num_rqst + 1,
458                                             &cur_rqst[0], rqst);
459         if (rc)
460                 goto out;
461 
462         rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
463         smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
464 out:
465         kfree(vars);
466         return rc;
467 }
468 
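/*
 * smb_send - send a single buffer that starts with the 4-byte RFC1002
 * length field, as SMB1 callers build it
 * @server: connection to send on
 * @smb_buffer: buffer beginning with the length field
 * @smb_buf_length: number of SMB bytes that follow the length field
 *
 * Typical call, as in SendReceive() below:
 *
 *	rc = smb_send(server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
 */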
469 int
470 smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
471          unsigned int smb_buf_length)
472 {
473         struct kvec iov[2];
474         struct smb_rqst rqst = { .rq_iov = iov,
475                                  .rq_nvec = 2 };
476 
477         iov[0].iov_base = smb_buffer;
478         iov[0].iov_len = 4;
479         iov[1].iov_base = (char *)smb_buffer + 4;
480         iov[1].iov_len = smb_buf_length;
481 
482         return __smb_send_rqst(server, 1, &rqst);
483 }
484 
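/*
 * wait_for_free_credits - reserve credits before putting a request on
 * the wire
 * @server: connection whose credits are consumed
 * @num_credits: credits required for this request
 * @timeout: milliseconds to wait, or negative to wait indefinitely
 * @flags: CIFS_* operation flags; CIFS_NON_BLOCKING callers (e.g. oplock
 *         break acks) take a credit immediately without waiting
 * @instance: set to the reconnect instance the credits were taken from
 *
 * Returns 0 with credits reserved, -EAGAIN for an echo when none are
 * available, -EBUSY if the wait times out, -ENOENT if the connection is
 * exiting, or -ERESTARTSYS if the wait is killed.
 */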
485 static int
486 wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
487                       const int timeout, const int flags,
488                       unsigned int *instance)
489 {
490         long rc;
491         int *credits;
492         int optype;
493         long int t;
494         int scredits, in_flight;
495 
496         if (timeout < 0)
497                 t = MAX_JIFFY_OFFSET;
498         else
499                 t = msecs_to_jiffies(timeout);
500 
501         optype = flags & CIFS_OP_MASK;
502 
503         *instance = 0;
504 
505         credits = server->ops->get_credits_field(server, optype);
506         /* Since an echo is already inflight, no need to wait to send another */
507         if (*credits <= 0 && optype == CIFS_ECHO_OP)
508                 return -EAGAIN;
509 
510         spin_lock(&server->req_lock);
511         if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
512                 /* oplock breaks must not be held up */
513                 server->in_flight++;
514                 if (server->in_flight > server->max_in_flight)
515                         server->max_in_flight = server->in_flight;
516                 *credits -= 1;
517                 *instance = server->reconnect_instance;
518                 scredits = *credits;
519                 in_flight = server->in_flight;
520                 spin_unlock(&server->req_lock);
521 
522                 trace_smb3_nblk_credits(server->CurrentMid,
523                                 server->conn_id, server->hostname, scredits, -1, in_flight);
524                 cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
525                                 __func__, 1, scredits);
526 
527                 return 0;
528         }
529 
530         while (1) {
531                 spin_unlock(&server->req_lock);
532 
533                 spin_lock(&server->srv_lock);
534                 if (server->tcpStatus == CifsExiting) {
535                         spin_unlock(&server->srv_lock);
536                         return -ENOENT;
537                 }
538                 spin_unlock(&server->srv_lock);
539 
540                 spin_lock(&server->req_lock);
541                 if (*credits < num_credits) {
542                         scredits = *credits;
543                         spin_unlock(&server->req_lock);
544 
545                         cifs_num_waiters_inc(server);
546                         rc = wait_event_killable_timeout(server->request_q,
547                                 has_credits(server, credits, num_credits), t);
548                         cifs_num_waiters_dec(server);
549                         if (!rc) {
550                                 spin_lock(&server->req_lock);
551                                 scredits = *credits;
552                                 in_flight = server->in_flight;
553                                 spin_unlock(&server->req_lock);
554 
555                                 trace_smb3_credit_timeout(server->CurrentMid,
556                                                 server->conn_id, server->hostname, scredits,
557                                                 num_credits, in_flight);
558                                 cifs_server_dbg(VFS, "wait timed out after %d ms\n",
559                                                 timeout);
560                                 return -EBUSY;
561                         }
562                         if (rc == -ERESTARTSYS)
563                                 return -ERESTARTSYS;
564                         spin_lock(&server->req_lock);
565                 } else {
566                         /*
 567                          * For normal commands, reserve the last MAX_COMPOUND
 568                          * credits for compound requests.
 569                          * Otherwise compound requests could be permanently
 570                          * starved of credits by single-credit requests.
 571                          *
 572                          * To prevent spinning the CPU, block this thread
 573                          * until there are >MAX_COMPOUND credits available.
 574                          * But only do this if we already have a lot of
 575                          * requests in flight, to avoid triggering this check
 576                          * for servers that are slow to hand out credits on
 577                          * new sessions.
578                          */
579                         if (!optype && num_credits == 1 &&
580                             server->in_flight > 2 * MAX_COMPOUND &&
581                             *credits <= MAX_COMPOUND) {
582                                 spin_unlock(&server->req_lock);
583 
584                                 cifs_num_waiters_inc(server);
585                                 rc = wait_event_killable_timeout(
586                                         server->request_q,
587                                         has_credits(server, credits,
588                                                     MAX_COMPOUND + 1),
589                                         t);
590                                 cifs_num_waiters_dec(server);
591                                 if (!rc) {
592                                         spin_lock(&server->req_lock);
593                                         scredits = *credits;
594                                         in_flight = server->in_flight;
595                                         spin_unlock(&server->req_lock);
596 
597                                         trace_smb3_credit_timeout(
598                                                         server->CurrentMid,
599                                                         server->conn_id, server->hostname,
600                                                         scredits, num_credits, in_flight);
601                                         cifs_server_dbg(VFS, "wait timed out after %d ms\n",
602                                                         timeout);
603                                         return -EBUSY;
604                                 }
605                                 if (rc == -ERESTARTSYS)
606                                         return -ERESTARTSYS;
607                                 spin_lock(&server->req_lock);
608                                 continue;
609                         }
610 
611                         /*
 612                          * We cannot count locking commands against the total
 613                          * as they are allowed to block on the server.
614                          */
615 
616                         /* update # of requests on the wire to server */
617                         if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
618                                 *credits -= num_credits;
619                                 server->in_flight += num_credits;
620                                 if (server->in_flight > server->max_in_flight)
621                                         server->max_in_flight = server->in_flight;
622                                 *instance = server->reconnect_instance;
623                         }
624                         scredits = *credits;
625                         in_flight = server->in_flight;
626                         spin_unlock(&server->req_lock);
627 
628                         trace_smb3_waitff_credits(server->CurrentMid,
629                                         server->conn_id, server->hostname, scredits,
630                                         -(num_credits), in_flight);
631                         cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
632                                         __func__, num_credits, scredits);
633                         break;
634                 }
635         }
636         return 0;
637 }
638 
639 static int
640 wait_for_free_request(struct TCP_Server_Info *server, const int flags,
641                       unsigned int *instance)
642 {
643         return wait_for_free_credits(server, 1, -1, flags,
644                                      instance);
645 }
646 
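/*
 * wait_for_compound_request - reserve credits for a whole compound chain
 * @server: connection to reserve credits on
 * @num: number of requests in the chain
 * @flags: CIFS_* operation flags
 * @instance: set to the reconnect instance the credits were taken from
 *
 * Fails fast with -EDEADLK when not enough credits are available and
 * nothing is in flight, since no in-flight response could replenish
 * them; otherwise waits up to 60 seconds for the credits.
 */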
647 static int
648 wait_for_compound_request(struct TCP_Server_Info *server, int num,
649                           const int flags, unsigned int *instance)
650 {
651         int *credits;
652         int scredits, in_flight;
653 
654         credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);
655 
656         spin_lock(&server->req_lock);
657         scredits = *credits;
658         in_flight = server->in_flight;
659 
660         if (*credits < num) {
661                 /*
662                  * If the server is tight on resources or just gives us less
663                  * credits for other reasons (e.g. requests are coming out of
664                  * order and the server delays granting more credits until it
665                  * processes a missing mid) and we exhausted most available
666                  * credits there may be situations when we try to send
667                  * a compound request but we don't have enough credits. At this
668                  * point the client needs to decide if it should wait for
669                  * additional credits or fail the request. If at least one
670                  * request is in flight there is a high probability that the
671                  * server will return enough credits to satisfy this compound
672                  * request.
673                  *
 674                  * Return immediately if there are no requests in flight since
 675                  * we would be stuck waiting for credits.
676                  */
677                 if (server->in_flight == 0) {
678                         spin_unlock(&server->req_lock);
679                         trace_smb3_insufficient_credits(server->CurrentMid,
680                                         server->conn_id, server->hostname, scredits,
681                                         num, in_flight);
682                         cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n",
683                                         __func__, in_flight, num, scredits);
684                         return -EDEADLK;
685                 }
686         }
687         spin_unlock(&server->req_lock);
688 
689         return wait_for_free_credits(server, num, 60000, flags,
690                                      instance);
691 }
692 
693 int
694 cifs_wait_mtu_credits(struct TCP_Server_Info *server, size_t size,
695                       size_t *num, struct cifs_credits *credits)
696 {
697         *num = size;
698         credits->value = 0;
699         credits->instance = server->reconnect_instance;
700         return 0;
701 }
702 
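/*
 * allocate_mid - allocate a mid and queue it on pending_mid_q
 * @ses: session the request belongs to
 * @in_buf: request header
 * @ppmidQ: on success, the newly queued mid
 *
 * Returns -EAGAIN for most commands while the session is still being set
 * up (SES_NEW) or torn down (SES_EXITING); only session setup, negotiate
 * and logoff are allowed in those states.
 */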
703 static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
704                         struct mid_q_entry **ppmidQ)
705 {
706         spin_lock(&ses->ses_lock);
707         if (ses->ses_status == SES_NEW) {
708                 if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
709                         (in_buf->Command != SMB_COM_NEGOTIATE)) {
710                         spin_unlock(&ses->ses_lock);
711                         return -EAGAIN;
712                 }
713                 /* else ok - we are setting up session */
714         }
715 
716         if (ses->ses_status == SES_EXITING) {
717                 /* check if SMB session is bad because we are setting it up */
718                 if (in_buf->Command != SMB_COM_LOGOFF_ANDX) {
719                         spin_unlock(&ses->ses_lock);
720                         return -EAGAIN;
721                 }
722                 /* else ok - we are shutting down session */
723         }
724         spin_unlock(&ses->ses_lock);
725 
726         *ppmidQ = alloc_mid(in_buf, ses->server);
727         if (*ppmidQ == NULL)
728                 return -ENOMEM;
729         spin_lock(&ses->server->mid_lock);
730         list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
731         spin_unlock(&ses->server->mid_lock);
732         return 0;
733 }
734 
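/*
 * wait_for_response - sleep until the demultiplex thread finalizes a mid
 * @server: connection the request was sent on
 * @midQ: mid entry to wait on
 *
 * The wait is killable and freezable; a fatal signal is reported as
 * -ERESTARTSYS so the caller can cancel the request.
 */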
735 static int
736 wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
737 {
738         int error;
739 
740         error = wait_event_state(server->response_q,
741                                  midQ->mid_state != MID_REQUEST_SUBMITTED &&
742                                  midQ->mid_state != MID_RESPONSE_RECEIVED,
743                                  (TASK_KILLABLE|TASK_FREEZABLE_UNSAFE));
744         if (error < 0)
745                 return -ERESTARTSYS;
746 
747         return 0;
748 }
749 
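/*
 * cifs_setup_async_request - allocate and sign a mid for an async send
 * @server: connection the request will go out on
 * @rqst: request; rq_iov[0] must be the 4-byte length field immediately
 *        followed in memory by rq_iov[1]
 *
 * Returns the new mid or an ERR_PTR on failure. Reached through
 * server->ops->setup_async_request from cifs_call_async() below.
 */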
750 struct mid_q_entry *
751 cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
752 {
753         int rc;
754         struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
755         struct mid_q_entry *mid;
756 
757         if (rqst->rq_iov[0].iov_len != 4 ||
758             rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
759                 return ERR_PTR(-EIO);
760 
761         /* enable signing if server requires it */
762         if (server->sign)
763                 hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
764 
765         mid = alloc_mid(hdr, server);
766         if (mid == NULL)
767                 return ERR_PTR(-ENOMEM);
768 
769         rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
770         if (rc) {
771                 release_mid(mid);
772                 return ERR_PTR(rc);
773         }
774 
775         return mid;
776 }
777 
778 /*
 779  * Send an SMB request and set the callback function in the mid to handle
780  * the result. Caller is responsible for dealing with timeouts.
781  */
782 int
783 cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
784                 mid_receive_t *receive, mid_callback_t *callback,
785                 mid_handle_t *handle, void *cbdata, const int flags,
786                 const struct cifs_credits *exist_credits)
787 {
788         int rc;
789         struct mid_q_entry *mid;
790         struct cifs_credits credits = { .value = 0, .instance = 0 };
791         unsigned int instance;
792         int optype;
793 
794         optype = flags & CIFS_OP_MASK;
795 
796         if ((flags & CIFS_HAS_CREDITS) == 0) {
797                 rc = wait_for_free_request(server, flags, &instance);
798                 if (rc)
799                         return rc;
800                 credits.value = 1;
801                 credits.instance = instance;
802         } else
803                 instance = exist_credits->instance;
804 
805         cifs_server_lock(server);
806 
807         /*
808          * We can't use credits obtained from the previous session to send this
809          * request. Check if there were reconnects after we obtained credits and
810          * return -EAGAIN in such cases to let callers handle it.
811          */
812         if (instance != server->reconnect_instance) {
813                 cifs_server_unlock(server);
814                 add_credits_and_wake_if(server, &credits, optype);
815                 return -EAGAIN;
816         }
817 
818         mid = server->ops->setup_async_request(server, rqst);
819         if (IS_ERR(mid)) {
820                 cifs_server_unlock(server);
821                 add_credits_and_wake_if(server, &credits, optype);
822                 return PTR_ERR(mid);
823         }
824 
825         mid->receive = receive;
826         mid->callback = callback;
827         mid->callback_data = cbdata;
828         mid->handle = handle;
829         mid->mid_state = MID_REQUEST_SUBMITTED;
830 
831         /* put it on the pending_mid_q */
832         spin_lock(&server->mid_lock);
833         list_add_tail(&mid->qhead, &server->pending_mid_q);
834         spin_unlock(&server->mid_lock);
835 
836         /*
837          * Need to store the time in mid before calling I/O. For call_async,
838          * I/O response may come back and free the mid entry on another thread.
839          */
840         cifs_save_when_sent(mid);
841         rc = smb_send_rqst(server, 1, rqst, flags);
842 
843         if (rc < 0) {
844                 revert_current_mid(server, mid->credits);
845                 server->sequence_number -= 2;
846                 delete_mid(mid);
847         }
848 
849         cifs_server_unlock(server);
850 
851         if (rc == 0)
852                 return 0;
853 
854         add_credits_and_wake_if(server, &credits, optype);
855         return rc;
856 }
857 
 858 /*
 859  * SendReceiveNoRsp - send an SMB request for which no response info
 860  * (other than the return code) needs to be parsed.
 861  *
 862  * @flags indicates the type of request buffer, how long to wait, and
 863  * whether to log the NT STATUS code (error) before mapping it to a
 864  * POSIX error.
 865  *
 866  */
867 int
868 SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
869                  char *in_buf, int flags)
870 {
871         int rc;
872         struct kvec iov[1];
873         struct kvec rsp_iov;
874         int resp_buf_type;
875 
876         iov[0].iov_base = in_buf;
877         iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
878         flags |= CIFS_NO_RSP_BUF;
879         rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
880         cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);
881 
882         return rc;
883 }
884 
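/*
 * cifs_sync_mid_result - convert a finished mid's state into an errno
 * @mid: mid entry to examine
 * @server: connection the mid belongs to
 *
 * Dequeues the mid if it is in an unexpected state and always drops the
 * caller's reference, so @mid must not be used after this returns.
 */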
885 static int
886 cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
887 {
888         int rc = 0;
889 
890         cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
891                  __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);
892 
893         spin_lock(&server->mid_lock);
894         switch (mid->mid_state) {
895         case MID_RESPONSE_READY:
896                 spin_unlock(&server->mid_lock);
897                 return rc;
898         case MID_RETRY_NEEDED:
899                 rc = -EAGAIN;
900                 break;
901         case MID_RESPONSE_MALFORMED:
902                 rc = -EIO;
903                 break;
904         case MID_SHUTDOWN:
905                 rc = -EHOSTDOWN;
906                 break;
907         default:
908                 if (!(mid->mid_flags & MID_DELETED)) {
909                         list_del_init(&mid->qhead);
910                         mid->mid_flags |= MID_DELETED;
911                 }
912                 spin_unlock(&server->mid_lock);
913                 cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
914                          __func__, mid->mid, mid->mid_state);
915                 rc = -EIO;
916                 goto sync_mid_done;
917         }
918         spin_unlock(&server->mid_lock);
919 
920 sync_mid_done:
921         release_mid(mid);
922         return rc;
923 }
924 
925 static inline int
926 send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
927             struct mid_q_entry *mid)
928 {
929         return server->ops->send_cancel ?
930                                 server->ops->send_cancel(server, rqst, mid) : 0;
931 }
932 
933 int
934 cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
935                    bool log_error)
936 {
937         unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;
938 
939         dump_smb(mid->resp_buf, min_t(u32, 92, len));
940 
 941         /* verify the response signature when the connection is signed */
942         if (server->sign) {
943                 struct kvec iov[2];
944                 int rc = 0;
945                 struct smb_rqst rqst = { .rq_iov = iov,
946                                          .rq_nvec = 2 };
947 
948                 iov[0].iov_base = mid->resp_buf;
949                 iov[0].iov_len = 4;
950                 iov[1].iov_base = (char *)mid->resp_buf + 4;
951                 iov[1].iov_len = len - 4;
952                 /* FIXME: add code to kill session */
953                 rc = cifs_verify_signature(&rqst, server,
954                                            mid->sequence_number);
955                 if (rc)
956                         cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
957                                  rc);
958         }
959 
960         /* BB special case reconnect tid and uid here? */
961         return map_and_check_smb_error(mid, log_error);
962 }
963 
964 struct mid_q_entry *
965 cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
966                    struct smb_rqst *rqst)
967 {
968         int rc;
969         struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
970         struct mid_q_entry *mid;
971 
972         if (rqst->rq_iov[0].iov_len != 4 ||
973             rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
974                 return ERR_PTR(-EIO);
975 
976         rc = allocate_mid(ses, hdr, &mid);
977         if (rc)
978                 return ERR_PTR(rc);
979         rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
980         if (rc) {
981                 delete_mid(mid);
982                 return ERR_PTR(rc);
983         }
984         return mid;
985 }
986 
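/*
 * cifs_compound_callback - credit-returning callback for the non-final
 * parts of a compound chain
 *
 * Collects the credits the server granted with this response without
 * waking the sender; only the last mid of a chain wakes it, via
 * cifs_compound_last_callback().
 */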
987 static void
988 cifs_compound_callback(struct mid_q_entry *mid)
989 {
990         struct TCP_Server_Info *server = mid->server;
991         struct cifs_credits credits = {
992                 .value = server->ops->get_credits(mid),
993                 .instance = server->reconnect_instance,
994         };
995 
996         add_credits(server, &credits, mid->optype);
997 
998         if (mid->mid_state == MID_RESPONSE_RECEIVED)
999                 mid->mid_state = MID_RESPONSE_READY;
1000 }
1001 
1002 static void
1003 cifs_compound_last_callback(struct mid_q_entry *mid)
1004 {
1005         cifs_compound_callback(mid);
1006         cifs_wake_up_task(mid);
1007 }
1008 
1009 static void
1010 cifs_cancelled_callback(struct mid_q_entry *mid)
1011 {
1012         cifs_compound_callback(mid);
1013         release_mid(mid);
1014 }
1015 
1016 /*
1017  * Return a channel (master if none) of @ses that can be used to send
1018  * regular requests.
1019  *
1020  * If we are currently binding a new channel (negprot/sess.setup),
1021  * return the new incomplete channel.
1022  */
1023 struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
1024 {
1025         uint index = 0;
1026         unsigned int min_in_flight = UINT_MAX, max_in_flight = 0;
1027         struct TCP_Server_Info *server = NULL;
1028         int i;
1029 
1030         if (!ses)
1031                 return NULL;
1032 
1033         spin_lock(&ses->chan_lock);
1034         for (i = 0; i < ses->chan_count; i++) {
1035                 server = ses->chans[i].server;
1036                 if (!server || server->terminate)
1037                         continue;
1038 
1039                 if (CIFS_CHAN_NEEDS_RECONNECT(ses, i))
1040                         continue;
1041 
1042                 /*
1043                  * strictly speaking, we should pick up req_lock to read
1044                  * server->in_flight. But it shouldn't matter much here if we
1045                  * race while reading this data. The worst that can happen is
1046                  * that we could use a channel that's not least loaded. Avoiding
1047                  * taking the lock could help reduce wait time, which is
1048                  * important for this function
1049                  */
1050                 if (server->in_flight < min_in_flight) {
1051                         min_in_flight = server->in_flight;
1052                         index = i;
1053                 }
1054                 if (server->in_flight > max_in_flight)
1055                         max_in_flight = server->in_flight;
1056         }
1057 
1058         /* if all channels are equally loaded, fall back to round-robin */
1059         if (min_in_flight == max_in_flight) {
1060                 index = (uint)atomic_inc_return(&ses->chan_seq);
1061                 index %= ses->chan_count;
1062         }
1063 
1064         server = ses->chans[index].server;
1065         spin_unlock(&ses->chan_lock);
1066 
1067         return server;
1068 }
1069 
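/*
 * compound_send_recv - send a compound chain and wait for all responses
 * @xid: transaction id, for debugging
 * @ses: session to send on
 * @server: channel to use (e.g. the result of cifs_pick_channel())
 * @flags: CIFS_* operation, timeout and response-buffer flags
 * @num_rqst: number of requests in @rqst, at most MAX_COMPOUND
 * @rqst: array of requests to send
 * @resp_buf_type: per-request response buffer type, returned to caller
 * @resp_iov: per-request response buffers, returned to caller
 *
 * Reserves credits for the whole chain, signs and sends it under
 * srv_mutex, then waits for every response. A cancelled wait hands the
 * mid to cifs_cancelled_callback() so its credits and response buffer
 * are still released when the response eventually arrives.
 */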
1070 int
1071 compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
1072                    struct TCP_Server_Info *server,
1073                    const int flags, const int num_rqst, struct smb_rqst *rqst,
1074                    int *resp_buf_type, struct kvec *resp_iov)
1075 {
1076         int i, j, optype, rc = 0;
1077         struct mid_q_entry *midQ[MAX_COMPOUND];
1078         bool cancelled_mid[MAX_COMPOUND] = {false};
1079         struct cifs_credits credits[MAX_COMPOUND] = {
1080                 { .value = 0, .instance = 0 }
1081         };
1082         unsigned int instance;
1083         char *buf;
1084 
1085         optype = flags & CIFS_OP_MASK;
1086 
1087         for (i = 0; i < num_rqst; i++)
1088                 resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */
1089 
1090         if (!ses || !ses->server || !server) {
1091                 cifs_dbg(VFS, "Null session\n");
1092                 return -EIO;
1093         }
1094 
1095         spin_lock(&server->srv_lock);
1096         if (server->tcpStatus == CifsExiting) {
1097                 spin_unlock(&server->srv_lock);
1098                 return -ENOENT;
1099         }
1100         spin_unlock(&server->srv_lock);
1101 
1102         /*
1103          * Wait for all the requests to become available.
1104          * This approach still leaves the possibility to be stuck waiting for
1105          * credits if the server doesn't grant credits to the outstanding
1106          * requests and if the client is completely idle, not generating any
1107          * other requests.
1108          * This can be handled by the eventual session reconnect.
1109          */
1110         rc = wait_for_compound_request(server, num_rqst, flags,
1111                                        &instance);
1112         if (rc)
1113                 return rc;
1114 
1115         for (i = 0; i < num_rqst; i++) {
1116                 credits[i].value = 1;
1117                 credits[i].instance = instance;
1118         }
1119 
1120         /*
1121          * Make sure that we sign in the same order that we send on this socket
1122          * and avoid races inside tcp sendmsg code that could cause corruption
1123          * of smb data.
1124          */
1125 
1126         cifs_server_lock(server);
1127 
1128         /*
1129          * All the parts of the compound chain must use credits obtained
1130          * from the same session. We cannot use credits obtained from a
1131          * previous session to send this request. Check if there were
1132          * reconnects after we obtained credits and return -EAGAIN in such
1133          * cases to let callers handle it.
1134          */
1135         if (instance != server->reconnect_instance) {
1136                 cifs_server_unlock(server);
1137                 for (j = 0; j < num_rqst; j++)
1138                         add_credits(server, &credits[j], optype);
1139                 return -EAGAIN;
1140         }
1141 
1142         for (i = 0; i < num_rqst; i++) {
1143                 midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
1144                 if (IS_ERR(midQ[i])) {
1145                         revert_current_mid(server, i);
1146                         for (j = 0; j < i; j++)
1147                                 delete_mid(midQ[j]);
1148                         cifs_server_unlock(server);
1149 
1150                         /* Update # of requests on wire to server */
1151                         for (j = 0; j < num_rqst; j++)
1152                                 add_credits(server, &credits[j], optype);
1153                         return PTR_ERR(midQ[i]);
1154                 }
1155 
1156                 midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
1157                 midQ[i]->optype = optype;
1158                 /*
1159                  * Invoke callback for every part of the compound chain
1160                  * to calculate credits properly. Wake up this thread only when
1161                  * the last element is received.
1162                  */
1163                 if (i < num_rqst - 1)
1164                         midQ[i]->callback = cifs_compound_callback;
1165                 else
1166                         midQ[i]->callback = cifs_compound_last_callback;
1167         }
1168         rc = smb_send_rqst(server, num_rqst, rqst, flags);
1169 
1170         for (i = 0; i < num_rqst; i++)
1171                 cifs_save_when_sent(midQ[i]);
1172 
1173         if (rc < 0) {
1174                 revert_current_mid(server, num_rqst);
1175                 server->sequence_number -= 2;
1176         }
1177 
1178         cifs_server_unlock(server);
1179 
1180         /*
1181          * If sending failed for some reason or it is an oplock break that we
1182          * will not receive a response to - return credits back
1183          */
1184         if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
1185                 for (i = 0; i < num_rqst; i++)
1186                         add_credits(server, &credits[i], optype);
1187                 goto out;
1188         }
1189 
1190         /*
1191          * At this point the request is passed to the network stack - we assume
1192          * that any credits taken from the server structure on the client have
1193          * been spent and we can't return them back. Once we receive responses
1194          * we will collect credits granted by the server in the mid callbacks
1195          * and add those credits to the server structure.
1196          */
1197 
1198         /*
1199          * Compounding is never used during session establishment.
1200          */
1201         spin_lock(&ses->ses_lock);
1202         if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
1203                 spin_unlock(&ses->ses_lock);
1204 
1205                 cifs_server_lock(server);
1206                 smb311_update_preauth_hash(ses, server, rqst[0].rq_iov, rqst[0].rq_nvec);
1207                 cifs_server_unlock(server);
1208 
1209                 spin_lock(&ses->ses_lock);
1210         }
1211         spin_unlock(&ses->ses_lock);
1212 
1213         for (i = 0; i < num_rqst; i++) {
1214                 rc = wait_for_response(server, midQ[i]);
1215                 if (rc != 0)
1216                         break;
1217         }
1218         if (rc != 0) {
1219                 for (; i < num_rqst; i++) {
1220                         cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
1221                                  midQ[i]->mid, le16_to_cpu(midQ[i]->command));
1222                         send_cancel(server, &rqst[i], midQ[i]);
1223                         spin_lock(&server->mid_lock);
1224                         midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
1225                         if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED ||
1226                             midQ[i]->mid_state == MID_RESPONSE_RECEIVED) {
1227                                 midQ[i]->callback = cifs_cancelled_callback;
1228                                 cancelled_mid[i] = true;
1229                                 credits[i].value = 0;
1230                         }
1231                         spin_unlock(&server->mid_lock);
1232                 }
1233         }
1234 
1235         for (i = 0; i < num_rqst; i++) {
1236                 if (rc < 0)
1237                         goto out;
1238 
1239                 rc = cifs_sync_mid_result(midQ[i], server);
1240                 if (rc != 0) {
1241                         /* mark this mid as cancelled to not free it below */
1242                         cancelled_mid[i] = true;
1243                         goto out;
1244                 }
1245 
1246                 if (!midQ[i]->resp_buf ||
1247                     midQ[i]->mid_state != MID_RESPONSE_READY) {
1248                         rc = -EIO;
1249                         cifs_dbg(FYI, "Bad MID state?\n");
1250                         goto out;
1251                 }
1252 
1253                 buf = (char *)midQ[i]->resp_buf;
1254                 resp_iov[i].iov_base = buf;
1255                 resp_iov[i].iov_len = midQ[i]->resp_buf_size +
1256                         HEADER_PREAMBLE_SIZE(server);
1257 
1258                 if (midQ[i]->large_buf)
1259                         resp_buf_type[i] = CIFS_LARGE_BUFFER;
1260                 else
1261                         resp_buf_type[i] = CIFS_SMALL_BUFFER;
1262 
1263                 rc = server->ops->check_receive(midQ[i], server,
1264                                                      flags & CIFS_LOG_ERROR);
1265 
1266                 /* mark it so buf will not be freed by delete_mid */
1267                 if ((flags & CIFS_NO_RSP_BUF) == 0)
1268                         midQ[i]->resp_buf = NULL;
1269 
1270         }
1271 
1272         /*
1273          * Compounding is never used during session establishment.
1274          */
1275         spin_lock(&ses->ses_lock);
1276         if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
1277                 struct kvec iov = {
1278                         .iov_base = resp_iov[0].iov_base,
1279                         .iov_len = resp_iov[0].iov_len
1280                 };
1281                 spin_unlock(&ses->ses_lock);
1282                 cifs_server_lock(server);
1283                 smb311_update_preauth_hash(ses, server, &iov, 1);
1284                 cifs_server_unlock(server);
1285                 spin_lock(&ses->ses_lock);
1286         }
1287         spin_unlock(&ses->ses_lock);
1288 
1289 out:
1290         /*
1291          * This will dequeue all mids. After this it is important that the
1292          * demultiplex_thread will not process any of these mids any further.
1293          * This is prevented above by using a noop callback that will not
1294          * wake this thread except for the very last PDU.
1295          */
1296         for (i = 0; i < num_rqst; i++) {
1297                 if (!cancelled_mid[i])
1298                         delete_mid(midQ[i]);
1299         }
1300 
1301         return rc;
1302 }
1303 
1304 int
1305 cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
1306                struct TCP_Server_Info *server,
1307                struct smb_rqst *rqst, int *resp_buf_type, const int flags,
1308                struct kvec *resp_iov)
1309 {
1310         return compound_send_recv(xid, ses, server, flags, 1,
1311                                   rqst, resp_buf_type, resp_iov);
1312 }
1313 
1314 int
1315 SendReceive2(const unsigned int xid, struct cifs_ses *ses,
1316              struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
1317              const int flags, struct kvec *resp_iov)
1318 {
1319         struct smb_rqst rqst;
1320         struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
1321         int rc;
1322 
1323         if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
1324                 new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
1325                                         GFP_KERNEL);
1326                 if (!new_iov) {
1327                         /* otherwise cifs_send_recv below sets resp_buf_type */
1328                         *resp_buf_type = CIFS_NO_BUFFER;
1329                         return -ENOMEM;
1330                 }
1331         } else
1332                 new_iov = s_iov;
1333 
1334         /* 1st iov is the RFC1002 length followed by the rest of the packet */
1335         memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
1336 
1337         new_iov[0].iov_base = new_iov[1].iov_base;
1338         new_iov[0].iov_len = 4;
1339         new_iov[1].iov_base += 4;
1340         new_iov[1].iov_len -= 4;
1341 
1342         memset(&rqst, 0, sizeof(struct smb_rqst));
1343         rqst.rq_iov = new_iov;
1344         rqst.rq_nvec = n_vec + 1;
1345 
1346         rc = cifs_send_recv(xid, ses, ses->server,
1347                             &rqst, resp_buf_type, flags, resp_iov);
1348         if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
1349                 kfree(new_iov);
1350         return rc;
1351 }
1352 
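/*
 * SendReceive - synchronously send a single SMB1 request and wait for
 * its response
 * @xid: transaction id, for debugging
 * @ses: session to send on
 * @in_buf: request buffer beginning with the RFC1002 length field
 * @out_buf: caller-supplied buffer that receives the response
 * @pbytes_returned: set to the number of response bytes
 * @flags: CIFS_* operation and timeout flags
 */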
1353 int
1354 SendReceive(const unsigned int xid, struct cifs_ses *ses,
1355             struct smb_hdr *in_buf, struct smb_hdr *out_buf,
1356             int *pbytes_returned, const int flags)
1357 {
1358         int rc = 0;
1359         struct mid_q_entry *midQ;
1360         unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
1361         struct kvec iov = { .iov_base = in_buf, .iov_len = len };
1362         struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
1363         struct cifs_credits credits = { .value = 1, .instance = 0 };
1364         struct TCP_Server_Info *server;
1365 
1366         if (ses == NULL) {
1367                 cifs_dbg(VFS, "Null smb session\n");
1368                 return -EIO;
1369         }
1370         server = ses->server;
1371         if (server == NULL) {
1372                 cifs_dbg(VFS, "Null tcp session\n");
1373                 return -EIO;
1374         }
1375 
1376         spin_lock(&server->srv_lock);
1377         if (server->tcpStatus == CifsExiting) {
1378                 spin_unlock(&server->srv_lock);
1379                 return -ENOENT;
1380         }
1381         spin_unlock(&server->srv_lock);
1382 
1383         /* Ensure that we do not send more than 50 overlapping requests
1384            to the same server. We may make this configurable later or
1385            use ses->maxReq */
1386 
1387         if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
1388                 cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
1389                                 len);
1390                 return -EIO;
1391         }
1392 
1393         rc = wait_for_free_request(server, flags, &credits.instance);
1394         if (rc)
1395                 return rc;
1396 
1397         /* make sure that we sign in the same order that we send on this socket
1398            and avoid races inside tcp sendmsg code that could cause corruption
1399            of smb data */
1400 
1401         cifs_server_lock(server);
1402 
1403         rc = allocate_mid(ses, in_buf, &midQ);
1404         if (rc) {
1405                 cifs_server_unlock(server);
1406                 /* Update # of requests on wire to server */
1407                 add_credits(server, &credits, 0);
1408                 return rc;
1409         }
1410 
1411         rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
1412         if (rc) {
1413                 cifs_server_unlock(server);
1414                 goto out;
1415         }
1416 
1417         midQ->mid_state = MID_REQUEST_SUBMITTED;
1418 
1419         rc = smb_send(server, in_buf, len);
1420         cifs_save_when_sent(midQ);
1421 
1422         if (rc < 0)
1423                 server->sequence_number -= 2;
1424 
1425         cifs_server_unlock(server);
1426 
1427         if (rc < 0)
1428                 goto out;
1429 
1430         rc = wait_for_response(server, midQ);
1431         if (rc != 0) {
1432                 send_cancel(server, &rqst, midQ);
1433                 spin_lock(&server->mid_lock);
1434                 if (midQ->mid_state == MID_REQUEST_SUBMITTED ||
1435                     midQ->mid_state == MID_RESPONSE_RECEIVED) {
1436                         /* no longer considered to be "in-flight" */
1437                         midQ->callback = release_mid;
1438                         spin_unlock(&server->mid_lock);
1439                         add_credits(server, &credits, 0);
1440                         return rc;
1441                 }
1442                 spin_unlock(&server->mid_lock);
1443         }
1444 
1445         rc = cifs_sync_mid_result(midQ, server);
1446         if (rc != 0) {
1447                 add_credits(server, &credits, 0);
1448                 return rc;
1449         }
1450 
1451         if (!midQ->resp_buf || !out_buf ||
1452             midQ->mid_state != MID_RESPONSE_READY) {
1453                 rc = -EIO;
1454                 cifs_server_dbg(VFS, "Bad MID state?\n");
1455                 goto out;
1456         }
1457 
1458         *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
1459         memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
1460         rc = cifs_check_receive(midQ, server, 0);
1461 out:
1462         delete_mid(midQ);
1463         add_credits(server, &credits, 0);
1464 
1465         return rc;
1466 }
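
/*
 * Editorial sketch (not in the upstream file): the legacy SMB1 call
 * pattern around SendReceive(). cifssmb.c callers normally allocate the
 * frames through smb_init()/small_smb_init(); the bare cifs_buf_get()
 * pairing below is a simplified stand-in, and xid/ses are assumed from
 * the surrounding context.
 */
#if 0	/* illustration only */
	struct smb_hdr *in_buf = cifs_buf_get();	/* request frame */
	struct smb_hdr *out_buf = cifs_buf_get();	/* response frame */
	int bytes_returned = 0;
	int rc;

	/* ... fill in_buf (command, Tid/Uid, body, smb_buf_length) ... */
	rc = SendReceive(xid, ses, in_buf, out_buf, &bytes_returned, 0);
	if (rc == 0) {
		/* out_buf now holds the length-checked response frame */
	}
	cifs_buf_release(in_buf);
	cifs_buf_release(out_buf);
#endif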
1467 
1468 /* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
1469    blocking lock to return. */
1470 
1471 static int
1472 send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
1473                         struct smb_hdr *in_buf,
1474                         struct smb_hdr *out_buf)
1475 {
1476         int bytes_returned;
1477         struct cifs_ses *ses = tcon->ses;
1478         LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
1479 
1480         /* We just modify the current in_buf to change
1481            the type of lock from LOCKING_ANDX_SHARED_LOCK
1482            or LOCKING_ANDX_EXCLUSIVE_LOCK to
1483            LOCKING_ANDX_CANCEL_LOCK. */
1484 
1485         pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
1486         pSMB->Timeout = 0;
1487         pSMB->hdr.Mid = get_next_mid(ses->server);
1488 
1489         return SendReceive(xid, ses, in_buf, out_buf,
1490                         &bytes_returned, 0);
1491 }
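
/*
 * Editorial note (not in the upstream file): rather than building a new
 * frame, send_lock_cancel() rewrites the still-allocated original request
 * in place: LockType becomes a cancel, Timeout becomes non-blocking, and
 * a fresh Mid makes the server treat the resend as a new command.
 */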
1492 
1493 int
1494 SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
1495             struct smb_hdr *in_buf, struct smb_hdr *out_buf,
1496             int *pbytes_returned)
1497 {
1498         int rc = 0;
1499         int rstart = 0;
1500         struct mid_q_entry *midQ;
1501         struct cifs_ses *ses;
1502         unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
1503         struct kvec iov = { .iov_base = in_buf, .iov_len = len };
1504         struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
1505         unsigned int instance;
1506         struct TCP_Server_Info *server;
1507 
1508         if (tcon == NULL || tcon->ses == NULL) {
1509                 cifs_dbg(VFS, "Null smb session\n");
1510                 return -EIO;
1511         }
1512         ses = tcon->ses;
1513         server = ses->server;
1514 
1515         if (server == NULL) {
1516                 cifs_dbg(VFS, "Null tcp session\n");
1517                 return -EIO;
1518         }
1519 
1520         spin_lock(&server->srv_lock);
1521         if (server->tcpStatus == CifsExiting) {
1522                 spin_unlock(&server->srv_lock);
1523                 return -ENOENT;
1524         }
1525         spin_unlock(&server->srv_lock);
1526 
1527         /* Ensure that we do not send more than 50 overlapping requests
1528            to the same server. We may make this configurable later or
1529            use ses->maxReq */
1530 
1531         if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
1532                 cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
1533                               len);
1534                 return -EIO;
1535         }
1536 
1537         rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
1538         if (rc)
1539                 return rc;
1540 
1541         /* make sure that we sign in the same order that we send on this socket
1542            and avoid races inside tcp sendmsg code that could cause corruption
1543            of smb data */
1544 
1545         cifs_server_lock(server);
1546 
1547         rc = allocate_mid(ses, in_buf, &midQ);
1548         if (rc) {
1549                 cifs_server_unlock(server);
1550                 return rc;
1551         }
1552 
1553         rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
1554         if (rc) {
1555                 delete_mid(midQ);
1556                 cifs_server_unlock(server);
1557                 return rc;
1558         }
1559 
1560         midQ->mid_state = MID_REQUEST_SUBMITTED;
1561         rc = smb_send(server, in_buf, len);
1562         cifs_save_when_sent(midQ);
1563 
1564         if (rc < 0)
1565                 server->sequence_number -= 2;
1566 
1567         cifs_server_unlock(server);
1568 
1569         if (rc < 0) {
1570                 delete_mid(midQ);
1571                 return rc;
1572         }
1573 
1574         /* Wait for a reply - allow signals to interrupt. */
1575         rc = wait_event_interruptible(server->response_q,
1576                 (!(midQ->mid_state == MID_REQUEST_SUBMITTED ||
1577                    midQ->mid_state == MID_RESPONSE_RECEIVED)) ||
1578                 ((server->tcpStatus != CifsGood) &&
1579                  (server->tcpStatus != CifsNew)));
1580 
1581         /* Were we interrupted by a signal? */
1582         spin_lock(&server->srv_lock);
1583         if ((rc == -ERESTARTSYS) &&
1584                 (midQ->mid_state == MID_REQUEST_SUBMITTED ||
1585                  midQ->mid_state == MID_RESPONSE_RECEIVED) &&
1586                 ((server->tcpStatus == CifsGood) ||
1587                  (server->tcpStatus == CifsNew))) {
1588                 spin_unlock(&server->srv_lock);
1589 
1590                 if (in_buf->Command == SMB_COM_TRANSACTION2) {
1591                         /* POSIX lock. We send an NT_CANCEL SMB to cause
1592                            the blocking lock to return. */
1593                         rc = send_cancel(server, &rqst, midQ);
1594                         if (rc) {
1595                                 delete_mid(midQ);
1596                                 return rc;
1597                         }
1598                 } else {
1599                         /* Windows lock. We send a LOCKINGX_CANCEL_LOCK
1600                            to cause the blocking lock to return. */
1601 
1602                         rc = send_lock_cancel(xid, tcon, in_buf, out_buf);
1603 
1604                         /* If we get -ENOLCK back the lock may have
1605                            already been removed. Don't exit in this case. */
1606                         if (rc && rc != -ENOLCK) {
1607                                 delete_mid(midQ);
1608                                 return rc;
1609                         }
1610                 }
1611 
1612                 rc = wait_for_response(server, midQ);
1613                 if (rc) {
1614                         send_cancel(server, &rqst, midQ);
1615                         spin_lock(&server->mid_lock);
1616                         if (midQ->mid_state == MID_REQUEST_SUBMITTED ||
1617                             midQ->mid_state == MID_RESPONSE_RECEIVED) {
1618                                 /* no longer considered to be "in-flight" */
1619                                 midQ->callback = release_mid;
1620                                 spin_unlock(&server->mid_lock);
1621                                 return rc;
1622                         }
1623                         spin_unlock(&server->mid_lock);
1624                 }
1625 
1626                 /* We got the response - restart system call. */
1627                 rstart = 1;
1628                 spin_lock(&server->srv_lock);
1629         }
1630         spin_unlock(&server->srv_lock);
1631 
1632         rc = cifs_sync_mid_result(midQ, server);
1633         if (rc != 0)
1634                 return rc;
1635 
1636         /* rcvd frame is ok */
1637         if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_READY) {
1638                 rc = -EIO;
1639                 cifs_tcon_dbg(VFS, "Bad MID state?\n");
1640                 goto out;
1641         }
1642 
1643         *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
1644         memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
1645         rc = cifs_check_receive(midQ, server, 0);
1646 out:
1647         delete_mid(midQ);
1648         if (rstart && rc == -EACCES)
1649                 return -ERESTARTSYS;
1650         return rc;
1651 }
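
/*
 * Editorial summary (not in the upstream file) of the signal handling
 * above: (1) wait interruptibly, so a signal can surface -ERESTARTSYS
 * while the lock request is still outstanding on a healthy connection;
 * (2) cancel it server-side, via NT_CANCEL for a POSIX (transaction2)
 * lock or a LOCKINGX cancel frame for a Windows lock; (3) wait again for
 * the response that must now arrive; (4) set rstart so a final -EACCES
 * is converted to -ERESTARTSYS and the system call restarts instead of
 * failing.
 */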
1652 
1653 /*
1654  * Discard any remaining data in the current SMB. To do this, we borrow the
1655  * current bigbuf.
1656  */
1657 int
1658 cifs_discard_remaining_data(struct TCP_Server_Info *server)
1659 {
1660         unsigned int rfclen = server->pdu_size;
1661         size_t remaining = rfclen + HEADER_PREAMBLE_SIZE(server) -
1662                 server->total_read;
1663 
1664         while (remaining > 0) {
1665                 ssize_t length;
1666 
1667                 length = cifs_discard_from_socket(server,
1668                                 min_t(size_t, remaining,
1669                                       CIFSMaxBufSize + MAX_HEADER_SIZE(server)));
1670                 if (length < 0)
1671                         return length;
1672                 server->total_read += length;
1673                 remaining -= length;
1674         }
1675 
1676         return 0;
1677 }
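
/*
 * Editorial worked example (not in the upstream file): with the usual
 * 4-byte RFC1001 preamble, pdu_size = 70000 and total_read = 4096 give
 * remaining = 70000 + 4 - 4096 = 65908 bytes, which the loop reads and
 * drops in chunks of at most CIFSMaxBufSize + MAX_HEADER_SIZE(server).
 */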
1678 
1679 static int
1680 __cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid,
1681                      bool malformed)
1682 {
1683         int length;
1684 
1685         length = cifs_discard_remaining_data(server);
1686         dequeue_mid(mid, malformed);
1687         mid->resp_buf = server->smallbuf;
1688         server->smallbuf = NULL;
1689         return length;
1690 }
1691 
1692 static int
1693 cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1694 {
1695         struct cifs_io_subrequest *rdata = mid->callback_data;
1696 
1697         return __cifs_readv_discard(server, mid, rdata->result);
1698 }
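
/*
 * Editorial note (not in the upstream file): rdata->result doubles as
 * the "malformed" flag, so any nonzero status recorded while parsing the
 * response dequeues the mid in MID_RESPONSE_MALFORMED state and the
 * completion callback sees the failure.
 */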
1699 
1700 int
1701 cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1702 {
1703         int length, len;
1704         unsigned int data_offset, data_len;
1705         struct cifs_io_subrequest *rdata = mid->callback_data;
1706         char *buf = server->smallbuf;
1707         unsigned int buflen = server->pdu_size + HEADER_PREAMBLE_SIZE(server);
1708         bool use_rdma_mr = false;
1709 
1710         cifs_dbg(FYI, "%s: mid=%llu offset=%llu bytes=%zu\n",
1711                  __func__, mid->mid, rdata->subreq.start, rdata->subreq.len);
1712 
1713         /*
1714          * read the rest of READ_RSP header (sans Data array), or whatever we
1715          * can if there's not enough data. At this point, we've read down to
1716          * the Mid.
1717          */
1718         len = min_t(unsigned int, buflen, server->vals->read_rsp_size) -
1719                                                         HEADER_SIZE(server) + 1;
1720 
1721         length = cifs_read_from_socket(server,
1722                                        buf + HEADER_SIZE(server) - 1, len);
1723         if (length < 0)
1724                 return length;
1725         server->total_read += length;
1726 
1727         if (server->ops->is_session_expired &&
1728             server->ops->is_session_expired(buf)) {
1729                 cifs_reconnect(server, true);
1730                 return -1;
1731         }
1732 
1733         if (server->ops->is_status_pending &&
1734             server->ops->is_status_pending(buf, server)) {
1735                 cifs_discard_remaining_data(server);
1736                 return -1;
1737         }
1738 
1739         /* set up first two iov for signature check and to get credits */
1740         rdata->iov[0].iov_base = buf;
1741         rdata->iov[0].iov_len = HEADER_PREAMBLE_SIZE(server);
1742         rdata->iov[1].iov_base = buf + HEADER_PREAMBLE_SIZE(server);
1743         rdata->iov[1].iov_len =
1744                 server->total_read - HEADER_PREAMBLE_SIZE(server);
1745         cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
1746                  rdata->iov[0].iov_base, rdata->iov[0].iov_len);
1747         cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
1748                  rdata->iov[1].iov_base, rdata->iov[1].iov_len);
1749 
1750         /* Was the SMB read successful? */
1751         rdata->result = server->ops->map_error(buf, false);
1752         if (rdata->result != 0) {
1753                 cifs_dbg(FYI, "%s: server returned error %d\n",
1754                          __func__, rdata->result);
1755                 /* normal error on read response */
1756                 return __cifs_readv_discard(server, mid, false);
1757         }
1758 
1759         /* Is there enough to get to the rest of the READ_RSP header? */
1760         if (server->total_read < server->vals->read_rsp_size) {
1761                 cifs_dbg(FYI, "%s: server returned short header. got=%u expected=%zu\n",
1762                          __func__, server->total_read,
1763                          server->vals->read_rsp_size);
1764                 rdata->result = -EIO;
1765                 return cifs_readv_discard(server, mid);
1766         }
1767 
1768         data_offset = server->ops->read_data_offset(buf) +
1769                 HEADER_PREAMBLE_SIZE(server);
1770         if (data_offset < server->total_read) {
1771                 /*
1772                  * win2k8 sometimes sends an offset of 0 when the read
1773                  * is beyond the EOF. Treat it as if the data starts just after
1774                  * the header.
1775                  */
1776                 cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
1777                          __func__, data_offset);
1778                 data_offset = server->total_read;
1779         } else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
1780                 /* data_offset is beyond the end of smallbuf */
1781                 cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
1782                          __func__, data_offset);
1783                 rdata->result = -EIO;
1784                 return cifs_readv_discard(server, mid);
1785         }
1786 
1787         cifs_dbg(FYI, "%s: total_read=%u data_offset=%u\n",
1788                  __func__, server->total_read, data_offset);
1789 
1790         len = data_offset - server->total_read;
1791         if (len > 0) {
1792                 /* read any junk before data into the rest of smallbuf */
1793                 length = cifs_read_from_socket(server,
1794                                                buf + server->total_read, len);
1795                 if (length < 0)
1796                         return length;
1797                 server->total_read += length;
1798         }
1799 
1800         /* how much data is in the response? */
1801 #ifdef CONFIG_CIFS_SMB_DIRECT
1802         use_rdma_mr = rdata->mr;
1803 #endif
1804         data_len = server->ops->read_data_length(buf, use_rdma_mr);
1805         if (!use_rdma_mr && (data_offset + data_len > buflen)) {
1806                 /* data_len is corrupt -- discard frame */
1807                 rdata->result = -EIO;
1808                 return cifs_readv_discard(server, mid);
1809         }
1810 
1811 #ifdef CONFIG_CIFS_SMB_DIRECT
1812         if (rdata->mr)
1813                 length = data_len; /* An RDMA read is already done. */
1814         else
1815 #endif
1816         {
1817                 length = cifs_read_iter_from_socket(server, &rdata->subreq.io_iter,
1818                                                     data_len);
1819                 iov_iter_revert(&rdata->subreq.io_iter, data_len);
1820         }
1821         if (length > 0)
1822                 rdata->got_bytes += length;
1823         server->total_read += length;
1824 
1825         cifs_dbg(FYI, "total_read=%u buflen=%u data_len=%u\n",
1826                  server->total_read, buflen, data_len);
1827 
1828         /* discard anything left over */
1829         if (server->total_read < buflen)
1830                 return cifs_readv_discard(server, mid);
1831 
1832         dequeue_mid(mid, false);
1833         mid->resp_buf = server->smallbuf;
1834         server->smallbuf = NULL;
1835         return length;
1836 }
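
/*
 * Editorial summary (not in the upstream file) of cifs_readv_receive():
 * (1) read the rest of the fixed READ_RSP header into smallbuf; (2) bail
 * out early on an expired session or a STATUS_PENDING interim response;
 * (3) validate the status code, header length and data_offset; (4) read
 * and skip any pad bytes between header and data; (5) pull the payload
 * straight into the subrequest's io_iter (or accept the length of an
 * already-completed SMBD RDMA transfer); (6) discard any trailing excess
 * and hand the mid back through dequeue_mid().
 */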
1837 
