// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/net/sunrpc/svc.c
 *
 * High-level RPC service routines
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 *
 * Multiple thread pools and NUMAisation
 * Copyright (c) 2006 Silicon Graphics, Inc.
 * by Greg Banks <gnb@melbourne.sgi.com>
 */

#include <linux/linkage.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/slab.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/bc_xprt.h>

#include <trace/events/sunrpc.h>

#include "fail.h"

#define RPCDBG_FACILITY RPCDBG_SVCDSP

static void svc_unregister(const struct svc_serv *serv, struct net *net);

#define SVC_POOL_DEFAULT        SVC_POOL_GLOBAL

/*
 * Mode for mapping cpus to pools.
 */
enum {
        SVC_POOL_AUTO = -1,     /* choose one of the others */
        SVC_POOL_GLOBAL,        /* no mapping, just a single global pool
                                 * (legacy & UP mode) */
        SVC_POOL_PERCPU,        /* one pool per cpu */
        SVC_POOL_PERNODE        /* one pool per numa node */
};

/*
 * Structure for mapping cpus to pools and vice versa.
 * Set up once during sunrpc initialisation.
 */

struct svc_pool_map {
        int count;                      /* How many svc_servs use us */
        int mode;                       /* Note: int not enum to avoid
                                         * warnings about "enumeration value
                                         * not handled in switch" */
        unsigned int npools;
        unsigned int *pool_to;          /* maps pool id to cpu or node */
        unsigned int *to_pool;          /* maps cpu or node to pool id */
};

static struct svc_pool_map svc_pool_map = {
        .mode = SVC_POOL_DEFAULT
};

static DEFINE_MUTEX(svc_pool_map_mutex); /* protects svc_pool_map.count only */

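/*
 * Parse a pool mode name ("auto", "global", "percpu" or "pernode") and
 * apply it to @m under svc_pool_map_mutex.  Once the map has users
 * (m->count != 0) the mode is fixed, so selecting a different one
 * fails with -EBUSY.
 */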
static int
__param_set_pool_mode(const char *val, struct svc_pool_map *m)
{
        int err, mode;

        mutex_lock(&svc_pool_map_mutex);

        err = 0;
        if (!strncmp(val, "auto", 4))
                mode = SVC_POOL_AUTO;
        else if (!strncmp(val, "global", 6))
                mode = SVC_POOL_GLOBAL;
        else if (!strncmp(val, "percpu", 6))
                mode = SVC_POOL_PERCPU;
        else if (!strncmp(val, "pernode", 7))
                mode = SVC_POOL_PERNODE;
        else
                err = -EINVAL;

        if (err)
                goto out;

        if (m->count == 0)
                m->mode = mode;
        else if (mode != m->mode)
                err = -EBUSY;
out:
        mutex_unlock(&svc_pool_map_mutex);
        return err;
}

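/*
 * Set handler for the "pool_mode" module parameter; kp->arg points at
 * svc_pool_map (see the module_param_call() invocation below).
 */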
static int
param_set_pool_mode(const char *val, const struct kernel_param *kp)
{
        struct svc_pool_map *m = kp->arg;

        return __param_set_pool_mode(val, m);
}

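/**
 * sunrpc_set_pool_mode - set the current pool_mode for the host
 * @val: one of "auto", "global", "percpu" or "pernode"
 *
 * Returns zero on success, or a negative errno if @val is not
 * recognised or the pool map is already in use with another mode.
 */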
int sunrpc_set_pool_mode(const char *val)
{
        return __param_set_pool_mode(val, &svc_pool_map);
}
EXPORT_SYMBOL(sunrpc_set_pool_mode);

/**
 * sunrpc_get_pool_mode - get the current pool_mode for the host
 * @buf: where to write the current pool_mode
 * @size: size of @buf
 *
 * Grab the current pool_mode from the svc_pool_map and write
 * the resulting string to @buf. Returns the number of characters
 * written to @buf (à la snprintf()).
 */
int
sunrpc_get_pool_mode(char *buf, size_t size)
{
        struct svc_pool_map *m = &svc_pool_map;

        switch (m->mode) {
        case SVC_POOL_AUTO:
                return snprintf(buf, size, "auto");
        case SVC_POOL_GLOBAL:
                return snprintf(buf, size, "global");
        case SVC_POOL_PERCPU:
                return snprintf(buf, size, "percpu");
        case SVC_POOL_PERNODE:
                return snprintf(buf, size, "pernode");
        default:
                return snprintf(buf, size, "%d", m->mode);
        }
}
EXPORT_SYMBOL(sunrpc_get_pool_mode);

static int
param_get_pool_mode(char *buf, const struct kernel_param *kp)
{
        char str[16];
        int len;

        len = sunrpc_get_pool_mode(str, ARRAY_SIZE(str));

        /* Ensure we have room for newline and NUL */
        len = min_t(int, len, ARRAY_SIZE(str) - 2);

        /* tack on the newline */
        str[len] = '\n';
        str[len + 1] = '\0';

        return sysfs_emit(buf, "%s", str);
}

module_param_call(pool_mode, param_set_pool_mode, param_get_pool_mode,
                  &svc_pool_map, 0644);

/*
 * Detect best pool mapping mode heuristically,
 * according to the machine's topology.
 */
static int
svc_pool_map_choose_mode(void)
{
        unsigned int node;

        if (nr_online_nodes > 1) {
                /*
                 * Actually have multiple NUMA nodes,
                 * so split pools on NUMA node boundaries
                 */
                return SVC_POOL_PERNODE;
        }

        node = first_online_node;
        if (nr_cpus_node(node) > 2) {
                /*
                 * Non-trivial SMP, or CONFIG_NUMA on
                 * non-NUMA hardware, e.g. with a generic
                 * x86_64 kernel on Xeons.  In this case we
                 * want to divide the pools on cpu boundaries.
                 */
                return SVC_POOL_PERCPU;
        }

        /* default: one global pool */
        return SVC_POOL_GLOBAL;
}

/*
 * Allocate the to_pool[] and pool_to[] arrays.
 * Returns 0 on success or an errno.
 */
static int
svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools)
{
        m->to_pool = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
        if (!m->to_pool)
                goto fail;
        m->pool_to = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
        if (!m->pool_to)
                goto fail_free;

        return 0;

fail_free:
        kfree(m->to_pool);
        m->to_pool = NULL;
fail:
        return -ENOMEM;
}

/*
 * Initialise the pool map for SVC_POOL_PERCPU mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_percpu(struct svc_pool_map *m)
{
        unsigned int maxpools = nr_cpu_ids;
        unsigned int pidx = 0;
        unsigned int cpu;
        int err;

        err = svc_pool_map_alloc_arrays(m, maxpools);
        if (err)
                return err;

        for_each_online_cpu(cpu) {
                BUG_ON(pidx >= maxpools);
                m->to_pool[cpu] = pidx;
                m->pool_to[pidx] = cpu;
                pidx++;
        }
        /* cpus brought online later all get mapped to pool0, sorry */

        return pidx;
}

/*
 * Initialise the pool map for SVC_POOL_PERNODE mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_pernode(struct svc_pool_map *m)
{
        unsigned int maxpools = nr_node_ids;
        unsigned int pidx = 0;
        unsigned int node;
        int err;

        err = svc_pool_map_alloc_arrays(m, maxpools);
        if (err)
                return err;

        for_each_node_with_cpus(node) {
                /* some architectures (e.g. SN2) have cpuless nodes */
                BUG_ON(pidx > maxpools);
                m->to_pool[node] = pidx;
                m->pool_to[pidx] = node;
                pidx++;
        }
        /* nodes brought online later all get mapped to pool0, sorry */

        return pidx;
}

/*
 * Add a reference to the global map of cpus to pools (and
 * vice versa) if pools are in use.
 * Initialise the map if we're the first user.
 * Returns the number of pools. If this is '1', no reference
 * was taken.
 */
static unsigned int
svc_pool_map_get(void)
{
        struct svc_pool_map *m = &svc_pool_map;
        int npools = -1;

        mutex_lock(&svc_pool_map_mutex);
        if (m->count++) {
                mutex_unlock(&svc_pool_map_mutex);
                return m->npools;
        }

        if (m->mode == SVC_POOL_AUTO)
                m->mode = svc_pool_map_choose_mode();

        switch (m->mode) {
        case SVC_POOL_PERCPU:
                npools = svc_pool_map_init_percpu(m);
                break;
        case SVC_POOL_PERNODE:
                npools = svc_pool_map_init_pernode(m);
                break;
        }

        if (npools <= 0) {
                /* default, or memory allocation failure */
                npools = 1;
                m->mode = SVC_POOL_GLOBAL;
        }
        m->npools = npools;
        mutex_unlock(&svc_pool_map_mutex);
        return npools;
}

/*
 * Drop a reference to the global map of cpus to pools.
 * When the last reference is dropped, the map data is
 * freed; this allows the sysadmin to change the pool
 * mode afterwards.
 */
static void
svc_pool_map_put(void)
{
        struct svc_pool_map *m = &svc_pool_map;

        mutex_lock(&svc_pool_map_mutex);
        if (!--m->count) {
                kfree(m->to_pool);
                m->to_pool = NULL;
                kfree(m->pool_to);
                m->pool_to = NULL;
                m->npools = 0;
        }
        mutex_unlock(&svc_pool_map_mutex);
}

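/*
 * Return the NUMA node backing pool @pidx, or NUMA_NO_NODE when the
 * map is unused or in a mode with no cpu/node affinity.
 */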
static int svc_pool_map_get_node(unsigned int pidx)
{
        const struct svc_pool_map *m = &svc_pool_map;

        if (m->count) {
                if (m->mode == SVC_POOL_PERCPU)
                        return cpu_to_node(m->pool_to[pidx]);
                if (m->mode == SVC_POOL_PERNODE)
                        return m->pool_to[pidx];
        }
        return NUMA_NO_NODE;
}

/*
 * Set the given thread's cpus_allowed mask so that it
 * will only run on cpus in the given pool.
 */
static inline void
svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
{
        struct svc_pool_map *m = &svc_pool_map;
        unsigned int node = m->pool_to[pidx];

        /*
         * The caller checks for sv_nrpools > 1, which
         * implies that we've been initialized.
         */
        WARN_ON_ONCE(m->count == 0);
        if (m->count == 0)
                return;

        switch (m->mode) {
        case SVC_POOL_PERCPU:
                set_cpus_allowed_ptr(task, cpumask_of(node));
                break;
        case SVC_POOL_PERNODE:
                set_cpus_allowed_ptr(task, cpumask_of_node(node));
                break;
        }
}

/**
 * svc_pool_for_cpu - Select pool to run a thread on this cpu
 * @serv: An RPC service
 *
 * Use the active CPU and the svc_pool_map's mode setting to
 * select the svc thread pool to use. Once initialized, the
 * svc_pool_map does not change.
 *
 * Return value:
 *   A pointer to an svc_pool
 */
struct svc_pool *svc_pool_for_cpu(struct svc_serv *serv)
{
        struct svc_pool_map *m = &svc_pool_map;
        int cpu = raw_smp_processor_id();
        unsigned int pidx = 0;

        if (serv->sv_nrpools <= 1)
                return serv->sv_pools;

        switch (m->mode) {
        case SVC_POOL_PERCPU:
                pidx = m->to_pool[cpu];
                break;
        case SVC_POOL_PERNODE:
                pidx = m->to_pool[cpu_to_node(cpu)];
                break;
        }

        return &serv->sv_pools[pidx % serv->sv_nrpools];
}

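/*
 * Create the per-net rpcbind client and clear out any registrations
 * left behind by a previous instance of this service.
 */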
int svc_rpcb_setup(struct svc_serv *serv, struct net *net)
{
        int err;

        err = rpcb_create_local(net);
        if (err)
                return err;

        /* Remove any stale portmap registrations */
        svc_unregister(serv, net);
        return 0;
}
EXPORT_SYMBOL_GPL(svc_rpcb_setup);

void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net)
{
        svc_unregister(serv, net);
        rpcb_put_local(net);
}
EXPORT_SYMBOL_GPL(svc_rpcb_cleanup);

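/*
 * Returns 1 if the service exports at least one version that is not
 * marked vs_hidden and therefore must be advertised via rpcbind.
 */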
static int svc_uses_rpcbind(struct svc_serv *serv)
{
        struct svc_program      *progp;
        unsigned int            i;

        for (progp = serv->sv_program; progp; progp = progp->pg_next) {
                for (i = 0; i < progp->pg_nvers; i++) {
                        if (progp->pg_vers[i] == NULL)
                                continue;
                        if (!progp->pg_vers[i]->vs_hidden)
                                return 1;
                }
        }

        return 0;
}

int svc_bind(struct svc_serv *serv, struct net *net)
{
        if (!svc_uses_rpcbind(serv))
                return 0;
        return svc_rpcb_setup(serv, net);
}
EXPORT_SYMBOL_GPL(svc_bind);

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static void
__svc_init_bc(struct svc_serv *serv)
{
        lwq_init(&serv->sv_cb_list);
}
#else
static void
__svc_init_bc(struct svc_serv *serv)
{
}
#endif

/*
 * Create an RPC service
 */
static struct svc_serv *
__svc_create(struct svc_program *prog, struct svc_stat *stats,
             unsigned int bufsize, int npools, int (*threadfn)(void *data))
{
        struct svc_serv *serv;
        unsigned int vers;
        unsigned int xdrsize;
        unsigned int i;

        serv = kzalloc(sizeof(*serv), GFP_KERNEL);
        if (!serv)
                return NULL;
        serv->sv_name      = prog->pg_name;
        serv->sv_program   = prog;
        serv->sv_stats     = stats;
        if (bufsize > RPCSVC_MAXPAYLOAD)
                bufsize = RPCSVC_MAXPAYLOAD;
        serv->sv_max_payload = bufsize ? bufsize : 4096;
        serv->sv_max_mesg  = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE);
        serv->sv_threadfn = threadfn;
        xdrsize = 0;
        while (prog) {
                prog->pg_lovers = prog->pg_nvers - 1;
                for (vers = 0; vers < prog->pg_nvers; vers++)
                        if (prog->pg_vers[vers]) {
                                prog->pg_hivers = vers;
                                if (prog->pg_lovers > vers)
                                        prog->pg_lovers = vers;
                                if (prog->pg_vers[vers]->vs_xdrsize > xdrsize)
                                        xdrsize = prog->pg_vers[vers]->vs_xdrsize;
                        }
                prog = prog->pg_next;
        }
        serv->sv_xdrsize   = xdrsize;
        INIT_LIST_HEAD(&serv->sv_tempsocks);
        INIT_LIST_HEAD(&serv->sv_permsocks);
        timer_setup(&serv->sv_temptimer, NULL, 0);
        spin_lock_init(&serv->sv_lock);

        __svc_init_bc(serv);

        serv->sv_nrpools = npools;
        serv->sv_pools =
                kcalloc(serv->sv_nrpools, sizeof(struct svc_pool),
                        GFP_KERNEL);
        if (!serv->sv_pools) {
                kfree(serv);
                return NULL;
        }

        for (i = 0; i < serv->sv_nrpools; i++) {
                struct svc_pool *pool = &serv->sv_pools[i];

                dprintk("svc: initialising pool %u for %s\n",
                                i, serv->sv_name);

                pool->sp_id = i;
                lwq_init(&pool->sp_xprts);
                INIT_LIST_HEAD(&pool->sp_all_threads);
                init_llist_head(&pool->sp_idle_threads);

                percpu_counter_init(&pool->sp_messages_arrived, 0, GFP_KERNEL);
                percpu_counter_init(&pool->sp_sockets_queued, 0, GFP_KERNEL);
                percpu_counter_init(&pool->sp_threads_woken, 0, GFP_KERNEL);
        }

        return serv;
}

/**
 * svc_create - Create an RPC service
 * @prog: the RPC program the new service will handle
 * @bufsize: maximum message size for @prog
 * @threadfn: a function to service RPC requests for @prog
 *
 * Returns an instantiated struct svc_serv object or NULL.
 */
struct svc_serv *svc_create(struct svc_program *prog, unsigned int bufsize,
                            int (*threadfn)(void *data))
{
        return __svc_create(prog, NULL, bufsize, 1, threadfn);
}
EXPORT_SYMBOL_GPL(svc_create);

/**
 * svc_create_pooled - Create an RPC service with pooled threads
 * @prog: the RPC program the new service will handle
 * @stats: the stats struct if desired
 * @bufsize: maximum message size for @prog
 * @threadfn: a function to service RPC requests for @prog
 *
 * Returns an instantiated struct svc_serv object or NULL.
 */
struct svc_serv *svc_create_pooled(struct svc_program *prog,
                                   struct svc_stat *stats,
                                   unsigned int bufsize,
                                   int (*threadfn)(void *data))
{
        struct svc_serv *serv;
        unsigned int npools = svc_pool_map_get();

        serv = __svc_create(prog, stats, bufsize, npools, threadfn);
        if (!serv)
                goto out_err;
        serv->sv_is_pooled = true;
        return serv;
out_err:
        svc_pool_map_put();
        return NULL;
}
EXPORT_SYMBOL_GPL(svc_create_pooled);

/*
 * Destroy an RPC service. Should be called with appropriate locking to
 * protect sv_permsocks and sv_tempsocks.
 */
void
svc_destroy(struct svc_serv **servp)
{
        struct svc_serv *serv = *servp;
        unsigned int i;

        *servp = NULL;

        dprintk("svc: svc_destroy(%s)\n", serv->sv_program->pg_name);
        timer_shutdown_sync(&serv->sv_temptimer);

        /*
         * Remaining transports at this point are not expected.
         */
        WARN_ONCE(!list_empty(&serv->sv_permsocks),
                  "SVC: permsocks remain for %s\n", serv->sv_program->pg_name);
        WARN_ONCE(!list_empty(&serv->sv_tempsocks),
                  "SVC: tempsocks remain for %s\n", serv->sv_program->pg_name);

        cache_clean_deferred(serv);

        if (serv->sv_is_pooled)
                svc_pool_map_put();

        for (i = 0; i < serv->sv_nrpools; i++) {
                struct svc_pool *pool = &serv->sv_pools[i];

                percpu_counter_destroy(&pool->sp_messages_arrived);
                percpu_counter_destroy(&pool->sp_sockets_queued);
                percpu_counter_destroy(&pool->sp_threads_woken);
        }
        kfree(serv->sv_pools);
        kfree(serv);
}
EXPORT_SYMBOL_GPL(svc_destroy);

static bool
svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node)
{
        unsigned long pages, ret;

        /* bc_xprt uses fore channel allocated buffers */
        if (svc_is_backchannel(rqstp))
                return true;

        pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply.
                                       * We assume each is at most one page
                                       */
        WARN_ON_ONCE(pages > RPCSVC_MAXPAGES);
        if (pages > RPCSVC_MAXPAGES)
                pages = RPCSVC_MAXPAGES;

        ret = alloc_pages_bulk_array_node(GFP_KERNEL, node, pages,
                                          rqstp->rq_pages);
        return ret == pages;
}

/*
 * Release an RPC server buffer
 */
static void
svc_release_buffer(struct svc_rqst *rqstp)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(rqstp->rq_pages); i++)
                if (rqstp->rq_pages[i])
                        put_page(rqstp->rq_pages[i]);
}

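/*
 * Allocate an svc_rqst and its scratch page, XDR argument/result
 * buffers and receive pages on the given NUMA node.  On failure the
 * partially built svc_rqst is torn down via svc_rqst_free() and NULL
 * is returned.
 */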
struct svc_rqst *
svc_rqst_alloc(struct svc_serv *serv, struct svc_pool *pool, int node)
{
        struct svc_rqst *rqstp;

        rqstp = kzalloc_node(sizeof(*rqstp), GFP_KERNEL, node);
        if (!rqstp)
                return rqstp;

        folio_batch_init(&rqstp->rq_fbatch);

        rqstp->rq_server = serv;
        rqstp->rq_pool = pool;

        rqstp->rq_scratch_page = alloc_pages_node(node, GFP_KERNEL, 0);
        if (!rqstp->rq_scratch_page)
                goto out_enomem;

        rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
        if (!rqstp->rq_argp)
                goto out_enomem;

        rqstp->rq_resp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
        if (!rqstp->rq_resp)
                goto out_enomem;

        if (!svc_init_buffer(rqstp, serv->sv_max_mesg, node))
                goto out_enomem;

        return rqstp;
out_enomem:
        svc_rqst_free(rqstp);
        return NULL;
}
EXPORT_SYMBOL_GPL(svc_rqst_alloc);

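/*
 * Allocate an svc_rqst for a new thread in @pool and account for it
 * in both the server-wide and the per-pool thread counts.
 */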
static struct svc_rqst *
svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
{
        struct svc_rqst *rqstp;

        rqstp = svc_rqst_alloc(serv, pool, node);
        if (!rqstp)
                return ERR_PTR(-ENOMEM);

        spin_lock_bh(&serv->sv_lock);
        serv->sv_nrthreads += 1;
        spin_unlock_bh(&serv->sv_lock);

        pool->sp_nrthreads += 1;

        /* Protected by whatever lock the service uses when calling
         * svc_set_num_threads()
         */
        list_add_rcu(&rqstp->rq_all, &pool->sp_all_threads);

        return rqstp;
}

/**
 * svc_pool_wake_idle_thread - Awaken an idle thread in @pool
 * @pool: service thread pool
 *
 * Can be called from soft IRQ or process context. Finding an idle
 * service thread and marking it BUSY is atomic with respect to
 * other calls to svc_pool_wake_idle_thread().
 */
void svc_pool_wake_idle_thread(struct svc_pool *pool)
{
        struct svc_rqst *rqstp;
        struct llist_node *ln;

        rcu_read_lock();
        ln = READ_ONCE(pool->sp_idle_threads.first);
        if (ln) {
                rqstp = llist_entry(ln, struct svc_rqst, rq_idle);
                WRITE_ONCE(rqstp->rq_qtime, ktime_get());
                if (!task_is_running(rqstp->rq_task)) {
                        wake_up_process(rqstp->rq_task);
                        trace_svc_wake_up(rqstp->rq_task->pid);
                        percpu_counter_inc(&pool->sp_threads_woken);
                }
                rcu_read_unlock();
                return;
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(svc_pool_wake_idle_thread);

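/*
 * Select the pool for the next new thread: the caller's @pool when
 * one is given, otherwise round-robin across the service's pools
 * using @state as the rotor.
 */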
static struct svc_pool *
svc_pool_next(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
        return pool ? pool : &serv->sv_pools[(*state)++ % serv->sv_nrpools];
}

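/*
 * Select a pool to shrink: the caller's @target_pool when given,
 * otherwise the next pool (walking the rotor backwards) that still
 * has threads.  The victim pool is flagged so that the exiting
 * thread can be identified and waited for.
 */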
static struct svc_pool *
svc_pool_victim(struct svc_serv *serv, struct svc_pool *target_pool,
                unsigned int *state)
{
        struct svc_pool *pool;
        unsigned int i;

        pool = target_pool;

        if (!pool) {
                for (i = 0; i < serv->sv_nrpools; i++) {
                        pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
                        if (pool->sp_nrthreads)
                                break;
                }
        }

        if (pool && pool->sp_nrthreads) {
                set_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
                set_bit(SP_NEED_VICTIM, &pool->sp_flags);
                return pool;
        }
        return NULL;
}

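/*
 * Create new kernel threads until @nrservs of them have been started,
 * spreading them across the service's pools (or confining them to
 * @pool when it is non-NULL) and pinning each thread to the cpus of
 * its pool where the pool map provides an affinity.
 */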
static int
svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
        struct svc_rqst *rqstp;
        struct task_struct *task;
        struct svc_pool *chosen_pool;
        unsigned int state = serv->sv_nrthreads - 1;
        int node;

        do {
                nrservs--;
                chosen_pool = svc_pool_next(serv, pool, &state);
                node = svc_pool_map_get_node(chosen_pool->sp_id);

                rqstp = svc_prepare_thread(serv, chosen_pool, node);
                if (IS_ERR(rqstp))
                        return PTR_ERR(rqstp);
                task = kthread_create_on_node(serv->sv_threadfn, rqstp,
                                              node, "%s", serv->sv_name);
                if (IS_ERR(task)) {
                        svc_exit_thread(rqstp);
                        return PTR_ERR(task);
                }

                rqstp->rq_task = task;
                if (serv->sv_nrpools > 1)
                        svc_pool_map_set_cpumask(task, chosen_pool->sp_id);

                svc_sock_update_bufs(serv);
                wake_up_process(task);
        } while (nrservs > 0);

        return 0;
}

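/*
 * Retire threads until @nrservs reaches zero: flag a victim pool, wake
 * an idle thread to notice SP_NEED_VICTIM and exit, then wait for
 * svc_exit_thread() to clear SP_VICTIM_REMAINS.
 */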
static int
svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
        unsigned int state = serv->sv_nrthreads - 1;
        struct svc_pool *victim;

        do {
                victim = svc_pool_victim(serv, pool, &state);
                if (!victim)
                        break;
                svc_pool_wake_idle_thread(victim);
                wait_on_bit(&victim->sp_flags, SP_VICTIM_REMAINS,
                            TASK_IDLE);
                nrservs++;
        } while (nrservs < 0);
        return 0;
}

/**
 * svc_set_num_threads - adjust number of threads per RPC service
 * @serv: RPC service to adjust
 * @pool: Specific pool from which to choose threads, or NULL
 * @nrservs: New number of threads for @serv (0 or less means kill all threads)
 *
 * Create or destroy threads to make the number of threads for @serv the
 * given number. If @pool is non-NULL, change only threads in that pool;
 * otherwise, round-robin between all pools for @serv. @serv's
 * sv_nrthreads is adjusted for each thread created or destroyed.
 *
 * Caller must ensure mutual exclusion between this and server startup or
 * shutdown.
 *
 * Returns zero on success or a negative errno if an error occurred while
 * starting a thread.
 */
int
svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
        if (!pool)
                nrservs -= serv->sv_nrthreads;
        else
                nrservs -= pool->sp_nrthreads;

        if (nrservs > 0)
                return svc_start_kthreads(serv, pool, nrservs);
        if (nrservs < 0)
                return svc_stop_kthreads(serv, pool, nrservs);
        return 0;
}
EXPORT_SYMBOL_GPL(svc_set_num_threads);

/**
 * svc_rqst_replace_page - Replace one page in rq_pages[]
 * @rqstp: svc_rqst with pages to replace
 * @page: replacement page
 *
 * When replacing a page in rq_pages, batch the release of the
 * replaced pages to avoid hammering the page allocator.
 *
 * Return values:
 *   %true: page replaced
 *   %false: array bounds checking failed
 */
bool svc_rqst_replace_page(struct svc_rqst *rqstp, struct page *page)
{
        struct page **begin = rqstp->rq_pages;
        struct page **end = &rqstp->rq_pages[RPCSVC_MAXPAGES];

        if (unlikely(rqstp->rq_next_page < begin || rqstp->rq_next_page > end)) {
                trace_svc_replace_page_err(rqstp);
                return false;
        }

        if (*rqstp->rq_next_page) {
                if (!folio_batch_add(&rqstp->rq_fbatch,
                                page_folio(*rqstp->rq_next_page)))
                        __folio_batch_release(&rqstp->rq_fbatch);
        }

        get_page(page);
        *(rqstp->rq_next_page++) = page;
        return true;
}
EXPORT_SYMBOL_GPL(svc_rqst_replace_page);

/**
 * svc_rqst_release_pages - Release Reply buffer pages
 * @rqstp: RPC transaction context
 *
 * Release response pages that might still be in flight after
 * svc_send, and any spliced filesystem-owned pages.
 */
void svc_rqst_release_pages(struct svc_rqst *rqstp)
{
        int i, count = rqstp->rq_next_page - rqstp->rq_respages;

        if (count) {
                release_pages(rqstp->rq_respages, count);
                for (i = 0; i < count; i++)
                        rqstp->rq_respages[i] = NULL;
        }
}

/*
 * Called from a server thread as it's exiting. Caller must hold the "service
 * mutex" for the service.
 */
void
svc_rqst_free(struct svc_rqst *rqstp)
{
        folio_batch_release(&rqstp->rq_fbatch);
        svc_release_buffer(rqstp);
        if (rqstp->rq_scratch_page)
                put_page(rqstp->rq_scratch_page);
        kfree(rqstp->rq_resp);
        kfree(rqstp->rq_argp);
        kfree(rqstp->rq_auth_data);
        kfree_rcu(rqstp, rq_rcu_head);
}
EXPORT_SYMBOL_GPL(svc_rqst_free);

void
svc_exit_thread(struct svc_rqst *rqstp)
{
        struct svc_serv *serv = rqstp->rq_server;
        struct svc_pool *pool = rqstp->rq_pool;

        list_del_rcu(&rqstp->rq_all);

        pool->sp_nrthreads -= 1;

        spin_lock_bh(&serv->sv_lock);
        serv->sv_nrthreads -= 1;
        spin_unlock_bh(&serv->sv_lock);
        svc_sock_update_bufs(serv);

        svc_rqst_free(rqstp);

        clear_and_wake_up_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
}
EXPORT_SYMBOL_GPL(svc_exit_thread);

/*
 * Register an "inet" protocol family netid with the local
 * rpcbind daemon via an rpcbind v4 SET request.
 *
 * No netconfig infrastructure is available in the kernel, so
 * we map IP_ protocol numbers to netids by hand.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_rpcb_register4(struct net *net, const u32 program,
                                const u32 version,
                                const unsigned short protocol,
                                const unsigned short port)
{
        const struct sockaddr_in sin = {
                .sin_family             = AF_INET,
                .sin_addr.s_addr        = htonl(INADDR_ANY),
                .sin_port               = htons(port),
        };
        const char *netid;
        int error;

        switch (protocol) {
        case IPPROTO_UDP:
                netid = RPCBIND_NETID_UDP;
                break;
        case IPPROTO_TCP:
                netid = RPCBIND_NETID_TCP;
                break;
        default:
                return -ENOPROTOOPT;
        }

        error = rpcb_v4_register(net, program, version,
                                        (const struct sockaddr *)&sin, netid);

        /*
         * User space didn't support rpcbind v4, so retry this
         * registration request with the legacy rpcbind v2 protocol.
         */
        if (error == -EPROTONOSUPPORT)
                error = rpcb_register(net, program, version, protocol, port);

        return error;
}

#if IS_ENABLED(CONFIG_IPV6)
/*
 * Register an "inet6" protocol family netid with the local
 * rpcbind daemon via an rpcbind v4 SET request.
 *
 * No netconfig infrastructure is available in the kernel, so
 * we map IP_ protocol numbers to netids by hand.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_rpcb_register6(struct net *net, const u32 program,
                                const u32 version,
                                const unsigned short protocol,
                                const unsigned short port)
{
        const struct sockaddr_in6 sin6 = {
                .sin6_family            = AF_INET6,
                .sin6_addr              = IN6ADDR_ANY_INIT,
                .sin6_port              = htons(port),
        };
        const char *netid;
        int error;

        switch (protocol) {
        case IPPROTO_UDP:
                netid = RPCBIND_NETID_UDP6;
                break;
        case IPPROTO_TCP:
                netid = RPCBIND_NETID_TCP6;
                break;
        default:
                return -ENOPROTOOPT;
        }

        error = rpcb_v4_register(net, program, version,
                                        (const struct sockaddr *)&sin6, netid);

        /*
         * User space didn't support rpcbind version 4, so we won't
         * use a PF_INET6 listener.
         */
        if (error == -EPROTONOSUPPORT)
                error = -EAFNOSUPPORT;

        return error;
}
#endif  /* IS_ENABLED(CONFIG_IPV6) */

/*
 * Register a kernel RPC service via rpcbind version 4.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_register(struct net *net, const char *progname,
                          const u32 program, const u32 version,
                          const int family,
                          const unsigned short protocol,
                          const unsigned short port)
{
        int error = -EAFNOSUPPORT;

        switch (family) {
        case PF_INET:
                error = __svc_rpcb_register4(net, program, version,
                                                protocol, port);
                break;
#if IS_ENABLED(CONFIG_IPV6)
        case PF_INET6:
                error = __svc_rpcb_register6(net, program, version,
                                                protocol, port);
#endif
        }

        trace_svc_register(progname, version, family, protocol, port, error);
        return error;
}

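/*
 * Register a single [program, version] pair with the local rpcbind
 * daemon.  Exported so that pg_rpcbind_set implementations such as
 * svc_generic_rpcbind_set() below can share it.
 */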
int svc_rpcbind_set_version(struct net *net,
                            const struct svc_program *progp,
                            u32 version, int family,
                            unsigned short proto,
                            unsigned short port)
{
        return __svc_register(net, progp->pg_name, progp->pg_prog,
                                version, family, proto, port);
}
EXPORT_SYMBOL_GPL(svc_rpcbind_set_version);

int svc_generic_rpcbind_set(struct net *net,
                            const struct svc_program *progp,
                            u32 version, int family,
                            unsigned short proto,
                            unsigned short port)
{
        const struct svc_version *vers = progp->pg_vers[version];
        int error;

        if (vers == NULL)
                return 0;

        if (vers->vs_hidden) {
                trace_svc_noregister(progp->pg_name, version, proto,
                                     port, family, 0);
                return 0;
        }

        /*
         * Don't register a UDP port if we need congestion
         * control.
         */
        if (vers->vs_need_cong_ctrl && proto == IPPROTO_UDP)
                return 0;

        error = svc_rpcbind_set_version(net, progp, version,
                                        family, proto, port);

        return (vers->vs_rpcb_optnl) ? 0 : error;
}
EXPORT_SYMBOL_GPL(svc_generic_rpcbind_set);

/**
 * svc_register - register an RPC service with the local portmapper
 * @serv: svc_serv struct for the service to register
 * @net: net namespace for the service to register
 * @family: protocol family of service's listener socket
 * @proto: transport protocol number to advertise
 * @port: port to advertise
 *
 * Service is registered for any address in the passed-in protocol family
 */
int svc_register(const struct svc_serv *serv, struct net *net,
                 const int family, const unsigned short proto,
                 const unsigned short port)
{
        struct svc_program      *progp;
        unsigned int            i;
        int                     error = 0;

        WARN_ON_ONCE(proto == 0 && port == 0);
        if (proto == 0 && port == 0)
                return -EINVAL;

        for (progp = serv->sv_program; progp; progp = progp->pg_next) {
                for (i = 0; i < progp->pg_nvers; i++) {

                        error = progp->pg_rpcbind_set(net, progp, i,
                                        family, proto, port);
                        if (error < 0) {
                                printk(KERN_WARNING "svc: failed to register "
                                        "%sv%u RPC service (errno %d).\n",
                                        progp->pg_name, i, -error);
                                break;
                        }
                }
        }

        return error;
}

/*
 * If user space is running rpcbind, it should take the v4 UNSET
 * and clear everything for this [program, version].  If user space
 * is running portmap, it will reject the v4 UNSET, but won't have
 * any "inet6" entries anyway.  So a PMAP_UNSET should be sufficient
 * in this case to clear all existing entries for [program, version].
 */
static void __svc_unregister(struct net *net, const u32 program, const u32 version,
                             const char *progname)
{
        int error;

        error = rpcb_v4_register(net, program, version, NULL, "");

        /*
         * User space didn't support rpcbind v4, so retry this
         * request with the legacy rpcbind v2 protocol.
         */
        if (error == -EPROTONOSUPPORT)
                error = rpcb_register(net, program, version, 0, 0);

        trace_svc_unregister(progname, version, error);
}

/*
 * All netids, bind addresses and ports registered for [program, version]
 * are removed from the local rpcbind database (if the service is not
 * hidden) to make way for a new instance of the service.
 *
 * The result of unregistration is reported via dprintk for those who want
 * verification of the result, but is otherwise not important.
 */
static void svc_unregister(const struct svc_serv *serv, struct net *net)
{
        struct sighand_struct *sighand;
        struct svc_program *progp;
        unsigned long flags;
        unsigned int i;

        clear_thread_flag(TIF_SIGPENDING);

        for (progp = serv->sv_program; progp; progp = progp->pg_next) {
                for (i = 0; i < progp->pg_nvers; i++) {
                        if (progp->pg_vers[i] == NULL)
                                continue;
                        if (progp->pg_vers[i]->vs_hidden)
                                continue;
                        __svc_unregister(net, progp->pg_prog, i, progp->pg_name);
                }
        }

        rcu_read_lock();
        sighand = rcu_dereference(current->sighand);
        spin_lock_irqsave(&sighand->siglock, flags);
        recalc_sigpending();
        spin_unlock_irqrestore(&sighand->siglock, flags);
        rcu_read_unlock();
}

/*
 * dprintk the given error with the address of the client that caused it.
 */
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
static __printf(2, 3)
void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;
        char    buf[RPC_MAX_ADDRBUFLEN];

        va_start(args, fmt);

        vaf.fmt = fmt;
        vaf.va = &args;

        dprintk("svc: %s: %pV", svc_print_addr(rqstp, buf, sizeof(buf)), &vaf);

        va_end(args);
}
#else
static __printf(2, 3) void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) {}
#endif

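/*
 * Default pg_init_request implementation: validate the version and
 * procedure numbers taken from the call header, zero the argument and
 * result buffers, and hand back the version's dispatch routine.
 */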
1258 __be32
1259 svc_generic_init_request(struct svc_rqst *rqstp,
1260                 const struct svc_program *progp,
1261                 struct svc_process_info *ret)
1262 {
1263         const struct svc_version *versp = NULL; /* compiler food */
1264         const struct svc_procedure *procp = NULL;
1265 
1266         if (rqstp->rq_vers >= progp->pg_nvers )
1267                 goto err_bad_vers;
1268         versp = progp->pg_vers[rqstp->rq_vers];
1269         if (!versp)
1270                 goto err_bad_vers;
1271 
1272         /*
1273          * Some protocol versions (namely NFSv4) require some form of
1274          * congestion control.  (See RFC 7530 section 3.1 paragraph 2)
1275          * In other words, UDP is not allowed. We mark those when setting
1276          * up the svc_xprt, and verify that here.
1277          *
1278          * The spec is not very clear about what error should be returned
1279          * when someone tries to access a server that is listening on UDP
1280          * for lower versions. RPC_PROG_MISMATCH seems to be the closest
1281          * fit.
1282          */
1283         if (versp->vs_need_cong_ctrl && rqstp->rq_xprt &&
1284             !test_bit(XPT_CONG_CTRL, &rqstp->rq_xprt->xpt_flags))
1285                 goto err_bad_vers;
1286 
1287         if (rqstp->rq_proc >= versp->vs_nproc)
1288                 goto err_bad_proc;
1289         rqstp->rq_procinfo = procp = &versp->vs_proc[rqstp->rq_proc];
1290 
1291         /* Initialize storage for argp and resp */
1292         memset(rqstp->rq_argp, 0, procp->pc_argzero);
1293         memset(rqstp->rq_resp, 0, procp->pc_ressize);
1294 
1295         /* Bump per-procedure stats counter */
1296         this_cpu_inc(versp->vs_count[rqstp->rq_proc]);
1297 
1298         ret->dispatch = versp->vs_dispatch;
1299         return rpc_success;
1300 err_bad_vers:
1301         ret->mismatch.lovers = progp->pg_lovers;
1302         ret->mismatch.hivers = progp->pg_hivers;
1303         return rpc_prog_mismatch;
1304 err_bad_proc:
1305         return rpc_proc_unavail;
1306 }
1307 EXPORT_SYMBOL_GPL(svc_generic_init_request);
1308 
1309 /*
1310  * Common routine for processing the RPC request.
1311  */
1312 static int
1313 svc_process_common(struct svc_rqst *rqstp)
1314 {
1315         struct xdr_stream       *xdr = &rqstp->rq_res_stream;
1316         struct svc_program      *progp;
1317         const struct svc_procedure *procp = NULL;
1318         struct svc_serv         *serv = rqstp->rq_server;
1319         struct svc_process_info process;
1320         enum svc_auth_status    auth_res;
1321         unsigned int            aoffset;
1322         int                     rc;
1323         __be32                  *p;
1324 
1325         /* Will be turned off only when NFSv4 Sessions are used */
1326         set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
1327         clear_bit(RQ_DROPME, &rqstp->rq_flags);
1328 
1329         /* Construct the first words of the reply: */
1330         svcxdr_init_encode(rqstp);
1331         xdr_stream_encode_be32(xdr, rqstp->rq_xid);
1332         xdr_stream_encode_be32(xdr, rpc_reply);
1333 
1334         p = xdr_inline_decode(&rqstp->rq_arg_stream, XDR_UNIT * 4);
1335         if (unlikely(!p))
1336                 goto err_short_len;
1337         if (*p++ != cpu_to_be32(RPC_VERSION))
1338                 goto err_bad_rpc;
1339 
1340         xdr_stream_encode_be32(xdr, rpc_msg_accepted);
1341 
1342         rqstp->rq_prog = be32_to_cpup(p++);
1343         rqstp->rq_vers = be32_to_cpup(p++);
1344         rqstp->rq_proc = be32_to_cpup(p);
1345 
1346         for (progp = serv->sv_program; progp; progp = progp->pg_next)
1347                 if (rqstp->rq_prog == progp->pg_prog)
1348                         break;
1349 
1350         /*
1351          * Decode auth data, and add verifier to reply buffer.
1352          * We do this before anything else in order to get a decent
1353          * auth verifier.
1354          */
1355         auth_res = svc_authenticate(rqstp);
1356         /* Also give the program a chance to reject this call: */
1357         if (auth_res == SVC_OK && progp)
1358                 auth_res = progp->pg_authenticate(rqstp);
1359         trace_svc_authenticate(rqstp, auth_res);
1360         switch (auth_res) {
1361         case SVC_OK:
1362                 break;
1363         case SVC_GARBAGE:
1364                 goto err_garbage_args;
1365         case SVC_SYSERR:
1366                 goto err_system_err;
1367         case SVC_DENIED:
1368                 goto err_bad_auth;
1369         case SVC_CLOSE:
1370                 goto close;
1371         case SVC_DROP:
1372                 goto dropit;
1373         case SVC_COMPLETE:
1374                 goto sendit;
1375         default:
1376                 pr_warn_once("Unexpected svc_auth_status (%d)\n", auth_res);
1377                 goto err_system_err;
1378         }
1379 
1380         if (progp == NULL)
1381                 goto err_bad_prog;
1382 
1383         switch (progp->pg_init_request(rqstp, progp, &process)) {
1384         case rpc_success:
1385                 break;
1386         case rpc_prog_unavail:
1387                 goto err_bad_prog;
1388         case rpc_prog_mismatch:
1389                 goto err_bad_vers;
1390         case rpc_proc_unavail:
1391                 goto err_bad_proc;
1392         }
1393 
1394         procp = rqstp->rq_procinfo;
1395         /* Should this check go into the dispatcher? */
1396         if (!procp || !procp->pc_func)
1397                 goto err_bad_proc;
1398 
1399         /* Syntactic check complete */
1400         if (serv->sv_stats)
1401                 serv->sv_stats->rpccnt++;
1402         trace_svc_process(rqstp, progp->pg_name);
1403 
1404         aoffset = xdr_stream_pos(xdr);
1405 
1406         /* un-reserve some of the out-queue now that we have a
1407          * better idea of reply size
1408          */
1409         if (procp->pc_xdrressize)
1410                 svc_reserve_auth(rqstp, procp->pc_xdrressize<<2);
1411 
1412         /* Call the function that processes the request. */
1413         rc = process.dispatch(rqstp);
1414         if (procp->pc_release)
1415                 procp->pc_release(rqstp);
1416         xdr_finish_decode(xdr);
1417 
1418         if (!rc)
1419                 goto dropit;
1420         if (rqstp->rq_auth_stat != rpc_auth_ok)
1421                 goto err_bad_auth;
1422 
1423         if (*rqstp->rq_accept_statp != rpc_success)
1424                 xdr_truncate_encode(xdr, aoffset);
1425 
1426         if (procp->pc_encode == NULL)
1427                 goto dropit;
1428 
1429  sendit:
1430         if (svc_authorise(rqstp))
1431                 goto close_xprt;
1432         return 1;               /* Caller can now send it */
1433 
1434  dropit:
1435         svc_authorise(rqstp);   /* doesn't hurt to call this twice */
1436         dprintk("svc: svc_process dropit\n");
1437         return 0;
1438 
1439  close:
1440         svc_authorise(rqstp);
1441 close_xprt:
1442         if (rqstp->rq_xprt && test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
1443                 svc_xprt_close(rqstp->rq_xprt);
1444         dprintk("svc: svc_process close\n");
1445         return 0;
1446 
1447 err_short_len:
1448         svc_printk(rqstp, "short len %u, dropping request\n",
1449                    rqstp->rq_arg.len);
1450         goto close_xprt;
1451 
1452 err_bad_rpc:
1453         if (serv->sv_stats)
1454                 serv->sv_stats->rpcbadfmt++;
1455         xdr_stream_encode_u32(xdr, RPC_MSG_DENIED);
1456         xdr_stream_encode_u32(xdr, RPC_MISMATCH);
1457         /* Only RPCv2 supported */
1458         xdr_stream_encode_u32(xdr, RPC_VERSION);
1459         xdr_stream_encode_u32(xdr, RPC_VERSION);
1460         return 1;       /* don't wrap */
1461 
1462 err_bad_auth:
1463         dprintk("svc: authentication failed (%d)\n",
1464                 be32_to_cpu(rqstp->rq_auth_stat));
1465         if (serv->sv_stats)
1466                 serv->sv_stats->rpcbadauth++;
1467         /* Restore write pointer to location of reply status: */
1468         xdr_truncate_encode(xdr, XDR_UNIT * 2);
1469         xdr_stream_encode_u32(xdr, RPC_MSG_DENIED);
1470         xdr_stream_encode_u32(xdr, RPC_AUTH_ERROR);
1471         xdr_stream_encode_be32(xdr, rqstp->rq_auth_stat);
1472         goto sendit;
1473 
1474 err_bad_prog:
1475         dprintk("svc: unknown program %d\n", rqstp->rq_prog);
1476         if (serv->sv_stats)
1477                 serv->sv_stats->rpcbadfmt++;
1478         *rqstp->rq_accept_statp = rpc_prog_unavail;
1479         goto sendit;
1480 
1481 err_bad_vers:
1482         svc_printk(rqstp, "unknown version (%d for prog %d, %s)\n",
1483                        rqstp->rq_vers, rqstp->rq_prog, progp->pg_name);
1484 
1485         if (serv->sv_stats)
1486                 serv->sv_stats->rpcbadfmt++;
1487         *rqstp->rq_accept_statp = rpc_prog_mismatch;
1488 
1489         /*
1490          * svc_authenticate() has already added the verifier and
1491          * advanced the stream just past rq_accept_statp.
1492          */
1493         xdr_stream_encode_u32(xdr, process.mismatch.lovers);
1494         xdr_stream_encode_u32(xdr, process.mismatch.hivers);
1495         goto sendit;
1496 
1497 err_bad_proc:
1498         svc_printk(rqstp, "unknown procedure (%d)\n", rqstp->rq_proc);
1499 
1500         if (serv->sv_stats)
1501                 serv->sv_stats->rpcbadfmt++;
1502         *rqstp->rq_accept_statp = rpc_proc_unavail;
1503         goto sendit;
1504 
1505 err_garbage_args:
1506         svc_printk(rqstp, "failed to decode RPC header\n");
1507 
1508         if (serv->sv_stats)
1509                 serv->sv_stats->rpcbadfmt++;
1510         *rqstp->rq_accept_statp = rpc_garbage_args;
1511         goto sendit;
1512 
1513 err_system_err:
1514         if (serv->sv_stats)
1515                 serv->sv_stats->rpcbadfmt++;
1516         *rqstp->rq_accept_statp = rpc_system_err;
1517         goto sendit;
1518 }
1519 
1520 /**
1521  * svc_process - Execute one RPC transaction
1522  * @rqstp: RPC transaction context
1523  *
1524  */
1525 void svc_process(struct svc_rqst *rqstp)
1526 {
1527         struct kvec             *resv = &rqstp->rq_res.head[0];
1528         __be32 *p;
1529 
1530 #if IS_ENABLED(CONFIG_FAIL_SUNRPC)
1531         if (!fail_sunrpc.ignore_server_disconnect &&
1532             should_fail(&fail_sunrpc.attr, 1))
1533                 svc_xprt_deferred_close(rqstp->rq_xprt);
1534 #endif
1535 
1536         /*
1537          * Setup response xdr_buf.
1538          * Initially it has just one page
1539          */
1540         rqstp->rq_next_page = &rqstp->rq_respages[1];
1541         resv->iov_base = page_address(rqstp->rq_respages[0]);
1542         resv->iov_len = 0;
1543         rqstp->rq_res.pages = rqstp->rq_next_page;
1544         rqstp->rq_res.len = 0;
1545         rqstp->rq_res.page_base = 0;
1546         rqstp->rq_res.page_len = 0;
1547         rqstp->rq_res.buflen = PAGE_SIZE;
1548         rqstp->rq_res.tail[0].iov_base = NULL;
1549         rqstp->rq_res.tail[0].iov_len = 0;
1550 
1551         svcxdr_init_decode(rqstp);
1552         p = xdr_inline_decode(&rqstp->rq_arg_stream, XDR_UNIT * 2);
1553         if (unlikely(!p))
1554                 goto out_drop;
1555         rqstp->rq_xid = *p++;
1556         if (unlikely(*p != rpc_call))
1557                 goto out_baddir;
1558 
1559         if (!svc_process_common(rqstp))
1560                 goto out_drop;
1561         svc_send(rqstp);
1562         return;
1563 
1564 out_baddir:
1565         svc_printk(rqstp, "bad direction 0x%08x, dropping request\n",
1566                    be32_to_cpu(*p));
1567         if (rqstp->rq_server->sv_stats)
1568                 rqstp->rq_server->sv_stats->rpcbadfmt++;
1569 out_drop:
1570         svc_drop(rqstp);
1571 }
1572 
1573 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
1574 /**
1575  * svc_process_bc - process a reverse-direction RPC request
1576  * @req: RPC request to be used for client-side processing
1577  * @rqstp: server-side execution context
1578  *
1579  */
1580 void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp)
1581 {
1582         struct rpc_timeout timeout = {
1583                 .to_increment           = 0,
1584         };
1585         struct rpc_task *task;
1586         int proc_error;
1587 
1588         /* Build the svc_rqst used by the common processing routine */
1589         rqstp->rq_xid = req->rq_xid;
1590         rqstp->rq_prot = req->rq_xprt->prot;
1591         rqstp->rq_bc_net = req->rq_xprt->xprt_net;
1592 
1593         rqstp->rq_addrlen = sizeof(req->rq_xprt->addr);
1594         memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
1595         memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg));
1596         memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res));
1597 
1598         /* Adjust the argument buffer length */
1599         rqstp->rq_arg.len = req->rq_private_buf.len;
1600         if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len) {
1601                 rqstp->rq_arg.head[0].iov_len = rqstp->rq_arg.len;
1602                 rqstp->rq_arg.page_len = 0;
1603         } else if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len +
1604                         rqstp->rq_arg.page_len)
1605                 rqstp->rq_arg.page_len = rqstp->rq_arg.len -
1606                         rqstp->rq_arg.head[0].iov_len;
1607         else
1608                 rqstp->rq_arg.len = rqstp->rq_arg.head[0].iov_len +
1609                         rqstp->rq_arg.page_len;
1610 
1611         /* Reset the response buffer */
1612         rqstp->rq_res.head[0].iov_len = 0;
1613 
1614         /*
1615          * Skip the XID and calldir fields because they've already
1616          * been processed by the caller.
1617          */
1618         svcxdr_init_decode(rqstp);
1619         if (!xdr_inline_decode(&rqstp->rq_arg_stream, XDR_UNIT * 2))
1620                 return;
1621 
1622         /* Parse and execute the bc call */
1623         proc_error = svc_process_common(rqstp);
1624 
1625         atomic_dec(&req->rq_xprt->bc_slot_count);
1626         if (!proc_error) {
1627                 /* Processing error: drop the request */
1628                 xprt_free_bc_request(req);
1629                 return;
1630         }
1631         /* Finally, send the reply synchronously */
1632         if (rqstp->bc_to_initval > 0) {
1633                 timeout.to_initval = rqstp->bc_to_initval;
1634                 timeout.to_retries = rqstp->bc_to_retries;
1635         } else {
1636                 timeout.to_initval = req->rq_xprt->timeout->to_initval;
1637                 timeout.to_retries = req->rq_xprt->timeout->to_retries;
1638         }
1639         timeout.to_maxval = timeout.to_initval;
1640         memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf));
1641         task = rpc_run_bc_task(req, &timeout);
1642 
1643         if (IS_ERR(task))
1644                 return;
1645 
1646         WARN_ON_ONCE(atomic_read(&task->tk_count) != 1);
1647         rpc_put_task(task);
1648 }
1649 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
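
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
/*
 * Illustrative sketch (editor's addition): a backchannel consumer in
 * the style of the NFSv4.1 callback thread. Queued rpc_rqsts are
 * pulled off serv->sv_cb_list and handed to svc_process_bc().
 * Waiting for work is omitted, and the function name is hypothetical.
 */
static int example_bc_svc_thread(void *vrqstp)
{
        struct svc_rqst *rqstp = vrqstp;
        struct svc_serv *serv = rqstp->rq_server;
        struct rpc_rqst *req;

        while (!svc_thread_should_stop(rqstp)) {
                spin_lock_bh(&serv->sv_cb_lock);
                req = list_first_entry_or_null(&serv->sv_cb_list,
                                               struct rpc_rqst, rq_bc_list);
                if (req)
                        list_del(&req->rq_bc_list);
                spin_unlock_bh(&serv->sv_cb_lock);
                if (req)
                        svc_process_bc(req, rqstp);
        }
        svc_exit_thread(rqstp);
        return 0;
}
#endif /* CONFIG_SUNRPC_BACKCHANNEL */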
1650 
1651 /**
1652  * svc_max_payload - Return transport-specific limit on the RPC payload
1653  * @rqstp: RPC transaction context
1654  *
1655  * Returns the maximum number of payload bytes the current transport
1656  * allows.
1657  */
1658 u32 svc_max_payload(const struct svc_rqst *rqstp)
1659 {
1660         u32 max = rqstp->rq_xprt->xpt_class->xcl_max_payload;
1661 
1662         if (rqstp->rq_server->sv_max_payload < max)
1663                 max = rqstp->rq_server->sv_max_payload;
1664         return max;
1665 }
1666 EXPORT_SYMBOL_GPL(svc_max_payload);
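
/*
 * Illustrative sketch (editor's addition): a typical use of
 * svc_max_payload() is clamping a client-supplied count to what the
 * transport can carry before performing I/O. The helper name
 * example_clamp_count is hypothetical.
 */
static u32 example_clamp_count(const struct svc_rqst *rqstp, u32 count)
{
        return min(count, svc_max_payload(rqstp));
}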
1667 
1668 /**
1669  * svc_proc_name - Return RPC procedure name in string form
1670  * @rqstp: svc_rqst to operate on
1671  *
1672  * Return value:
1673  *   Pointer to a NUL-terminated string
1674  */
1675 const char *svc_proc_name(const struct svc_rqst *rqstp)
1676 {
1677         if (rqstp && rqstp->rq_procinfo)
1678                 return rqstp->rq_procinfo->pc_name;
1679         return "unknown";
1680 }
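
/*
 * Usage note (editor's addition): svc_proc_name() tolerates a NULL
 * @rqstp and is intended for diagnostics and tracing, e.g.:
 *
 *      svc_printk(rqstp, "processing %s\n", svc_proc_name(rqstp));
 *
 * In-tree it is consumed by the sunrpc trace events.
 */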
1681 
1683 /**
1684  * svc_encode_result_payload - mark a range of bytes as a result payload
1685  * @rqstp: svc_rqst to operate on
1686  * @offset: payload's byte offset in rqstp->rq_res
1687  * @length: size of payload, in bytes
1688  *
1689  * Returns zero on success, or a negative errno if a permanent
1690  * error occurred.
1691  */
1692 int svc_encode_result_payload(struct svc_rqst *rqstp, unsigned int offset,
1693                               unsigned int length)
1694 {
1695         return rqstp->rq_xprt->xpt_ops->xpo_result_payload(rqstp, offset,
1696                                                            length);
1697 }
1698 EXPORT_SYMBOL_GPL(svc_encode_result_payload);
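
/*
 * Illustrative sketch (editor's addition): an XDR encoder for a
 * READ-style operation might mark the region of rq_res that will hold
 * file data so that RDMA-capable transports can place it directly.
 * The function name and the choice of offset are hypothetical.
 */
static int example_mark_read_payload(struct svc_rqst *rqstp,
                                     unsigned int count)
{
        /* assume the payload begins at the current end of the reply */
        unsigned int base = rqstp->rq_res.len;

        return svc_encode_result_payload(rqstp, base, count);
}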
1699 
1700 /**
1701  * svc_fill_write_vector - Construct data argument for VFS write call
1702  * @rqstp: svc_rqst to operate on
1703  * @payload: xdr_buf containing only the write data payload
1704  *
1705  * Fills in rqstp::rq_vec, and returns the number of elements.
1706  */
1707 unsigned int svc_fill_write_vector(struct svc_rqst *rqstp,
1708                                    struct xdr_buf *payload)
1709 {
1710         struct page **pages = payload->pages;
1711         struct kvec *first = payload->head;
1712         struct kvec *vec = rqstp->rq_vec;
1713         size_t total = payload->len;
1714         unsigned int i;
1715 
1716         /* Some types of transport can present the write payload
1717          * entirely in rq_arg.pages. In this case, @first is empty.
1718          */
1719         i = 0;
1720         if (first->iov_len) {
1721                 vec[i].iov_base = first->iov_base;
1722                 vec[i].iov_len = min_t(size_t, total, first->iov_len);
1723                 total -= vec[i].iov_len;
1724                 ++i;
1725         }
1726 
1727         while (total) {
1728                 vec[i].iov_base = page_address(*pages);
1729                 vec[i].iov_len = min_t(size_t, total, PAGE_SIZE);
1730                 total -= vec[i].iov_len;
1731                 ++i;
1732                 ++pages;
1733         }
1734 
1735         WARN_ON_ONCE(i > ARRAY_SIZE(rqstp->rq_vec));
1736         return i;
1737 }
1738 EXPORT_SYMBOL_GPL(svc_fill_write_vector);
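
/*
 * Illustrative sketch (editor's addition): a WRITE-style handler can
 * turn the decoded payload into an iov_iter for the VFS, in the style
 * of nfsd's write path. The function name is hypothetical, error
 * handling is trimmed, and linux/fs.h plus linux/uio.h are assumed to
 * be available.
 */
static ssize_t example_do_write(struct svc_rqst *rqstp, struct file *file,
                                loff_t pos, struct xdr_buf *payload)
{
        struct iov_iter iter;
        unsigned int nvecs;

        nvecs = svc_fill_write_vector(rqstp, payload);
        iov_iter_kvec(&iter, ITER_SOURCE, rqstp->rq_vec, nvecs, payload->len);
        return vfs_iter_write(file, &iter, &pos, 0);
}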
1739 
1740 /**
1741  * svc_fill_symlink_pathname - Construct pathname argument for VFS symlink call
1742  * @rqstp: svc_rqst to operate on
1743  * @first: buffer containing first section of pathname
1744  * @p: buffer containing remaining section of pathname
1745  * @total: total length of the pathname argument
1746  *
1747  * The VFS symlink API demands a NUL-terminated pathname in mapped memory.
1748  * Returns pointer to a NUL-terminated string, or an ERR_PTR. Caller must free
1749  * the returned string.
1750  */
1751 char *svc_fill_symlink_pathname(struct svc_rqst *rqstp, struct kvec *first,
1752                                 void *p, size_t total)
1753 {
1754         size_t len, remaining;
1755         char *result, *dst;
1756 
1757         result = kmalloc(total + 1, GFP_KERNEL);
1758         if (!result)
1759                 return ERR_PTR(-ESERVERFAULT);
1760 
1761         dst = result;
1762         remaining = total;
1763 
1764         len = min_t(size_t, total, first->iov_len);
1765         if (len) {
1766                 memcpy(dst, first->iov_base, len);
1767                 dst += len;
1768                 remaining -= len;
1769         }
1770 
1771         if (remaining) {
1772                 len = min_t(size_t, remaining, PAGE_SIZE);
1773                 memcpy(dst, p, len);
1774                 dst += len;
1775         }
1776 
1777         *dst = '\0';
1778 
1779         /* Sanity check: Linux doesn't allow the pathname argument to
1780          * contain a NUL byte.
1781          */
1782         if (strlen(result) != total) {
1783                 kfree(result);
1784                 return ERR_PTR(-EINVAL);
1785         }
1786         return result;
1787 }
1788 EXPORT_SYMBOL_GPL(svc_fill_symlink_pathname);
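
/*
 * Illustrative sketch (editor's addition): a SYMLINK-style handler
 * consuming svc_fill_symlink_pathname(). Names are hypothetical and
 * the VFS call itself is elided.
 */
static int example_do_symlink(struct svc_rqst *rqstp, struct kvec *first,
                              void *rest, size_t total)
{
        char *path;

        path = svc_fill_symlink_pathname(rqstp, first, rest, total);
        if (IS_ERR(path))
                return PTR_ERR(path);

        /* ... hand path to vfs_symlink() via the export/dentry code ... */

        kfree(path);
        return 0;
}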
1789 
