// SPDX-License-Identifier: GPL-2.0-only
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/svcauth.h>
#include <linux/sunrpc/gss_api.h>
#include <linux/sunrpc/addr.h>
#include <linux/err.h>
#include <linux/seq_file.h>
#include <linux/hash.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <linux/kernel.h>
#include <linux/user_namespace.h>
#include <trace/events/sunrpc.h>

#define RPCDBG_FACILITY	RPCDBG_AUTH

#include "netns.h"

/*
 * AUTHUNIX and AUTHNULL credentials are both handled here.
 * AUTHNULL is treated just like AUTHUNIX except that the uid/gid
 * are always nobody (-2).  i.e. we do the same IP address checks for
 * AUTHNULL as for AUTHUNIX, and that is done here.
 */


struct unix_domain {
	struct auth_domain	h;
	/* other stuff later */
};

extern struct auth_ops svcauth_null;
extern struct auth_ops svcauth_unix;
extern struct auth_ops svcauth_tls;

static void svcauth_unix_domain_release_rcu(struct rcu_head *head)
{
	struct auth_domain *dom = container_of(head, struct auth_domain, rcu_head);
	struct unix_domain *ud = container_of(dom, struct unix_domain, h);

	kfree(dom->name);
	kfree(ud);
}

static void svcauth_unix_domain_release(struct auth_domain *dom)
{
	call_rcu(&dom->rcu_head, svcauth_unix_domain_release_rcu);
}

struct auth_domain *unix_domain_find(char *name)
{
	struct auth_domain *rv;
	struct unix_domain *new = NULL;

	rv = auth_domain_find(name);
	while(1) {
		if (rv) {
			if (new && rv != &new->h)
				svcauth_unix_domain_release(&new->h);

			if (rv->flavour != &svcauth_unix) {
				auth_domain_put(rv);
				return NULL;
			}
			return rv;
		}

		new = kmalloc(sizeof(*new), GFP_KERNEL);
		if (new == NULL)
			return NULL;
		kref_init(&new->h.ref);
		new->h.name = kstrdup(name, GFP_KERNEL);
		if (new->h.name == NULL) {
			kfree(new);
			return NULL;
		}
		new->h.flavour = &svcauth_unix;
		rv = auth_domain_lookup(name, &new->h);
	}
}
EXPORT_SYMBOL_GPL(unix_domain_find);


/**************************************************
 * cache for IP address to unix_domain
 * as needed by AUTH_UNIX
 */
#define	IP_HASHBITS	8
#define	IP_HASHMAX	(1<<IP_HASHBITS)

struct ip_map {
	struct cache_head	h;
	char			m_class[8]; /* e.g. "nfsd" */
	struct in6_addr		m_addr;
	struct unix_domain	*m_client;
	struct rcu_head		m_rcu;
};

static void ip_map_put(struct kref *kref)
{
	struct cache_head *item = container_of(kref, struct cache_head, ref);
	struct ip_map *im = container_of(item, struct ip_map, h);

	if (test_bit(CACHE_VALID, &item->flags) &&
	    !test_bit(CACHE_NEGATIVE, &item->flags))
		auth_domain_put(&im->m_client->h);
	kfree_rcu(im, m_rcu);
}

static inline int hash_ip6(const struct in6_addr *ip)
{
	return hash_32(ipv6_addr_hash(ip), IP_HASHBITS);
}
static int ip_map_match(struct cache_head *corig, struct cache_head *cnew)
{
	struct ip_map *orig = container_of(corig, struct ip_map, h);
	struct ip_map *new = container_of(cnew, struct ip_map, h);
	return strcmp(orig->m_class, new->m_class) == 0 &&
	       ipv6_addr_equal(&orig->m_addr, &new->m_addr);
}
static void ip_map_init(struct cache_head *cnew, struct cache_head *citem)
{
	struct ip_map *new = container_of(cnew, struct ip_map, h);
	struct ip_map *item = container_of(citem, struct ip_map, h);

	strcpy(new->m_class, item->m_class);
	new->m_addr = item->m_addr;
}
static void update(struct cache_head *cnew, struct cache_head *citem)
{
	struct ip_map *new = container_of(cnew, struct ip_map, h);
	struct ip_map *item = container_of(citem, struct ip_map, h);

	kref_get(&item->m_client->h.ref);
	new->m_client = item->m_client;
}
static struct cache_head *ip_map_alloc(void)
{
	struct ip_map *i = kmalloc(sizeof(*i), GFP_KERNEL);
	if (i)
		return &i->h;
	else
		return NULL;
}

static int ip_map_upcall(struct cache_detail *cd, struct cache_head *h)
{
	return sunrpc_cache_pipe_upcall(cd, h);
}

static void ip_map_request(struct cache_detail *cd,
			   struct cache_head *h,
			   char **bpp, int *blen)
{
	char text_addr[40];
	struct ip_map *im = container_of(h, struct ip_map, h);

	if (ipv6_addr_v4mapped(&(im->m_addr))) {
		snprintf(text_addr, 20, "%pI4", &im->m_addr.s6_addr32[3]);
	} else {
		snprintf(text_addr, 40, "%pI6", &im->m_addr);
	}
	qword_add(bpp, blen, im->m_class);
	qword_add(bpp, blen, text_addr);
	(*bpp)[-1] = '\n';
}

static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char *class, struct in6_addr *addr);
static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm, struct unix_domain *udom, time64_t expiry);

static int ip_map_parse(struct cache_detail *cd,
			char *mesg, int mlen)
{
	/* class ipaddress [domainname] */
	/* should be safe just to use the start of the input buffer
	 * for scratch: */
	char *buf = mesg;
	int len;
	char class[8];
	union {
		struct sockaddr		sa;
		struct sockaddr_in	s4;
		struct sockaddr_in6	s6;
	} address;
	struct sockaddr_in6 sin6;
	int err;

	struct ip_map *ipmp;
	struct auth_domain *dom;
	time64_t expiry;

	if (mesg[mlen-1] != '\n')
		return -EINVAL;
	mesg[mlen-1] = 0;

	/* class */
	len = qword_get(&mesg, class, sizeof(class));
	if (len <= 0) return -EINVAL;

	/* ip address */
	len = qword_get(&mesg, buf, mlen);
	if (len <= 0) return -EINVAL;

	if (rpc_pton(cd->net, buf, len, &address.sa, sizeof(address)) == 0)
		return -EINVAL;
	switch (address.sa.sa_family) {
	case AF_INET:
		/* Form a mapped IPv4 address in sin6 */
		sin6.sin6_family = AF_INET6;
		ipv6_addr_set_v4mapped(address.s4.sin_addr.s_addr,
				&sin6.sin6_addr);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		memcpy(&sin6, &address.s6, sizeof(sin6));
		break;
#endif
	default:
		return -EINVAL;
	}

	err = get_expiry(&mesg, &expiry);
	if (err)
		return err;

	/* domainname, or empty for NEGATIVE */
	len = qword_get(&mesg, buf, mlen);
	if (len < 0) return -EINVAL;

	if (len) {
		dom = unix_domain_find(buf);
		if (dom == NULL)
			return -ENOENT;
	} else
		dom = NULL;

	/* IPv6 scope IDs are ignored for now */
	ipmp = __ip_map_lookup(cd, class, &sin6.sin6_addr);
	if (ipmp) {
		err = __ip_map_update(cd, ipmp,
			     container_of(dom, struct unix_domain, h),
			     expiry);
	} else
		err = -ENOMEM;

	if (dom)
		auth_domain_put(dom);

	cache_flush();
	return err;
}

static int ip_map_show(struct seq_file *m,
		       struct cache_detail *cd,
		       struct cache_head *h)
{
	struct ip_map *im;
	struct in6_addr addr;
	char *dom = "-no-domain-";

	if (h == NULL) {
		seq_puts(m, "#class IP domain\n");
		return 0;
	}
	im = container_of(h, struct ip_map, h);
	/* class addr domain */
	addr = im->m_addr;

	if (test_bit(CACHE_VALID, &h->flags) &&
	    !test_bit(CACHE_NEGATIVE, &h->flags))
		dom = im->m_client->h.name;

	if (ipv6_addr_v4mapped(&addr)) {
		seq_printf(m, "%s %pI4 %s\n",
			im->m_class, &addr.s6_addr32[3], dom);
	} else {
		seq_printf(m, "%s %pI6 %s\n", im->m_class, &addr, dom);
	}
	return 0;
}


static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char *class,
		struct in6_addr *addr)
{
	struct ip_map ip;
	struct cache_head *ch;

	strcpy(ip.m_class, class);
	ip.m_addr = *addr;
	ch = sunrpc_cache_lookup_rcu(cd, &ip.h,
				     hash_str(class, IP_HASHBITS) ^
				     hash_ip6(addr));

	if (ch)
		return container_of(ch, struct ip_map, h);
	else
		return NULL;
}

static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm,
		struct unix_domain *udom, time64_t expiry)
{
	struct ip_map ip;
	struct cache_head *ch;

	ip.m_client = udom;
	ip.h.flags = 0;
	if (!udom)
		set_bit(CACHE_NEGATIVE, &ip.h.flags);
	ip.h.expiry_time = expiry;
	ch = sunrpc_cache_update(cd, &ip.h, &ipm->h,
				 hash_str(ipm->m_class, IP_HASHBITS) ^
				 hash_ip6(&ipm->m_addr));
	if (!ch)
		return -ENOMEM;
	cache_put(ch, cd);
	return 0;
}

void svcauth_unix_purge(struct net *net)
{
	struct sunrpc_net *sn;

	sn = net_generic(net, sunrpc_net_id);
	cache_purge(sn->ip_map_cache);
}
EXPORT_SYMBOL_GPL(svcauth_unix_purge);

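/*
 * A transport may remember an ip_map it has already resolved in
 * xpt_auth_cache so that later requests on the same connection can
 * skip the hash lookup.  The helpers below take and release that
 * cached reference under xpt_lock, dropping the entry if it has
 * expired in the meantime.
 */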
static inline struct ip_map *
ip_map_cached_get(struct svc_xprt *xprt)
{
	struct ip_map *ipm = NULL;
	struct sunrpc_net *sn;

	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) {
		spin_lock(&xprt->xpt_lock);
		ipm = xprt->xpt_auth_cache;
		if (ipm != NULL) {
			sn = net_generic(xprt->xpt_net, sunrpc_net_id);
			if (cache_is_expired(sn->ip_map_cache, &ipm->h)) {
				/*
				 * The entry has been invalidated since it was
				 * remembered, e.g. by a second mount from the
				 * same IP address.
				 */
				xprt->xpt_auth_cache = NULL;
				spin_unlock(&xprt->xpt_lock);
				cache_put(&ipm->h, sn->ip_map_cache);
				return NULL;
			}
			cache_get(&ipm->h);
		}
		spin_unlock(&xprt->xpt_lock);
	}
	return ipm;
}

static inline void
ip_map_cached_put(struct svc_xprt *xprt, struct ip_map *ipm)
{
	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) {
		spin_lock(&xprt->xpt_lock);
		if (xprt->xpt_auth_cache == NULL) {
			/* newly cached, keep the reference */
			xprt->xpt_auth_cache = ipm;
			ipm = NULL;
		}
		spin_unlock(&xprt->xpt_lock);
	}
	if (ipm) {
		struct sunrpc_net *sn;

		sn = net_generic(xprt->xpt_net, sunrpc_net_id);
		cache_put(&ipm->h, sn->ip_map_cache);
	}
}

void
svcauth_unix_info_release(struct svc_xprt *xpt)
{
	struct ip_map *ipm;

	ipm = xpt->xpt_auth_cache;
	if (ipm != NULL) {
		struct sunrpc_net *sn;

		sn = net_generic(xpt->xpt_net, sunrpc_net_id);
		cache_put(&ipm->h, sn->ip_map_cache);
	}
}

/****************************************************************************
 * auth.unix.gid cache
 * simple cache to map a UID to a list of GIDs
 * because AUTH_UNIX aka AUTH_SYS has a max of UNX_NGROUPS
 */
#define	GID_HASHBITS	8
#define	GID_HASHMAX	(1<<GID_HASHBITS)

struct unix_gid {
	struct cache_head	h;
	kuid_t			uid;
	struct group_info	*gi;
	struct rcu_head		rcu;
};

static int unix_gid_hash(kuid_t uid)
{
	return hash_long(from_kuid(&init_user_ns, uid), GID_HASHBITS);
}

static void unix_gid_free(struct rcu_head *rcu)
{
	struct unix_gid *ug = container_of(rcu, struct unix_gid, rcu);
	struct cache_head *item = &ug->h;

	if (test_bit(CACHE_VALID, &item->flags) &&
	    !test_bit(CACHE_NEGATIVE, &item->flags))
		put_group_info(ug->gi);
	kfree(ug);
}

static void unix_gid_put(struct kref *kref)
{
	struct cache_head *item = container_of(kref, struct cache_head, ref);
	struct unix_gid *ug = container_of(item, struct unix_gid, h);

	call_rcu(&ug->rcu, unix_gid_free);
}

static int unix_gid_match(struct cache_head *corig, struct cache_head *cnew)
{
	struct unix_gid *orig = container_of(corig, struct unix_gid, h);
	struct unix_gid *new = container_of(cnew, struct unix_gid, h);
	return uid_eq(orig->uid, new->uid);
}
static void unix_gid_init(struct cache_head *cnew, struct cache_head *citem)
{
	struct unix_gid *new = container_of(cnew, struct unix_gid, h);
	struct unix_gid *item = container_of(citem, struct unix_gid, h);
	new->uid = item->uid;
}
static void unix_gid_update(struct cache_head *cnew, struct cache_head *citem)
{
	struct unix_gid *new = container_of(cnew, struct unix_gid, h);
	struct unix_gid *item = container_of(citem, struct unix_gid, h);

	get_group_info(item->gi);
	new->gi = item->gi;
}
static struct cache_head *unix_gid_alloc(void)
{
	struct unix_gid *g = kmalloc(sizeof(*g), GFP_KERNEL);
	if (g)
		return &g->h;
	else
		return NULL;
}

static int unix_gid_upcall(struct cache_detail *cd, struct cache_head *h)
{
	return sunrpc_cache_pipe_upcall_timeout(cd, h);
}

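/*
 * Upcalls on the auth.unix.gid channel carry only the UID in decimal,
 * terminated by a newline; the matching downcall, parsed below,
 * supplies the expiry time and the list of supplementary GIDs for
 * that user.
 */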
static void unix_gid_request(struct cache_detail *cd,
			     struct cache_head *h,
			     char **bpp, int *blen)
{
	char tuid[20];
	struct unix_gid *ug = container_of(h, struct unix_gid, h);

	snprintf(tuid, 20, "%u", from_kuid(&init_user_ns, ug->uid));
	qword_add(bpp, blen, tuid);
	(*bpp)[-1] = '\n';
}

static struct unix_gid *unix_gid_lookup(struct cache_detail *cd, kuid_t uid);

static int unix_gid_parse(struct cache_detail *cd,
			char *mesg, int mlen)
{
	/* uid expiry Ngid gid0 gid1 ... gidN-1 */
	int id;
	kuid_t uid;
	int gids;
	int rv;
	int i;
	int err;
	time64_t expiry;
	struct unix_gid ug, *ugp;

	if (mesg[mlen - 1] != '\n')
		return -EINVAL;
	mesg[mlen-1] = 0;

	rv = get_int(&mesg, &id);
	if (rv)
		return -EINVAL;
	uid = make_kuid(current_user_ns(), id);
	ug.uid = uid;

	err = get_expiry(&mesg, &expiry);
	if (err)
		return err;

	rv = get_int(&mesg, &gids);
	if (rv || gids < 0 || gids > 8192)
		return -EINVAL;

	ug.gi = groups_alloc(gids);
	if (!ug.gi)
		return -ENOMEM;

	for (i = 0 ; i < gids ; i++) {
		int gid;
		kgid_t kgid;
		rv = get_int(&mesg, &gid);
		err = -EINVAL;
		if (rv)
			goto out;
		kgid = make_kgid(current_user_ns(), gid);
		if (!gid_valid(kgid))
			goto out;
		ug.gi->gid[i] = kgid;
	}

	groups_sort(ug.gi);
	ugp = unix_gid_lookup(cd, uid);
	if (ugp) {
		struct cache_head *ch;
		ug.h.flags = 0;
		ug.h.expiry_time = expiry;
		ch = sunrpc_cache_update(cd,
					 &ug.h, &ugp->h,
					 unix_gid_hash(uid));
		if (!ch)
			err = -ENOMEM;
		else {
			err = 0;
			cache_put(ch, cd);
		}
	} else
		err = -ENOMEM;
 out:
	if (ug.gi)
		put_group_info(ug.gi);
	return err;
}

static int unix_gid_show(struct seq_file *m,
			 struct cache_detail *cd,
			 struct cache_head *h)
{
	struct user_namespace *user_ns = m->file->f_cred->user_ns;
	struct unix_gid *ug;
	int i;
	int glen;

	if (h == NULL) {
		seq_puts(m, "#uid cnt: gids...\n");
		return 0;
	}
	ug = container_of(h, struct unix_gid, h);
	if (test_bit(CACHE_VALID, &h->flags) &&
	    !test_bit(CACHE_NEGATIVE, &h->flags))
		glen = ug->gi->ngroups;
	else
		glen = 0;

	seq_printf(m, "%u %d:", from_kuid_munged(user_ns, ug->uid), glen);
	for (i = 0; i < glen; i++)
		seq_printf(m, " %d", from_kgid_munged(user_ns, ug->gi->gid[i]));
	seq_printf(m, "\n");
	return 0;
}

static const struct cache_detail unix_gid_cache_template = {
	.owner		= THIS_MODULE,
	.hash_size	= GID_HASHMAX,
	.name		= "auth.unix.gid",
	.cache_put	= unix_gid_put,
	.cache_upcall	= unix_gid_upcall,
	.cache_request	= unix_gid_request,
	.cache_parse	= unix_gid_parse,
	.cache_show	= unix_gid_show,
	.match		= unix_gid_match,
	.init		= unix_gid_init,
	.update		= unix_gid_update,
	.alloc		= unix_gid_alloc,
};

int unix_gid_cache_create(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct cache_detail *cd;
	int err;

	cd = cache_create_net(&unix_gid_cache_template, net);
	if (IS_ERR(cd))
		return PTR_ERR(cd);
	err = cache_register_net(cd, net);
	if (err) {
		cache_destroy_net(cd, net);
		return err;
	}
	sn->unix_gid_cache = cd;
	return 0;
}

void unix_gid_cache_destroy(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct cache_detail *cd = sn->unix_gid_cache;

	sn->unix_gid_cache = NULL;
	cache_purge(cd);
	cache_unregister_net(cd, net);
	cache_destroy_net(cd, net);
}

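/*
 * unix_gid_lookup() finds the cache entry for @uid; unix_gid_find()
 * additionally waits for it to become valid via cache_check() and
 * returns the group list, or an ERR_PTR that tells the caller to
 * drop the request and retry later, fall back to the groups in the
 * credential, or close the connection.
 */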
static struct unix_gid *unix_gid_lookup(struct cache_detail *cd, kuid_t uid)
{
	struct unix_gid ug;
	struct cache_head *ch;

	ug.uid = uid;
	ch = sunrpc_cache_lookup_rcu(cd, &ug.h, unix_gid_hash(uid));
	if (ch)
		return container_of(ch, struct unix_gid, h);
	else
		return NULL;
}

static struct group_info *unix_gid_find(kuid_t uid, struct svc_rqst *rqstp)
{
	struct unix_gid *ug;
	struct group_info *gi;
	int ret;
	struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net,
					    sunrpc_net_id);

	ug = unix_gid_lookup(sn->unix_gid_cache, uid);
	if (!ug)
		return ERR_PTR(-EAGAIN);
	ret = cache_check(sn->unix_gid_cache, &ug->h, &rqstp->rq_chandle);
	switch (ret) {
	case -ENOENT:
		return ERR_PTR(-ENOENT);
	case -ETIMEDOUT:
		return ERR_PTR(-ESHUTDOWN);
	case 0:
		gi = get_group_info(ug->gi);
		cache_put(&ug->h, sn->unix_gid_cache);
		return gi;
	default:
		return ERR_PTR(-EAGAIN);
	}
}

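/**
 * svcauth_unix_set_client - assign an auth_domain to the calling client
 * @rqstp: RPC transaction
 *
 * Looks up the client's IP address in the auth.unix.ip cache to set
 * rqstp->rq_client and, when an auth.unix.gid entry exists for the
 * credential's UID, replaces the supplementary group list supplied in
 * the AUTH_UNIX credential.
 *
 * Return values:
 *   %SVC_OK: client was recognized (or the NULL procedure was called)
 *   %SVC_DENIED: no matching client could be found
 *   %SVC_DROP: a cache upcall is pending; drop and let the client retry
 *   %SVC_CLOSE: temporary failure; the connection should be closed
 */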
enum svc_auth_status
svcauth_unix_set_client(struct svc_rqst *rqstp)
{
	struct sockaddr_in *sin;
	struct sockaddr_in6 *sin6, sin6_storage;
	struct ip_map *ipm;
	struct group_info *gi;
	struct svc_cred *cred = &rqstp->rq_cred;
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct net *net = xprt->xpt_net;
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	switch (rqstp->rq_addr.ss_family) {
	case AF_INET:
		sin = svc_addr_in(rqstp);
		sin6 = &sin6_storage;
		ipv6_addr_set_v4mapped(sin->sin_addr.s_addr, &sin6->sin6_addr);
		break;
	case AF_INET6:
		sin6 = svc_addr_in6(rqstp);
		break;
	default:
		BUG();
	}

	rqstp->rq_client = NULL;
	if (rqstp->rq_proc == 0)
		goto out;

	rqstp->rq_auth_stat = rpc_autherr_badcred;
	ipm = ip_map_cached_get(xprt);
	if (ipm == NULL)
		ipm = __ip_map_lookup(sn->ip_map_cache, rqstp->rq_server->sv_program->pg_class,
				      &sin6->sin6_addr);

	if (ipm == NULL)
		return SVC_DENIED;

	switch (cache_check(sn->ip_map_cache, &ipm->h, &rqstp->rq_chandle)) {
	default:
		BUG();
	case -ETIMEDOUT:
		return SVC_CLOSE;
	case -EAGAIN:
		return SVC_DROP;
	case -ENOENT:
		return SVC_DENIED;
	case 0:
		rqstp->rq_client = &ipm->m_client->h;
		kref_get(&rqstp->rq_client->ref);
		ip_map_cached_put(xprt, ipm);
		break;
	}

	gi = unix_gid_find(cred->cr_uid, rqstp);
	switch (PTR_ERR(gi)) {
	case -EAGAIN:
		return SVC_DROP;
	case -ESHUTDOWN:
		return SVC_CLOSE;
	case -ENOENT:
		break;
	default:
		put_group_info(cred->cr_group_info);
		cred->cr_group_info = gi;
	}

out:
	rqstp->rq_auth_stat = rpc_auth_ok;
	return SVC_OK;
}
EXPORT_SYMBOL_GPL(svcauth_unix_set_client);

/**
 * svcauth_null_accept - Decode and validate incoming RPC_AUTH_NULL credential
 * @rqstp: RPC transaction
 *
 * Return values:
 *   %SVC_OK: Both credential and verifier are valid
 *   %SVC_DENIED: Credential or verifier is not valid
 *   %SVC_GARBAGE: Failed to decode credential or verifier
 *   %SVC_CLOSE: Temporary failure
 *
 * rqstp->rq_auth_stat is set as mandated by RFC 5531.
 */
static enum svc_auth_status
svcauth_null_accept(struct svc_rqst *rqstp)
{
	struct xdr_stream *xdr = &rqstp->rq_arg_stream;
	struct svc_cred *cred = &rqstp->rq_cred;
	u32 flavor, len;
	void *body;

	/* Length of Call's credential body field: */
	if (xdr_stream_decode_u32(xdr, &len) < 0)
		return SVC_GARBAGE;
	if (len != 0) {
		rqstp->rq_auth_stat = rpc_autherr_badcred;
		return SVC_DENIED;
	}

	/* Call's verf field: */
	if (xdr_stream_decode_opaque_auth(xdr, &flavor, &body, &len) < 0)
		return SVC_GARBAGE;
	if (flavor != RPC_AUTH_NULL || len != 0) {
		rqstp->rq_auth_stat = rpc_autherr_badverf;
		return SVC_DENIED;
	}

	/* Signal that mapping to nobody uid/gid is required */
	cred->cr_uid = INVALID_UID;
	cred->cr_gid = INVALID_GID;
	cred->cr_group_info = groups_alloc(0);
	if (cred->cr_group_info == NULL)
		return SVC_CLOSE; /* kmalloc failure - client must retry */

	if (xdr_stream_encode_opaque_auth(&rqstp->rq_res_stream,
					  RPC_AUTH_NULL, NULL, 0) < 0)
		return SVC_CLOSE;
	if (!svcxdr_set_accept_stat(rqstp))
		return SVC_CLOSE;

	rqstp->rq_cred.cr_flavor = RPC_AUTH_NULL;
	return SVC_OK;
}

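/*
 * Drop the client domain and group_info references taken while the
 * request was being processed.  Used as the release method for both
 * the null and tls flavours; AUTH_UNIX has an equivalent of its own
 * further below.
 */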
static int
svcauth_null_release(struct svc_rqst *rqstp)
{
	if (rqstp->rq_client)
		auth_domain_put(rqstp->rq_client);
	rqstp->rq_client = NULL;
	if (rqstp->rq_cred.cr_group_info)
		put_group_info(rqstp->rq_cred.cr_group_info);
	rqstp->rq_cred.cr_group_info = NULL;

	return 0; /* don't drop */
}


struct auth_ops svcauth_null = {
	.name		= "null",
	.owner		= THIS_MODULE,
	.flavour	= RPC_AUTH_NULL,
	.accept		= svcauth_null_accept,
	.release	= svcauth_null_release,
	.set_client	= svcauth_unix_set_client,
};


/**
 * svcauth_tls_accept - Decode and validate incoming RPC_AUTH_TLS credential
 * @rqstp: RPC transaction
 *
 * Return values:
 *   %SVC_OK: Both credential and verifier are valid
 *   %SVC_DENIED: Credential or verifier is not valid
 *   %SVC_GARBAGE: Failed to decode credential or verifier
 *   %SVC_CLOSE: Temporary failure
 *
 * rqstp->rq_auth_stat is set as mandated by RFC 5531.
 */
static enum svc_auth_status
svcauth_tls_accept(struct svc_rqst *rqstp)
{
	struct xdr_stream *xdr = &rqstp->rq_arg_stream;
	struct svc_cred *cred = &rqstp->rq_cred;
	struct svc_xprt *xprt = rqstp->rq_xprt;
	u32 flavor, len;
	void *body;
	__be32 *p;

	/* Length of Call's credential body field: */
	if (xdr_stream_decode_u32(xdr, &len) < 0)
		return SVC_GARBAGE;
	if (len != 0) {
		rqstp->rq_auth_stat = rpc_autherr_badcred;
		return SVC_DENIED;
	}

	/* Call's verf field: */
	if (xdr_stream_decode_opaque_auth(xdr, &flavor, &body, &len) < 0)
		return SVC_GARBAGE;
	if (flavor != RPC_AUTH_NULL || len != 0) {
		rqstp->rq_auth_stat = rpc_autherr_badverf;
		return SVC_DENIED;
	}

	/* AUTH_TLS is not valid on non-NULL procedures */
	if (rqstp->rq_proc != 0) {
		rqstp->rq_auth_stat = rpc_autherr_badcred;
		return SVC_DENIED;
	}

	/* Signal that mapping to nobody uid/gid is required */
	cred->cr_uid = INVALID_UID;
	cred->cr_gid = INVALID_GID;
	cred->cr_group_info = groups_alloc(0);
	if (cred->cr_group_info == NULL)
		return SVC_CLOSE;

	if (xprt->xpt_ops->xpo_handshake) {
		p = xdr_reserve_space(&rqstp->rq_res_stream, XDR_UNIT * 2 + 8);
		if (!p)
			return SVC_CLOSE;
		trace_svc_tls_start(xprt);
		*p++ = rpc_auth_null;
		*p++ = cpu_to_be32(8);
		memcpy(p, "STARTTLS", 8);

		set_bit(XPT_HANDSHAKE, &xprt->xpt_flags);
		svc_xprt_enqueue(xprt);
	} else {
		trace_svc_tls_unavailable(xprt);
		if (xdr_stream_encode_opaque_auth(&rqstp->rq_res_stream,
						  RPC_AUTH_NULL, NULL, 0) < 0)
			return SVC_CLOSE;
	}
	if (!svcxdr_set_accept_stat(rqstp))
		return SVC_CLOSE;

	rqstp->rq_cred.cr_flavor = RPC_AUTH_TLS;
	return SVC_OK;
}

struct auth_ops svcauth_tls = {
	.name		= "tls",
	.owner		= THIS_MODULE,
	.flavour	= RPC_AUTH_TLS,
	.accept		= svcauth_tls_accept,
	.release	= svcauth_null_release,
	.set_client	= svcauth_unix_set_client,
};


/**
 * svcauth_unix_accept - Decode and validate incoming RPC_AUTH_SYS credential
 * @rqstp: RPC transaction
 *
 * Return values:
 *   %SVC_OK: Both credential and verifier are valid
 *   %SVC_DENIED: Credential or verifier is not valid
 *   %SVC_GARBAGE: Failed to decode credential or verifier
 *   %SVC_CLOSE: Temporary failure
 *
 * rqstp->rq_auth_stat is set as mandated by RFC 5531.
 */
static enum svc_auth_status
svcauth_unix_accept(struct svc_rqst *rqstp)
{
	struct xdr_stream *xdr = &rqstp->rq_arg_stream;
	struct svc_cred *cred = &rqstp->rq_cred;
	struct user_namespace *userns;
	u32 flavor, len, i;
	void *body;
	__be32 *p;

	/*
	 * This implementation ignores the length of the Call's
	 * credential body field and the timestamp and machinename
	 * fields.
	 */
	p = xdr_inline_decode(xdr, XDR_UNIT * 3);
	if (!p)
		return SVC_GARBAGE;
	len = be32_to_cpup(p + 2);
	if (len > RPC_MAX_MACHINENAME)
		return SVC_GARBAGE;
	if (!xdr_inline_decode(xdr, len))
		return SVC_GARBAGE;

	/*
	 * Note: we skip uid_valid()/gid_valid() checks here for
	 * backwards compatibility with clients that use -1 id's.
	 * Instead, -1 uid or gid is later mapped to the
	 * (export-specific) anonymous id by nfsd_setuser.
	 * Supplementary gid's will be left alone.
	 */
	userns = (rqstp->rq_xprt && rqstp->rq_xprt->xpt_cred) ?
		rqstp->rq_xprt->xpt_cred->user_ns : &init_user_ns;
	if (xdr_stream_decode_u32(xdr, &i) < 0)
		return SVC_GARBAGE;
	cred->cr_uid = make_kuid(userns, i);
	if (xdr_stream_decode_u32(xdr, &i) < 0)
		return SVC_GARBAGE;
	cred->cr_gid = make_kgid(userns, i);

	if (xdr_stream_decode_u32(xdr, &len) < 0)
		return SVC_GARBAGE;
	if (len > UNX_NGROUPS)
		goto badcred;
	p = xdr_inline_decode(xdr, XDR_UNIT * len);
	if (!p)
		return SVC_GARBAGE;
	cred->cr_group_info = groups_alloc(len);
	if (cred->cr_group_info == NULL)
		return SVC_CLOSE;
	for (i = 0; i < len; i++) {
		kgid_t kgid = make_kgid(userns, be32_to_cpup(p++));
		cred->cr_group_info->gid[i] = kgid;
	}
	groups_sort(cred->cr_group_info);

	/* Call's verf field: */
	if (xdr_stream_decode_opaque_auth(xdr, &flavor, &body, &len) < 0)
		return SVC_GARBAGE;
	if (flavor != RPC_AUTH_NULL || len != 0) {
		rqstp->rq_auth_stat = rpc_autherr_badverf;
		return SVC_DENIED;
	}

	if (xdr_stream_encode_opaque_auth(&rqstp->rq_res_stream,
					  RPC_AUTH_NULL, NULL, 0) < 0)
		return SVC_CLOSE;
	if (!svcxdr_set_accept_stat(rqstp))
		return SVC_CLOSE;

	rqstp->rq_cred.cr_flavor = RPC_AUTH_UNIX;
	return SVC_OK;

badcred:
	rqstp->rq_auth_stat = rpc_autherr_badcred;
	return SVC_DENIED;
}

static int
svcauth_unix_release(struct svc_rqst *rqstp)
{
	/* Verifier (such as it is) is already in place.
	 */
	if (rqstp->rq_client)
		auth_domain_put(rqstp->rq_client);
	rqstp->rq_client = NULL;
	if (rqstp->rq_cred.cr_group_info)
		put_group_info(rqstp->rq_cred.cr_group_info);
	rqstp->rq_cred.cr_group_info = NULL;

	return 0;
}


struct auth_ops svcauth_unix = {
	.name		= "unix",
	.owner		= THIS_MODULE,
	.flavour	= RPC_AUTH_UNIX,
	.accept		= svcauth_unix_accept,
	.release	= svcauth_unix_release,
	.domain_release	= svcauth_unix_domain_release,
	.set_client	= svcauth_unix_set_client,
};

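/*
 * The auth.unix.ip cache is created and registered separately for
 * each network namespace; its lifetime is managed through the two
 * helpers below.
 */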
static const struct cache_detail ip_map_cache_template = {
	.owner		= THIS_MODULE,
	.hash_size	= IP_HASHMAX,
	.name		= "auth.unix.ip",
	.cache_put	= ip_map_put,
	.cache_upcall	= ip_map_upcall,
	.cache_request	= ip_map_request,
	.cache_parse	= ip_map_parse,
	.cache_show	= ip_map_show,
	.match		= ip_map_match,
	.init		= ip_map_init,
	.update		= update,
	.alloc		= ip_map_alloc,
};

int ip_map_cache_create(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct cache_detail *cd;
	int err;

	cd = cache_create_net(&ip_map_cache_template, net);
	if (IS_ERR(cd))
		return PTR_ERR(cd);
	err = cache_register_net(cd, net);
	if (err) {
		cache_destroy_net(cd, net);
		return err;
	}
	sn->ip_map_cache = cd;
	return 0;
}

void ip_map_cache_destroy(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct cache_detail *cd = sn->ip_map_cache;

	sn->ip_map_cache = NULL;
	cache_purge(cd);
	cache_unregister_net(cd, net);
	cache_destroy_net(cd, net);
}