// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * (C) Copyright Red Hat Inc. 2017
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions implement sctp diag support.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    Xin Long <lucien.xin@gmail.com>
 */

#include <linux/module.h>
#include <linux/inet_diag.h>
#include <linux/sock_diag.h>
#include <net/sctp/sctp.h>

static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
			       void *info);

/* define some functions to make asoc/ep fill look clean */
static void inet_diag_msg_sctpasoc_fill(struct inet_diag_msg *r,
					struct sock *sk,
					struct sctp_association *asoc)
{
	union sctp_addr laddr, paddr;
	struct dst_entry *dst;
	struct timer_list *t3_rtx = &asoc->peer.primary_path->T3_rtx_timer;

	laddr = list_entry(asoc->base.bind_addr.address_list.next,
			   struct sctp_sockaddr_entry, list)->a;
	paddr = asoc->peer.primary_path->ipaddr;
	dst = asoc->peer.primary_path->dst;

	r->idiag_family = sk->sk_family;
	r->id.idiag_sport = htons(asoc->base.bind_addr.port);
	r->id.idiag_dport = htons(asoc->peer.port);
	r->id.idiag_if = dst ? dst->dev->ifindex : 0;
	sock_diag_save_cookie(sk, r->id.idiag_cookie);

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6) {
		*(struct in6_addr *)r->id.idiag_src = laddr.v6.sin6_addr;
		*(struct in6_addr *)r->id.idiag_dst = paddr.v6.sin6_addr;
	} else
#endif
	{
		memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
		memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));

		r->id.idiag_src[0] = laddr.v4.sin_addr.s_addr;
		r->id.idiag_dst[0] = paddr.v4.sin_addr.s_addr;
	}

	r->idiag_state = asoc->state;
	if (timer_pending(t3_rtx)) {
		r->idiag_timer = SCTP_EVENT_TIMEOUT_T3_RTX;
		r->idiag_retrans = asoc->rtx_data_chunks;
		r->idiag_expires = jiffies_to_msecs(t3_rtx->expires - jiffies);
	}
}

static int inet_diag_msg_sctpladdrs_fill(struct sk_buff *skb,
					 struct list_head *address_list)
{
	struct sctp_sockaddr_entry *laddr;
	int addrlen = sizeof(struct sockaddr_storage);
	int addrcnt = 0;
	struct nlattr *attr;
	void *info = NULL;

	list_for_each_entry_rcu(laddr, address_list, list)
		addrcnt++;

	attr = nla_reserve(skb, INET_DIAG_LOCALS, addrlen * addrcnt);
	if (!attr)
		return -EMSGSIZE;

	info = nla_data(attr);
	list_for_each_entry_rcu(laddr, address_list, list) {
		memcpy(info, &laddr->a, sizeof(laddr->a));
		memset(info + sizeof(laddr->a), 0, addrlen - sizeof(laddr->a));
		info += addrlen;
	}

	return 0;
}

static int inet_diag_msg_sctpaddrs_fill(struct sk_buff *skb,
					struct sctp_association *asoc)
{
	int addrlen = sizeof(struct sockaddr_storage);
	struct sctp_transport *from;
	struct nlattr *attr;
	void *info = NULL;

	attr = nla_reserve(skb, INET_DIAG_PEERS,
			   addrlen * asoc->peer.transport_count);
	if (!attr)
		return -EMSGSIZE;

	info = nla_data(attr);
	list_for_each_entry(from, &asoc->peer.transport_addr_list,
			    transports) {
		memcpy(info, &from->ipaddr, sizeof(from->ipaddr));
		memset(info + sizeof(from->ipaddr), 0,
		       addrlen - sizeof(from->ipaddr));
		info += addrlen;
	}

	return 0;
}

/* sctp asoc/ep fill */
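/* Emit one inet_diag record on @skb: either for a single association
 * (@asoc != NULL) or for the endpoint socket itself (@asoc == NULL).
 * Depending on req->idiag_ext it appends SKMEMINFO and INET_DIAG_INFO
 * (struct sctp_info); it always appends the local bind address list and,
 * when dumping an asoc, the peer transport addresses.
 */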
static int inet_sctp_diag_fill(struct sock *sk, struct sctp_association *asoc,
			       struct sk_buff *skb,
			       const struct inet_diag_req_v2 *req,
			       struct user_namespace *user_ns,
			       int portid, u32 seq, u16 nlmsg_flags,
			       const struct nlmsghdr *unlh,
			       bool net_admin)
{
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	struct list_head *addr_list;
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;
	int ext = req->idiag_ext;
	struct sctp_infox infox;
	void *info = NULL;

	nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
			nlmsg_flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	BUG_ON(!sk_fullsock(sk));

	r->idiag_timer = 0;
	r->idiag_retrans = 0;
	r->idiag_expires = 0;
	if (asoc) {
		inet_diag_msg_sctpasoc_fill(r, sk, asoc);
	} else {
		inet_diag_msg_common_fill(r, sk);
		r->idiag_state = sk->sk_state;
	}

	if (inet_diag_msg_attrs_fill(sk, skb, r, ext, user_ns, net_admin))
		goto errout;

	if (ext & (1 << (INET_DIAG_SKMEMINFO - 1))) {
		u32 mem[SK_MEMINFO_VARS];
		int amt;

		if (asoc && asoc->ep->sndbuf_policy)
			amt = asoc->sndbuf_used;
		else
			amt = sk_wmem_alloc_get(sk);
		mem[SK_MEMINFO_WMEM_ALLOC] = amt;
		if (asoc && asoc->ep->rcvbuf_policy)
			amt = atomic_read(&asoc->rmem_alloc);
		else
			amt = sk_rmem_alloc_get(sk);
		mem[SK_MEMINFO_RMEM_ALLOC] = amt;
		mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf;
		mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf;
		mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
		mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
		mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
		mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len);
		mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);

		if (nla_put(skb, INET_DIAG_SKMEMINFO, sizeof(mem), &mem) < 0)
			goto errout;
	}

	if (ext & (1 << (INET_DIAG_INFO - 1))) {
		struct nlattr *attr;

		attr = nla_reserve_64bit(skb, INET_DIAG_INFO,
					 sizeof(struct sctp_info),
					 INET_DIAG_PAD);
		if (!attr)
			goto errout;

		info = nla_data(attr);
	}
	infox.sctpinfo = (struct sctp_info *)info;
	infox.asoc = asoc;
	sctp_diag_get_info(sk, r, &infox);

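	/* Local addresses come from the asoc's bind address list when an
	 * association is being dumped, otherwise from the endpoint's.
	 */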
	addr_list = asoc ? &asoc->base.bind_addr.address_list
			 : &ep->base.bind_addr.address_list;
	if (inet_diag_msg_sctpladdrs_fill(skb, addr_list))
		goto errout;

	if (asoc && (ext & (1 << (INET_DIAG_CONG - 1))))
		if (nla_put_string(skb, INET_DIAG_CONG, "reno") < 0)
			goto errout;

	if (asoc && inet_diag_msg_sctpaddrs_fill(skb, asoc))
		goto errout;

	nlmsg_end(skb, nlh);
	return 0;

errout:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

/* callback and param */
struct sctp_comm_param {
	struct sk_buff *skb;
	struct netlink_callback *cb;
	const struct inet_diag_req_v2 *r;
	const struct nlmsghdr *nlh;
	bool net_admin;
};

static size_t inet_assoc_attr_size(struct sctp_association *asoc)
{
	int addrlen = sizeof(struct sockaddr_storage);
	int addrcnt = 0;
	struct sctp_sockaddr_entry *laddr;

	list_for_each_entry_rcu(laddr, &asoc->base.bind_addr.address_list,
				list)
		addrcnt++;

	return	  nla_total_size(sizeof(struct sctp_info))
		+ nla_total_size(addrlen * asoc->peer.transport_count)
		+ nla_total_size(addrlen * addrcnt)
		+ nla_total_size(sizeof(struct inet_diag_msg))
		+ inet_diag_msg_attrs_size()
		+ nla_total_size(sizeof(struct inet_diag_meminfo))
		+ 64;
}

static int sctp_sock_dump_one(struct sctp_endpoint *ep, struct sctp_transport *tsp, void *p)
{
	struct sctp_association *assoc = tsp->asoc;
	struct sctp_comm_param *commp = p;
	struct sock *sk = ep->base.sk;
	const struct inet_diag_req_v2 *req = commp->r;
	struct sk_buff *skb = commp->skb;
	struct sk_buff *rep;
	int err;

	err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
	if (err)
		return err;

	rep = nlmsg_new(inet_assoc_attr_size(assoc), GFP_KERNEL);
	if (!rep)
		return -ENOMEM;

	lock_sock(sk);
	if (ep != assoc->ep) {
		err = -EAGAIN;
		goto out;
	}

	err = inet_sctp_diag_fill(sk, assoc, rep, req, sk_user_ns(NETLINK_CB(skb).sk),
				  NETLINK_CB(skb).portid, commp->nlh->nlmsg_seq, 0,
				  commp->nlh, commp->net_admin);
	if (err < 0) {
		WARN_ON(err == -EMSGSIZE);
		goto out;
	}
	release_sock(sk);

	return nlmsg_unicast(sock_net(skb->sk)->diag_nlsk, rep, NETLINK_CB(skb).portid);

out:
	release_sock(sk);
	kfree_skb(rep);
	return err;
}

static int sctp_sock_dump(struct sctp_endpoint *ep, struct sctp_transport *tsp, void *p)
{
	struct sctp_comm_param *commp = p;
	struct sock *sk = ep->base.sk;
	struct sk_buff *skb = commp->skb;
	struct netlink_callback *cb = commp->cb;
	const struct inet_diag_req_v2 *r = commp->r;
	struct sctp_association *assoc;
	int err = 0;

	lock_sock(sk);
	if (ep != tsp->asoc->ep)
		goto release;
	list_for_each_entry(assoc, &ep->asocs, asocs) {
		if (cb->args[4] < cb->args[1])
			goto next;

		if (r->id.idiag_sport != htons(assoc->base.bind_addr.port) &&
		    r->id.idiag_sport)
			goto next;
		if (r->id.idiag_dport != htons(assoc->peer.port) &&
		    r->id.idiag_dport)
			goto next;

		if (!cb->args[3] &&
		    inet_sctp_diag_fill(sk, NULL, skb, r,
					sk_user_ns(NETLINK_CB(cb->skb).sk),
					NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq,
					NLM_F_MULTI, cb->nlh,
					commp->net_admin) < 0) {
			err = 1;
			goto release;
		}
		cb->args[3] = 1;

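		/* The NULL-asoc record above describes the owning sock and is
		 * emitted only once per sock (tracked via cb->args[3]); now
		 * dump the association itself.
		 */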
		if (inet_sctp_diag_fill(sk, assoc, skb, r,
					sk_user_ns(NETLINK_CB(cb->skb).sk),
					NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, 0, cb->nlh,
					commp->net_admin) < 0) {
			err = 1;
			goto release;
		}
next:
		cb->args[4]++;
	}
	cb->args[1] = 0;
	cb->args[3] = 0;
	cb->args[4] = 0;
release:
	release_sock(sk);
	return err;
}

static int sctp_sock_filter(struct sctp_endpoint *ep, struct sctp_transport *tsp, void *p)
{
	struct sctp_comm_param *commp = p;
	struct sock *sk = ep->base.sk;
	const struct inet_diag_req_v2 *r = commp->r;

	/* visit the ep only once over the transport walk: only a transport
	 * whose asoc is first on the ep's asoc list passes this check
	 */
	if (!list_is_first(&tsp->asoc->asocs, &ep->asocs))
		return 0;

	if (r->sdiag_family != AF_UNSPEC && sk->sk_family != r->sdiag_family)
		return 0;

	return 1;
}

static int sctp_ep_dump(struct sctp_endpoint *ep, void *p)
{
	struct sctp_comm_param *commp = p;
	struct sock *sk = ep->base.sk;
	struct sk_buff *skb = commp->skb;
	struct netlink_callback *cb = commp->cb;
	const struct inet_diag_req_v2 *r = commp->r;
	struct net *net = sock_net(skb->sk);
	struct inet_sock *inet = inet_sk(sk);
	int err = 0;

	if (!net_eq(sock_net(sk), net))
		goto out;

	if (cb->args[4] < cb->args[1])
		goto next;

	if (!(r->idiag_states & TCPF_LISTEN) && !list_empty(&ep->asocs))
		goto next;

	if (r->sdiag_family != AF_UNSPEC &&
	    sk->sk_family != r->sdiag_family)
		goto next;

	if (r->id.idiag_sport != inet->inet_sport &&
	    r->id.idiag_sport)
		goto next;

	if (r->id.idiag_dport != inet->inet_dport &&
	    r->id.idiag_dport)
		goto next;

	if (inet_sctp_diag_fill(sk, NULL, skb, r,
				sk_user_ns(NETLINK_CB(cb->skb).sk),
				NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq, NLM_F_MULTI,
				cb->nlh, commp->net_admin) < 0) {
		err = 2;
		goto out;
	}
next:
	cb->args[4]++;
out:
	return err;
}

/* define the functions for sctp_diag_handler */
static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
			       void *info)
{
	struct sctp_infox *infox = (struct sctp_infox *)info;

	if (infox->asoc) {
		r->idiag_rqueue = atomic_read(&infox->asoc->rmem_alloc);
		r->idiag_wqueue = infox->asoc->sndbuf_used;
	} else {
		r->idiag_rqueue = READ_ONCE(sk->sk_ack_backlog);
		r->idiag_wqueue = READ_ONCE(sk->sk_max_ack_backlog);
	}
	if (infox->sctpinfo)
		sctp_get_sctp_info(sk, infox->asoc, infox->sctpinfo);
}

static int sctp_diag_dump_one(struct netlink_callback *cb,
			      const struct inet_diag_req_v2 *req)
{
	struct sk_buff *skb = cb->skb;
	struct net *net = sock_net(skb->sk);
	const struct nlmsghdr *nlh = cb->nlh;
	union sctp_addr laddr, paddr;
	int dif = req->id.idiag_if;
	struct sctp_comm_param commp = {
		.skb = skb,
		.r = req,
		.nlh = nlh,
		.net_admin = netlink_net_capable(skb, CAP_NET_ADMIN),
	};

	if (req->sdiag_family == AF_INET) {
		laddr.v4.sin_port = req->id.idiag_sport;
		laddr.v4.sin_addr.s_addr = req->id.idiag_src[0];
		laddr.v4.sin_family = AF_INET;

		paddr.v4.sin_port = req->id.idiag_dport;
		paddr.v4.sin_addr.s_addr = req->id.idiag_dst[0];
		paddr.v4.sin_family = AF_INET;
	} else {
		laddr.v6.sin6_port = req->id.idiag_sport;
		memcpy(&laddr.v6.sin6_addr, req->id.idiag_src,
		       sizeof(laddr.v6.sin6_addr));
		laddr.v6.sin6_family = AF_INET6;

		paddr.v6.sin6_port = req->id.idiag_dport;
		memcpy(&paddr.v6.sin6_addr, req->id.idiag_dst,
		       sizeof(paddr.v6.sin6_addr));
		paddr.v6.sin6_family = AF_INET6;
	}

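	/* Look up the transport matching (laddr, paddr) and dump that single
	 * association via sctp_sock_dump_one().
	 */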
	return sctp_transport_lookup_process(sctp_sock_dump_one,
					     net, &laddr, &paddr, &commp, dif);
}

static void sctp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
			   const struct inet_diag_req_v2 *r)
{
	u32 idiag_states = r->idiag_states;
	struct net *net = sock_net(skb->sk);
	struct sctp_comm_param commp = {
		.skb = skb,
		.cb = cb,
		.r = r,
		.net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN),
	};
	int pos = cb->args[2];

	/* Dump the endpoints (listening socks) hashtable.
	 * args:
	 * 0 : set once the listening socks have been traversed
	 * 1 : records the sock position reached on the previous pass
	 * 4 : works as a temporary counter while traversing the list
	 */
	if (cb->args[0] == 0) {
		if (!(idiag_states & TCPF_LISTEN))
			goto skip;
		if (sctp_for_each_endpoint(sctp_ep_dump, &commp))
			goto done;
skip:
		cb->args[0] = 1;
		cb->args[1] = 0;
		cb->args[4] = 0;
	}

	/* Dump the asocs via the transport hashtable.
	 * args:
	 * 1 : records the assoc position reached on the previous pass
	 * 2 : records the transport position reached on the previous pass
	 * 3 : marks whether the ep info of the current asoc has been dumped
	 * 4 : works as a temporary counter while traversing the list
	 * 5 : saves the sk obtained while traversing the tsp list
	 */
	if (!(idiag_states & ~(TCPF_LISTEN | TCPF_CLOSE)))
		goto done;

	sctp_transport_traverse_process(sctp_sock_filter, sctp_sock_dump,
					net, &pos, &commp);
	cb->args[2] = pos;

done:
	cb->args[1] = cb->args[4];
	cb->args[4] = 0;
}

static const struct inet_diag_handler sctp_diag_handler = {
	.owner		 = THIS_MODULE,
	.dump		 = sctp_diag_dump,
	.dump_one	 = sctp_diag_dump_one,
	.idiag_get_info	 = sctp_diag_get_info,
	.idiag_type	 = IPPROTO_SCTP,
	.idiag_info_size = sizeof(struct sctp_info),
};

static int __init sctp_diag_init(void)
{
	return inet_diag_register(&sctp_diag_handler);
}

static void __exit sctp_diag_exit(void)
{
	inet_diag_unregister(&sctp_diag_handler);
}

module_init(sctp_diag_init);
module_exit(sctp_diag_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SCTP socket monitoring via SOCK_DIAG");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-132);
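/* Userspace note (illustrative, not part of the module): this handler answers
 * inet_diag SOCK_DIAG_BY_FAMILY requests with sdiag_protocol == IPPROTO_SCTP;
 * iproute2's `ss --sctp` is the typical client, assuming an ss build with
 * SCTP support.
 */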