// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * This module provides the abstraction for an SCTP association.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson <karl@athena.chicago.il.us>
 *    Jon Grimm <jgrimm@us.ibm.com>
 *    Xingang Guo <xingang.guo@intel.com>
 *    Hui Huang <hui.huang@nokia.com>
 *    Sridhar Samudrala <sri@us.ibm.com>
 *    Daisy Chang <daisyc@us.ibm.com>
 *    Ryan Layer <rmlayer@us.ibm.com>
 *    Kevin Gao <kevin.gao@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>

#include <linux/slab.h>
#include <linux/in.h>
#include <net/ipv6.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal functions. */
static void sctp_select_active_and_retran_path(struct sctp_association *asoc);
static void sctp_assoc_bh_rcv(struct work_struct *work);
static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc);
static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc);

/* 1st Level Abstractions. */

/* Initialize a new association from provided memory. */
static struct sctp_association *sctp_association_init(
					struct sctp_association *asoc,
					const struct sctp_endpoint *ep,
					const struct sock *sk,
					enum sctp_scope scope, gfp_t gfp)
{
	struct sctp_sock *sp;
	struct sctp_paramhdr *p;
	int i;

	/* Retrieve the SCTP per socket area. */
	sp = sctp_sk((struct sock *)sk);

	/* Discarding const is appropriate here. */
	asoc->ep = (struct sctp_endpoint *)ep;
	asoc->base.sk = (struct sock *)sk;
	asoc->base.net = sock_net(sk);

	sctp_endpoint_hold(asoc->ep);
	sock_hold(asoc->base.sk);

	/* Initialize the common base substructure. */
	asoc->base.type = SCTP_EP_TYPE_ASSOCIATION;

	/* Initialize the object handling fields. */
	refcount_set(&asoc->base.refcnt, 1);

	/* Initialize the bind addr area. */
	sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port);

	asoc->state = SCTP_STATE_CLOSED;
	asoc->cookie_life = ms_to_ktime(sp->assocparams.sasoc_cookie_life);
	asoc->user_frag = sp->user_frag;

	/* Set the association max_retrans and RTO values from the
	 * socket values.
	 */
	asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt;
	asoc->pf_retrans = sp->pf_retrans;
	asoc->ps_retrans = sp->ps_retrans;
	asoc->pf_expose = sp->pf_expose;

	asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial);
	asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max);
	asoc->rto_min = msecs_to_jiffies(sp->rtoinfo.srto_min);

	/* Initialize the association's heartbeat interval based on the
	 * sock configured value.
	 */
	asoc->hbinterval = msecs_to_jiffies(sp->hbinterval);
	asoc->probe_interval = msecs_to_jiffies(sp->probe_interval);

	asoc->encap_port = sp->encap_port;
	/* Initialize the path max retrans value. */
	asoc->pathmaxrxt = sp->pathmaxrxt;

	asoc->flowlabel = sp->flowlabel;
	asoc->dscp = sp->dscp;

	/* Set the association default SACK delay. */
	asoc->sackdelay = msecs_to_jiffies(sp->sackdelay);
	asoc->sackfreq = sp->sackfreq;

	/* Set the association default flags controlling
	 * Heartbeat, SACK delay, and Path MTU Discovery.
	 */
	asoc->param_flags = sp->param_flags;

	/* Initialize the maximum number of new data packets that can be sent
	 * in a burst.
	 */
	asoc->max_burst = sp->max_burst;

	asoc->subscribe = sp->subscribe;

	/* Initialize the association timers. */
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = asoc->rto_initial;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = asoc->rto_initial;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = asoc->rto_initial;

	/* sctpimpguide Section 2.12.2
	 * If the 'T5-shutdown-guard' timer is used, it SHOULD be set to the
	 * recommended value of 5 times 'RTO.Max'.
	 */
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]
		= 5 * asoc->rto_max;

	asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ;

	/* Initialize the timers. */
	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
		timer_setup(&asoc->timers[i], sctp_timer_events[i], 0);

	/* Pull default initialization values from the sock options.
	 * Note: This assumes that the values have already been
	 * validated in the sock.
	 */
	asoc->c.sinit_max_instreams = sp->initmsg.sinit_max_instreams;
	asoc->c.sinit_num_ostreams = sp->initmsg.sinit_num_ostreams;
	asoc->max_init_attempts = sp->initmsg.sinit_max_attempts;

	asoc->max_init_timeo =
		msecs_to_jiffies(sp->initmsg.sinit_max_init_timeo);

	/* Set the local window size for receive.
	 * This is also the rcvbuf space per association.
	 * RFC 2960 - A SCTP receiver MUST be able to receive a minimum of
	 * 1500 bytes in one SCTP packet.
	 */
	if ((sk->sk_rcvbuf/2) < SCTP_DEFAULT_MINWINDOW)
		asoc->rwnd = SCTP_DEFAULT_MINWINDOW;
	else
		asoc->rwnd = sk->sk_rcvbuf/2;

	asoc->a_rwnd = asoc->rwnd;

	/* Use my own max window until I learn something better. */
	asoc->peer.rwnd = SCTP_DEFAULT_MAXWINDOW;

	/* Initialize the receive memory counter. */
	atomic_set(&asoc->rmem_alloc, 0);

	init_waitqueue_head(&asoc->wait);

	asoc->c.my_vtag = sctp_generate_tag(ep);
	asoc->c.my_port = ep->base.bind_addr.port;

	asoc->c.initial_tsn = sctp_generate_tsn(ep);

	asoc->next_tsn = asoc->c.initial_tsn;

	asoc->ctsn_ack_point = asoc->next_tsn - 1;
	asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
	asoc->highest_sacked = asoc->ctsn_ack_point;
	asoc->last_cwr_tsn = asoc->ctsn_ack_point;
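
	/* Illustrative note (editorial, not from the original source): if
	 * the generated initial TSN is, say, 1000, then next_tsn starts at
	 * 1000 while ctsn_ack_point, adv_peer_ack_point, highest_sacked and
	 * last_cwr_tsn all start at 999, i.e. everything up to and including
	 * 999 counts as acknowledged before any DATA has been sent.
	 */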

	/* ADDIP Section 4.1 Asconf Chunk Procedures
	 *
	 * When an endpoint has an ASCONF signaled change to be sent to the
	 * remote endpoint it should do the following:
	 * ...
	 * A2) a serial number should be assigned to the chunk. The serial
	 * number SHOULD be a monotonically increasing number. The serial
	 * numbers SHOULD be initialized at the start of the
	 * association to the same value as the initial TSN.
	 */
	asoc->addip_serial = asoc->c.initial_tsn;
	asoc->strreset_outseq = asoc->c.initial_tsn;

	INIT_LIST_HEAD(&asoc->addip_chunk_list);
	INIT_LIST_HEAD(&asoc->asconf_ack_list);

	/* Make an empty list of remote transport addresses. */
	INIT_LIST_HEAD(&asoc->peer.transport_addr_list);

	/* RFC 2960 5.1 Normal Establishment of an Association
	 *
	 * After the reception of the first data chunk in an
	 * association the endpoint must immediately respond with a
	 * sack to acknowledge the data chunk. Subsequent
	 * acknowledgements should be done as described in Section
	 * 6.2.
	 *
	 * [We implement this by telling a new association that it
	 * already received one packet.]
	 */
	asoc->peer.sack_needed = 1;
	asoc->peer.sack_generation = 1;

	/* Create an input queue. */
	sctp_inq_init(&asoc->base.inqueue);
	sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv);

	/* Create an output queue. */
	sctp_outq_init(asoc, &asoc->outqueue);

	sctp_ulpq_init(&asoc->ulpq, asoc);

	if (sctp_stream_init(&asoc->stream, asoc->c.sinit_num_ostreams, 0, gfp))
		goto stream_free;

	/* Initialize default path MTU. */
	asoc->pathmtu = sp->pathmtu;
	sctp_assoc_update_frag_point(asoc);

	/* Assume that peer would support both address types unless we are
	 * told otherwise.
	 */
	asoc->peer.ipv4_address = 1;
	if (asoc->base.sk->sk_family == PF_INET6)
		asoc->peer.ipv6_address = 1;
	INIT_LIST_HEAD(&asoc->asocs);

	asoc->default_stream = sp->default_stream;
	asoc->default_ppid = sp->default_ppid;
	asoc->default_flags = sp->default_flags;
	asoc->default_context = sp->default_context;
	asoc->default_timetolive = sp->default_timetolive;
	asoc->default_rcv_context = sp->default_rcv_context;

	/* AUTH related initializations */
	INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
	if (sctp_auth_asoc_copy_shkeys(ep, asoc, gfp))
		goto stream_free;

	asoc->active_key_id = ep->active_key_id;
	asoc->strreset_enable = ep->strreset_enable;

	/* Save the hmacs and chunks list into this association. */
	if (ep->auth_hmacs_list)
		memcpy(asoc->c.auth_hmacs, ep->auth_hmacs_list,
		       ntohs(ep->auth_hmacs_list->param_hdr.length));
	if (ep->auth_chunk_list)
		memcpy(asoc->c.auth_chunks, ep->auth_chunk_list,
		       ntohs(ep->auth_chunk_list->param_hdr.length));

	/* Get the AUTH random number for this association. */
	p = (struct sctp_paramhdr *)asoc->c.auth_random;
	p->type = SCTP_PARAM_RANDOM;
	p->length = htons(sizeof(*p) + SCTP_AUTH_RANDOM_LENGTH);
	get_random_bytes(p+1, SCTP_AUTH_RANDOM_LENGTH);

	return asoc;

stream_free:
	sctp_stream_free(&asoc->stream);
	sock_put(asoc->base.sk);
	sctp_endpoint_put(asoc->ep);
	return NULL;
}

/* Allocate and initialize a new association. */
struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep,
					      const struct sock *sk,
					      enum sctp_scope scope, gfp_t gfp)
{
	struct sctp_association *asoc;

	asoc = kzalloc(sizeof(*asoc), gfp);
	if (!asoc)
		goto fail;

	if (!sctp_association_init(asoc, ep, sk, scope, gfp))
		goto fail_init;

	SCTP_DBG_OBJCNT_INC(assoc);

	pr_debug("Created asoc %p\n", asoc);

	return asoc;

fail_init:
	kfree(asoc);
fail:
	return NULL;
}
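
/* Usage sketch (illustrative only; the calls below mirror this file's own
 * API): a caller pairs sctp_association_new() with sctp_association_free(),
 * and any extra reference taken via sctp_association_hold() must be dropped
 * again with sctp_association_put():
 *
 *	asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
 *	if (!asoc)
 *		return -ENOMEM;
 *	...
 *	sctp_association_free(asoc);	// drops the initial reference
 */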

/* Free this association if possible. There may still be users, so
 * the actual deallocation may be delayed.
 */
void sctp_association_free(struct sctp_association *asoc)
{
	struct sock *sk = asoc->base.sk;
	struct sctp_transport *transport;
	struct list_head *pos, *temp;
	int i;

	/* Only real associations count against the endpoint, so
	 * don't bother if this is a temporary association.
	 */
	if (!list_empty(&asoc->asocs)) {
		list_del(&asoc->asocs);

		/* Decrement the backlog value for a TCP-style listening
		 * socket.
		 */
		if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
			sk_acceptq_removed(sk);
	}

	/* Mark as dead, so other users can know this structure is
	 * going away.
	 */
	asoc->base.dead = true;

	/* Dispose of any data lying around in the outqueue. */
	sctp_outq_free(&asoc->outqueue);

	/* Dispose of any pending messages for the upper layer. */
	sctp_ulpq_free(&asoc->ulpq);

	/* Dispose of any pending chunks on the inqueue. */
	sctp_inq_free(&asoc->base.inqueue);

	sctp_tsnmap_free(&asoc->peer.tsn_map);

	/* Free stream information. */
	sctp_stream_free(&asoc->stream);

	if (asoc->strreset_chunk)
		sctp_chunk_free(asoc->strreset_chunk);

	/* Clean up the bound address list. */
	sctp_bind_addr_free(&asoc->base.bind_addr);

	/* Do we need to go through all of our timers and
	 * delete them? To be safe we will try to delete all, but we
	 * should be able to go through and make a guess based
	 * on our state.
	 */
	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) {
		if (del_timer(&asoc->timers[i]))
			sctp_association_put(asoc);
	}

	/* Free peer's cached cookie. */
	kfree(asoc->peer.cookie);
	kfree(asoc->peer.peer_random);
	kfree(asoc->peer.peer_chunks);
	kfree(asoc->peer.peer_hmacs);

	/* Release the transport structures. */
	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		transport = list_entry(pos, struct sctp_transport, transports);
		list_del_rcu(pos);
		sctp_unhash_transport(transport);
		sctp_transport_free(transport);
	}

	asoc->peer.transport_count = 0;

	sctp_asconf_queue_teardown(asoc);

	/* Free pending address space being deleted. */
	kfree(asoc->asconf_addr_del_pending);

	/* AUTH - Free the endpoint shared keys. */
	sctp_auth_destroy_keys(&asoc->endpoint_shared_keys);

	/* AUTH - Free the association shared key. */
	sctp_auth_key_put(asoc->asoc_shared_key);

	sctp_association_put(asoc);
}

/* Cleanup and free up an association. */
static void sctp_association_destroy(struct sctp_association *asoc)
{
	if (unlikely(!asoc->base.dead)) {
		WARN(1, "Attempt to destroy undead association %p!\n", asoc);
		return;
	}

	sctp_endpoint_put(asoc->ep);
	sock_put(asoc->base.sk);

	if (asoc->assoc_id != 0) {
		spin_lock_bh(&sctp_assocs_id_lock);
		idr_remove(&sctp_assocs_id, asoc->assoc_id);
		spin_unlock_bh(&sctp_assocs_id_lock);
	}

	WARN_ON(atomic_read(&asoc->rmem_alloc));

	kfree_rcu(asoc, rcu);
	SCTP_DBG_OBJCNT_DEC(assoc);
}

/* Change the primary destination address for the peer. */
void sctp_assoc_set_primary(struct sctp_association *asoc,
			    struct sctp_transport *transport)
{
	int changeover = 0;

	/* It's a changeover only if we already have a primary path
	 * that we are changing.
	 */
	if (asoc->peer.primary_path != NULL &&
	    asoc->peer.primary_path != transport)
		changeover = 1;

	asoc->peer.primary_path = transport;
	sctp_ulpevent_notify_peer_addr_change(transport,
					      SCTP_ADDR_MADE_PRIM, 0);

	/* Set a default msg_name for events. */
	memcpy(&asoc->peer.primary_addr, &transport->ipaddr,
	       sizeof(union sctp_addr));

	/* If the primary path is changing, assume that the
	 * user wants to use this new path.
	 */
	if ((transport->state == SCTP_ACTIVE) ||
	    (transport->state == SCTP_UNKNOWN))
		asoc->peer.active_path = transport;

	/*
	 * SFR-CACC algorithm:
	 * Upon the receipt of a request to change the primary
	 * destination address, on the data structure for the new
	 * primary destination, the sender MUST do the following:
	 *
	 * 1) If CHANGEOVER_ACTIVE is set, then there was a switch
	 * to this destination address earlier. The sender MUST set
	 * CYCLING_CHANGEOVER to indicate that this switch is a
	 * double switch to the same destination address.
	 *
	 * Really, only bother if we have data queued or outstanding on
	 * the association.
	 */
	if (!asoc->outqueue.outstanding_bytes && !asoc->outqueue.out_qlen)
		return;

	if (transport->cacc.changeover_active)
		transport->cacc.cycling_changeover = changeover;

	/* 2) The sender MUST set CHANGEOVER_ACTIVE to indicate that
	 * a changeover has occurred.
	 */
	transport->cacc.changeover_active = changeover;

	/* 3) The sender MUST store the next TSN to be sent in
	 * next_tsn_at_change.
	 */
	transport->cacc.next_tsn_at_change = asoc->next_tsn;
}

/* Remove a transport from an association. */
void sctp_assoc_rm_peer(struct sctp_association *asoc,
			struct sctp_transport *peer)
{
	struct sctp_transport *transport;
	struct list_head *pos;
	struct sctp_chunk *ch;

	pr_debug("%s: association:%p addr:%pISpc\n",
		 __func__, asoc, &peer->ipaddr.sa);

	/* If we are to remove the current retran_path, update it
	 * to the next peer before removing this peer from the list.
	 */
	if (asoc->peer.retran_path == peer)
		sctp_assoc_update_retran_path(asoc);

	/* Remove this peer from the list. */
	list_del_rcu(&peer->transports);
	/* Remove this peer from the transport hashtable. */
	sctp_unhash_transport(peer);

	/* Get the first transport of asoc. */
	pos = asoc->peer.transport_addr_list.next;
	transport = list_entry(pos, struct sctp_transport, transports);

	/* Update any entries that match the peer to be deleted. */
	if (asoc->peer.primary_path == peer)
		sctp_assoc_set_primary(asoc, transport);
	if (asoc->peer.active_path == peer)
		asoc->peer.active_path = transport;
	if (asoc->peer.retran_path == peer)
		asoc->peer.retran_path = transport;
	if (asoc->peer.last_data_from == peer)
		asoc->peer.last_data_from = transport;

	if (asoc->strreset_chunk &&
	    asoc->strreset_chunk->transport == peer) {
		asoc->strreset_chunk->transport = transport;
		sctp_transport_reset_reconf_timer(transport);
	}

	/* If we remove the transport an INIT was last sent to, set it to
	 * NULL. Combined with the update of the retran path above, this
	 * will cause the next INIT to be sent to the next available
	 * transport, maintaining the cycle.
	 */
	if (asoc->init_last_sent_to == peer)
		asoc->init_last_sent_to = NULL;

	/* If we remove the transport an SHUTDOWN was last sent to, set it
	 * to NULL. Combined with the update of the retran path above, this
	 * will cause the next SHUTDOWN to be sent to the next available
	 * transport, maintaining the cycle.
	 */
	if (asoc->shutdown_last_sent_to == peer)
		asoc->shutdown_last_sent_to = NULL;

	/* If we remove the transport an ASCONF was last sent to, set it to
	 * NULL.
	 */
	if (asoc->addip_last_asconf &&
	    asoc->addip_last_asconf->transport == peer)
		asoc->addip_last_asconf->transport = NULL;

	/* If we have something on the transmitted list, we have to
	 * save it off. The best place is the active path.
	 */
	if (!list_empty(&peer->transmitted)) {
		struct sctp_transport *active = asoc->peer.active_path;

		/* Reset the transport of each chunk on this list. */
		list_for_each_entry(ch, &peer->transmitted,
				    transmitted_list) {
			ch->transport = NULL;
			ch->rtt_in_progress = 0;
		}

		list_splice_tail_init(&peer->transmitted,
				      &active->transmitted);

		/* Start a T3 timer here in case it wasn't running so
		 * that these migrated packets have a chance to get
		 * retransmitted.
		 */
		if (!timer_pending(&active->T3_rtx_timer))
			if (!mod_timer(&active->T3_rtx_timer,
				       jiffies + active->rto))
				sctp_transport_hold(active);
	}

	list_for_each_entry(ch, &asoc->outqueue.out_chunk_list, list)
		if (ch->transport == peer)
			ch->transport = NULL;

	asoc->peer.transport_count--;

	sctp_ulpevent_notify_peer_addr_change(peer, SCTP_ADDR_REMOVED, 0);
	sctp_transport_free(peer);
}

/* Add a transport address to an association. */
struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
					   const union sctp_addr *addr,
					   const gfp_t gfp,
					   const int peer_state)
{
	struct sctp_transport *peer;
	struct sctp_sock *sp;
	unsigned short port;

	sp = sctp_sk(asoc->base.sk);

	/* AF_INET and AF_INET6 share common port field. */
	port = ntohs(addr->v4.sin_port);

	pr_debug("%s: association:%p addr:%pISpc state:%d\n", __func__,
		 asoc, &addr->sa, peer_state);

	/* Set the port if it has not been set yet. */
	if (0 == asoc->peer.port)
		asoc->peer.port = port;

	/* Check to see if this is a duplicate. */
	peer = sctp_assoc_lookup_paddr(asoc, addr);
	if (peer) {
		/* An UNKNOWN state is only set on transports added by
		 * user in sctp_connectx() call. Such transports should be
		 * considered CONFIRMED per RFC 4960, Section 5.4.
		 */
		if (peer->state == SCTP_UNKNOWN)
			peer->state = SCTP_ACTIVE;
		return peer;
	}

	peer = sctp_transport_new(asoc->base.net, addr, gfp);
	if (!peer)
		return NULL;

	sctp_transport_set_owner(peer, asoc);

	/* Initialize the peer's heartbeat interval based on the
	 * association configured value.
	 */
	peer->hbinterval = asoc->hbinterval;
	peer->probe_interval = asoc->probe_interval;

	peer->encap_port = asoc->encap_port;

	/* Set the path max_retrans. */
	peer->pathmaxrxt = asoc->pathmaxrxt;

	/* And the partial failure retrans threshold. */
	peer->pf_retrans = asoc->pf_retrans;
	/* And the primary path switchover retrans threshold. */
	peer->ps_retrans = asoc->ps_retrans;

	/* Initialize the peer's SACK delay timeout based on the
	 * association configured value.
	 */
	peer->sackdelay = asoc->sackdelay;
	peer->sackfreq = asoc->sackfreq;

	if (addr->sa.sa_family == AF_INET6) {
		__be32 info = addr->v6.sin6_flowinfo;

		if (info) {
			peer->flowlabel = ntohl(info & IPV6_FLOWLABEL_MASK);
			peer->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
		} else {
			peer->flowlabel = asoc->flowlabel;
		}
	}
	peer->dscp = asoc->dscp;

	/* Enable/disable heartbeat, SACK delay, and path MTU discovery
	 * based on association setting.
	 */
	peer->param_flags = asoc->param_flags;

	/* Initialize the pmtu of the transport. */
	sctp_transport_route(peer, NULL, sp);

	/* If this is the first transport addr on this association,
	 * initialize the association PMTU to the peer's PMTU.
	 * If not and the current association PMTU is higher than the new
	 * peer's PMTU, reset the association PMTU to the new peer's PMTU.
	 */
	sctp_assoc_set_pmtu(asoc, asoc->pathmtu ?
				  min_t(int, peer->pathmtu, asoc->pathmtu) :
				  peer->pathmtu);

	peer->pmtu_pending = 0;

	/* The asoc->peer.port might not be meaningful yet, but
	 * initialize the packet structure anyway.
	 */
	sctp_packet_init(&peer->packet, peer, asoc->base.bind_addr.port,
			 asoc->peer.port);

	/* 7.2.1 Slow-Start
	 *
	 * o The initial cwnd before DATA transmission or after a sufficiently
	 *   long idle period MUST be set to
	 *	min(4*MTU, max(2*MTU, 4380 bytes))
	 *
	 * o The initial value of ssthresh MAY be arbitrarily high
	 *   (for example, implementations MAY use the size of the
	 *   receiver advertised window).
	 */
	peer->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));

	/* At this point, we may not have the receiver's advertised window,
	 * so initialize ssthresh to the default value and it will be set
	 * later when we process the INIT.
	 */
	peer->ssthresh = SCTP_DEFAULT_MAXWINDOW;

	peer->partial_bytes_acked = 0;
	peer->flight_size = 0;
	peer->burst_limited = 0;

	/* Set the transport's RTO.initial value. */
	peer->rto = asoc->rto_initial;
	sctp_max_rto(asoc, peer);

	/* Set the peer's active state. */
	peer->state = peer_state;

	/* Add this peer into the transport hashtable. */
	if (sctp_hash_transport(peer)) {
		sctp_transport_free(peer);
		return NULL;
	}

	sctp_transport_pl_reset(peer);

	/* Attach the remote transport to our asoc. */
	list_add_tail_rcu(&peer->transports, &asoc->peer.transport_addr_list);
	asoc->peer.transport_count++;

	sctp_ulpevent_notify_peer_addr_change(peer, SCTP_ADDR_ADDED, 0);

	/* If we do not yet have a primary path, set one. */
	if (!asoc->peer.primary_path) {
		sctp_assoc_set_primary(asoc, peer);
		asoc->peer.retran_path = peer;
	}

	if (asoc->peer.active_path == asoc->peer.retran_path &&
	    peer->state != SCTP_UNCONFIRMED) {
		asoc->peer.retran_path = peer;
	}

	return peer;
}
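
/* Worked example for the 7.2.1 initial cwnd formula above (editorial note,
 * not from the original source): for a path MTU of 1500 bytes,
 *	min(4 * 1500, max(2 * 1500, 4380)) = min(6000, 4380) = 4380 bytes,
 * and for a 9000-byte jumbo-frame path,
 *	min(4 * 9000, max(2 * 9000, 4380)) = min(36000, 18000) = 18000 bytes.
 */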

/* Delete a transport address from an association. */
void sctp_assoc_del_peer(struct sctp_association *asoc,
			 const union sctp_addr *addr)
{
	struct list_head *pos;
	struct list_head *temp;
	struct sctp_transport *transport;

	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		transport = list_entry(pos, struct sctp_transport, transports);
		if (sctp_cmp_addr_exact(addr, &transport->ipaddr)) {
			/* Do the bookkeeping for removing the peer and free it. */
			sctp_assoc_rm_peer(asoc, transport);
			break;
		}
	}
}

/* Lookup a transport by address. */
struct sctp_transport *sctp_assoc_lookup_paddr(
					const struct sctp_association *asoc,
					const union sctp_addr *address)
{
	struct sctp_transport *t;

	/* Cycle through all transports searching for a peer address. */

	list_for_each_entry(t, &asoc->peer.transport_addr_list,
			    transports) {
		if (sctp_cmp_addr_exact(address, &t->ipaddr))
			return t;
	}

	return NULL;
}

/* Remove all transports except a given one. */
void sctp_assoc_del_nonprimary_peers(struct sctp_association *asoc,
				     struct sctp_transport *primary)
{
	struct sctp_transport *temp;
	struct sctp_transport *t;

	list_for_each_entry_safe(t, temp, &asoc->peer.transport_addr_list,
				 transports) {
		/* If the current transport is not the primary one, delete it. */
		if (t != primary)
			sctp_assoc_rm_peer(asoc, t);
	}
}

/* Engage in transport control operations.
 * Mark the transport up or down and send a notification to the user.
 * Select and update the new active and retran paths.
 */
void sctp_assoc_control_transport(struct sctp_association *asoc,
				  struct sctp_transport *transport,
				  enum sctp_transport_cmd command,
				  sctp_sn_error_t error)
{
	int spc_state = SCTP_ADDR_AVAILABLE;
	bool ulp_notify = true;

	/* Record the transition on the transport. */
	switch (command) {
	case SCTP_TRANSPORT_UP:
		/* If we are moving from UNCONFIRMED state due
		 * to heartbeat success, report the SCTP_ADDR_CONFIRMED
		 * state to the user, otherwise report SCTP_ADDR_AVAILABLE.
		 */
		if (transport->state == SCTP_PF &&
		    asoc->pf_expose != SCTP_PF_EXPOSE_ENABLE)
			ulp_notify = false;
		else if (transport->state == SCTP_UNCONFIRMED &&
			 error == SCTP_HEARTBEAT_SUCCESS)
			spc_state = SCTP_ADDR_CONFIRMED;

		transport->state = SCTP_ACTIVE;
		sctp_transport_pl_reset(transport);
		break;

	case SCTP_TRANSPORT_DOWN:
		/* If the transport was never confirmed, do not transition it
		 * to inactive state. Also, release the cached route since
		 * there may be a better route next time.
		 */
		if (transport->state != SCTP_UNCONFIRMED) {
			transport->state = SCTP_INACTIVE;
			sctp_transport_pl_reset(transport);
			spc_state = SCTP_ADDR_UNREACHABLE;
		} else {
			sctp_transport_dst_release(transport);
			ulp_notify = false;
		}
		break;

	case SCTP_TRANSPORT_PF:
		transport->state = SCTP_PF;
		if (asoc->pf_expose != SCTP_PF_EXPOSE_ENABLE)
			ulp_notify = false;
		else
			spc_state = SCTP_ADDR_POTENTIALLY_FAILED;
		break;

	default:
		return;
	}

	/* Generate and send a SCTP_PEER_ADDR_CHANGE notification
	 * to the user.
	 */
	if (ulp_notify)
		sctp_ulpevent_notify_peer_addr_change(transport,
						      spc_state, error);

	/* Select new active and retran paths. */
	sctp_select_active_and_retran_path(asoc);
}
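
/* Summary of the transitions above (editorial note): SCTP_TRANSPORT_UP
 * always lands the transport in SCTP_ACTIVE; SCTP_TRANSPORT_DOWN moves a
 * confirmed transport to SCTP_INACTIVE (reported as unreachable) but only
 * drops the cached route for an SCTP_UNCONFIRMED one; SCTP_TRANSPORT_PF
 * moves the transport to SCTP_PF, which is reported to the ULP only when
 * pf_expose is SCTP_PF_EXPOSE_ENABLE.
 */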

/* Hold a reference to an association. */
void sctp_association_hold(struct sctp_association *asoc)
{
	refcount_inc(&asoc->base.refcnt);
}

/* Release a reference to an association and cleanup
 * if there are no more references.
 */
void sctp_association_put(struct sctp_association *asoc)
{
	if (refcount_dec_and_test(&asoc->base.refcnt))
		sctp_association_destroy(asoc);
}

/* Allocate the next TSN, Transmission Sequence Number, for the given
 * association.
 */
__u32 sctp_association_get_next_tsn(struct sctp_association *asoc)
{
	/* From Section 1.6 Serial Number Arithmetic:
	 * Transmission Sequence Numbers wrap around when they reach
	 * 2**32 - 1. That is, the next TSN a DATA chunk MUST use
	 * after transmitting TSN = 2**32 - 1 is TSN = 0.
	 */
	__u32 retval = asoc->next_tsn;

	asoc->next_tsn++;
	asoc->unack_data++;

	return retval;
}

/* Compare two addresses to see if they match. Wildcard addresses
 * only match themselves.
 */
int sctp_cmp_addr_exact(const union sctp_addr *ss1,
			const union sctp_addr *ss2)
{
	struct sctp_af *af;

	af = sctp_get_af_specific(ss1->sa.sa_family);
	if (unlikely(!af))
		return 0;

	return af->cmp_addr(ss1, ss2);
}

/* Return an ECNE chunk to get prepended to a packet.
 * Note: We are sly and return a shared, prealloced chunk. FIXME:
 * No we don't, but we could/should.
 */
struct sctp_chunk *sctp_get_ecne_prepend(struct sctp_association *asoc)
{
	if (!asoc->need_ecne)
		return NULL;

	/* Send ECNE if needed.
	 * Not being able to allocate a chunk here is not deadly.
	 */
	return sctp_make_ecne(asoc, asoc->last_ecne_tsn);
}

/* Find which transport this TSN was sent on. */
struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc,
					     __u32 tsn)
{
	struct sctp_transport *active;
	struct sctp_transport *match;
	struct sctp_transport *transport;
	struct sctp_chunk *chunk;
	__be32 key = htonl(tsn);

	match = NULL;

	/*
	 * FIXME: In general, find a more efficient data structure for
	 * searching.
	 */

	/*
	 * The general strategy is to search each transport's transmitted
	 * list. Return which transport this TSN lives on.
	 *
	 * Let's be hopeful and check the active_path first.
	 * Another optimization would be to know if there is only one
	 * outbound path and not have to look for the TSN at all.
	 */

	active = asoc->peer.active_path;

	list_for_each_entry(chunk, &active->transmitted,
			    transmitted_list) {
		if (key == chunk->subh.data_hdr->tsn) {
			match = active;
			goto out;
		}
	}

	/* If not found, go search all the other transports. */
	list_for_each_entry(transport, &asoc->peer.transport_addr_list,
			    transports) {
		if (transport == active)
			continue;
		list_for_each_entry(chunk, &transport->transmitted,
				    transmitted_list) {
			if (key == chunk->subh.data_hdr->tsn) {
				match = transport;
				goto out;
			}
		}
	}
out:
	return match;
}
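
/* Worked example of the serial number arithmetic used by
 * sctp_association_get_next_tsn() above (editorial note): if next_tsn is
 * 0xffffffff, the caller is handed 0xffffffff and the unsigned increment
 * wraps next_tsn to 0, which is exactly the wrap-around behaviour the
 * Section 1.6 comment requires.
 */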

/* Do delayed input processing. This is scheduled by sctp_rcv(). */
static void sctp_assoc_bh_rcv(struct work_struct *work)
{
	struct sctp_association *asoc =
		container_of(work, struct sctp_association,
			     base.inqueue.immediate);
	struct net *net = asoc->base.net;
	union sctp_subtype subtype;
	struct sctp_endpoint *ep;
	struct sctp_chunk *chunk;
	struct sctp_inq *inqueue;
	int first_time = 1;	/* is this the first time through the loop */
	int error = 0;
	int state;

	/* The association should be held so we should be safe. */
	ep = asoc->ep;

	inqueue = &asoc->base.inqueue;
	sctp_association_hold(asoc);
	while (NULL != (chunk = sctp_inq_pop(inqueue))) {
		state = asoc->state;
		subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);

		/* If the first chunk in the packet is AUTH, do special
		 * processing specified in Section 6.3 of SCTP-AUTH spec.
		 */
		if (first_time && subtype.chunk == SCTP_CID_AUTH) {
			struct sctp_chunkhdr *next_hdr;

			next_hdr = sctp_inq_peek(inqueue);
			if (!next_hdr)
				goto normal;

			/* If the next chunk is COOKIE-ECHO, skip the AUTH
			 * chunk while saving a pointer to it so we can do
			 * Authentication later (during cookie-echo
			 * processing).
			 */
			if (next_hdr->type == SCTP_CID_COOKIE_ECHO) {
				chunk->auth_chunk = skb_clone(chunk->skb,
							      GFP_ATOMIC);
				chunk->auth = 1;
				continue;
			}
		}

normal:
		/* SCTP-AUTH, Section 6.3:
		 *    The receiver has a list of chunk types which it expects
		 *    to be received only after an AUTH-chunk. This list has
		 *    been sent to the peer during the association setup. It
		 *    MUST silently discard these chunks if they are not placed
		 *    after an AUTH chunk in the packet.
		 */
		if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth)
			continue;

		/* Remember where the last DATA chunk came from so we
		 * know where to send the SACK.
		 */
		if (sctp_chunk_is_data(chunk))
			asoc->peer.last_data_from = chunk->transport;
		else {
			SCTP_INC_STATS(net, SCTP_MIB_INCTRLCHUNKS);
			asoc->stats.ictrlchunks++;
			if (chunk->chunk_hdr->type == SCTP_CID_SACK)
				asoc->stats.isacks++;
		}

		if (chunk->transport)
			chunk->transport->last_time_heard = ktime_get();

		/* Run through the state machine. */
		error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype,
				   state, ep, asoc, chunk, GFP_ATOMIC);

		/* Check to see if the association is freed in response to
		 * the incoming chunk. If so, get out of the while loop.
		 */
		if (asoc->base.dead)
			break;

		/* If there is an error on chunk, discard this packet. */
		if (error && chunk)
			chunk->pdiscard = 1;

		if (first_time)
			first_time = 0;
	}
	sctp_association_put(asoc);
}

/* This routine moves an association from its old sk to a new sk. */
void sctp_assoc_migrate(struct sctp_association *assoc, struct sock *newsk)
{
	struct sctp_sock *newsp = sctp_sk(newsk);
	struct sock *oldsk = assoc->base.sk;

	/* Delete the association from the old endpoint's list of
	 * associations.
	 */
	list_del_init(&assoc->asocs);

	/* Decrement the backlog value for a TCP-style socket. */
	if (sctp_style(oldsk, TCP))
		sk_acceptq_removed(oldsk);

	/* Release references to the old endpoint and the sock. */
	sctp_endpoint_put(assoc->ep);
	sock_put(assoc->base.sk);

	/* Get a reference to the new endpoint. */
	assoc->ep = newsp->ep;
	sctp_endpoint_hold(assoc->ep);

	/* Get a reference to the new sock. */
	assoc->base.sk = newsk;
	sock_hold(assoc->base.sk);

	/* Add the association to the new endpoint's list of associations. */
	sctp_endpoint_add_asoc(newsp->ep, assoc);
}

/* Update an association (possibly from unexpected COOKIE-ECHO processing). */
int sctp_assoc_update(struct sctp_association *asoc,
		      struct sctp_association *new)
{
	struct sctp_transport *trans;
	struct list_head *pos, *temp;

	/* Copy in new parameters of peer. */
	asoc->c = new->c;
	asoc->peer.rwnd = new->peer.rwnd;
	asoc->peer.sack_needed = new->peer.sack_needed;
	asoc->peer.auth_capable = new->peer.auth_capable;
	asoc->peer.i = new->peer.i;

	if (!sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
			      asoc->peer.i.initial_tsn, GFP_ATOMIC))
		return -ENOMEM;

	/* Remove any peer addresses not present in the new association. */
	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		trans = list_entry(pos, struct sctp_transport, transports);
		if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) {
			sctp_assoc_rm_peer(asoc, trans);
			continue;
		}

		if (asoc->state >= SCTP_STATE_ESTABLISHED)
			sctp_transport_reset(trans);
	}

	/* If the case is A (association restart), use
	 * initial_tsn as next_tsn. If the case is B, use
	 * current next_tsn in case data sent to peer
	 * has been discarded and needs retransmission.
	 */
	if (asoc->state >= SCTP_STATE_ESTABLISHED) {
		asoc->next_tsn = new->next_tsn;
		asoc->ctsn_ack_point = new->ctsn_ack_point;
		asoc->adv_peer_ack_point = new->adv_peer_ack_point;

		/* Reinitialize SSN for both local streams
		 * and peer's streams.
		 */
		sctp_stream_clear(&asoc->stream);

		/* Flush the ULP reassembly and ordered queue.
		 * Any data there will now be stale and will
		 * cause problems.
		 */
		sctp_ulpq_flush(&asoc->ulpq);

		/* Reset the overall association error count so
		 * that the restarted association doesn't get torn
		 * down on the next retransmission timer.
		 */
		asoc->overall_error_count = 0;

	} else {
		/* Add any peer addresses from the new association. */
		list_for_each_entry(trans, &new->peer.transport_addr_list,
				    transports)
			if (!sctp_assoc_add_peer(asoc, &trans->ipaddr,
						 GFP_ATOMIC, trans->state))
				return -ENOMEM;

		asoc->ctsn_ack_point = asoc->next_tsn - 1;
		asoc->adv_peer_ack_point = asoc->ctsn_ack_point;

		if (sctp_state(asoc, COOKIE_WAIT))
			sctp_stream_update(&asoc->stream, &new->stream);

		/* Get a new assoc id if we don't have one yet. */
		if (sctp_assoc_set_id(asoc, GFP_ATOMIC))
			return -ENOMEM;
	}

	/* SCTP-AUTH: Save the peer parameters from the new association
	 * and also move the association shared keys over.
	 */
	kfree(asoc->peer.peer_random);
	asoc->peer.peer_random = new->peer.peer_random;
	new->peer.peer_random = NULL;

	kfree(asoc->peer.peer_chunks);
	asoc->peer.peer_chunks = new->peer.peer_chunks;
	new->peer.peer_chunks = NULL;

	kfree(asoc->peer.peer_hmacs);
	asoc->peer.peer_hmacs = new->peer.peer_hmacs;
	new->peer.peer_hmacs = NULL;

	return sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
}

/* Update the retran path for sending a retransmitted packet.
 * See also RFC4960, 6.4. Multi-Homed SCTP Endpoints:
 *
 *   When there is outbound data to send and the primary path
 *   becomes inactive (e.g., due to failures), or where the
 *   SCTP user explicitly requests to send data to an
 *   inactive destination transport address, before reporting
 *   an error to its ULP, the SCTP endpoint should try to send
 *   the data to an alternate active destination transport
 *   address if one exists.
 *
 *   When retransmitting data that timed out, if the endpoint
 *   is multihomed, it should consider each source-destination
 *   address pair in its retransmission selection policy.
 *   When retransmitting timed-out data, the endpoint should
 *   attempt to pick the most divergent source-destination
 *   pair from the original source-destination pair to which
 *   the packet was transmitted.
 *
 *   Note: Rules for picking the most divergent source-destination
 *   pair are an implementation decision and are not specified
 *   within this document.
 *
 * Our basic strategy is to round-robin transports in priority order
 * according to sctp_trans_score(); e.g., if no transport with state
 * SCTP_ACTIVE exists, round-robin through SCTP_UNKNOWN, and so on.
 * You get the picture.
 */
static u8 sctp_trans_score(const struct sctp_transport *trans)
{
	switch (trans->state) {
	case SCTP_ACTIVE:
		return 3;	/* best case */
	case SCTP_UNKNOWN:
		return 2;
	case SCTP_PF:
		return 1;
	default: /* case SCTP_INACTIVE */
		return 0;	/* worst case */
	}
}

static struct sctp_transport *sctp_trans_elect_tie(struct sctp_transport *trans1,
						   struct sctp_transport *trans2)
{
	if (trans1->error_count > trans2->error_count) {
		return trans2;
	} else if (trans1->error_count == trans2->error_count &&
		   ktime_after(trans2->last_time_heard,
			       trans1->last_time_heard)) {
		return trans2;
	} else {
		return trans1;
	}
}
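
/* Example of the election logic implemented by sctp_trans_score() and
 * sctp_trans_elect_tie() above (editorial note): an SCTP_ACTIVE transport
 * (score 3) always beats an SCTP_PF one (score 1); between two transports
 * with equal scores, the one with the lower error count wins, and on an
 * equal error count the one heard from most recently wins.
 */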

static struct sctp_transport *sctp_trans_elect_best(struct sctp_transport *curr,
						    struct sctp_transport *best)
{
	u8 score_curr, score_best;

	if (best == NULL || curr == best)
		return curr;

	score_curr = sctp_trans_score(curr);
	score_best = sctp_trans_score(best);

	/* First, try a score-based selection if both transport states
	 * differ. If we're in a tie, let's try to make a more clever
	 * decision here based on error counts and last time heard.
	 */
	if (score_curr > score_best)
		return curr;
	else if (score_curr == score_best)
		return sctp_trans_elect_tie(best, curr);
	else
		return best;
}

void sctp_assoc_update_retran_path(struct sctp_association *asoc)
{
	struct sctp_transport *trans = asoc->peer.retran_path;
	struct sctp_transport *trans_next = NULL;

	/* We're done as we only have the one and only path. */
	if (asoc->peer.transport_count == 1)
		return;
	/* If active_path and retran_path are the same and active,
	 * then this is the only active path. Use it.
	 */
	if (asoc->peer.active_path == asoc->peer.retran_path &&
	    asoc->peer.active_path->state == SCTP_ACTIVE)
		return;

	/* Iterate from retran_path's successor back to retran_path. */
	for (trans = list_next_entry(trans, transports); 1;
	     trans = list_next_entry(trans, transports)) {
		/* Manually skip the head element. */
		if (&trans->transports == &asoc->peer.transport_addr_list)
			continue;
		if (trans->state == SCTP_UNCONFIRMED)
			continue;
		trans_next = sctp_trans_elect_best(trans, trans_next);
		/* Active is good enough for immediate return. */
		if (trans_next->state == SCTP_ACTIVE)
			break;
		/* We've reached the end, time to update path. */
		if (trans == asoc->peer.retran_path)
			break;
	}

	asoc->peer.retran_path = trans_next;

	pr_debug("%s: association:%p updated new path to addr:%pISpc\n",
		 __func__, asoc, &asoc->peer.retran_path->ipaddr.sa);
}

static void sctp_select_active_and_retran_path(struct sctp_association *asoc)
{
	struct sctp_transport *trans, *trans_pri = NULL, *trans_sec = NULL;
	struct sctp_transport *trans_pf = NULL;

	/* Look for the two most recently used active transports. */
	list_for_each_entry(trans, &asoc->peer.transport_addr_list,
			    transports) {
		/* Skip uninteresting transports. */
		if (trans->state == SCTP_INACTIVE ||
		    trans->state == SCTP_UNCONFIRMED)
			continue;
		/* Keep track of the best PF transport from our
		 * list in case we don't find an active one.
		 */
		if (trans->state == SCTP_PF) {
			trans_pf = sctp_trans_elect_best(trans, trans_pf);
			continue;
		}
		/* For active transports, pick the most recent ones. */
		if (trans_pri == NULL ||
		    ktime_after(trans->last_time_heard,
				trans_pri->last_time_heard)) {
			trans_sec = trans_pri;
			trans_pri = trans;
		} else if (trans_sec == NULL ||
			   ktime_after(trans->last_time_heard,
				       trans_sec->last_time_heard)) {
			trans_sec = trans;
		}
	}

	/* RFC 2960 6.4 Multi-Homed SCTP Endpoints
	 *
	 * By default, an endpoint should always transmit to the primary
	 * path, unless the SCTP user explicitly specifies the
	 * destination transport address (and possibly source transport
	 * address) to use. [If the primary is active but not most recent,
	 * bump the most recently used transport.]
	 */
	if ((asoc->peer.primary_path->state == SCTP_ACTIVE ||
	     asoc->peer.primary_path->state == SCTP_UNKNOWN) &&
	    asoc->peer.primary_path != trans_pri) {
		trans_sec = trans_pri;
		trans_pri = asoc->peer.primary_path;
	}

	/* We did not find anything useful for a possible retransmission
	 * path; either the primary path that we found is the same as
	 * the current one, or we didn't generally find an active one.
	 */
	if (trans_sec == NULL)
		trans_sec = trans_pri;

	/* If we failed to find a usable transport, just camp on the
	 * active or pick a PF iff it's the better choice.
	 */
	if (trans_pri == NULL) {
		trans_pri = sctp_trans_elect_best(asoc->peer.active_path, trans_pf);
		trans_sec = trans_pri;
	}

	/* Set the active and retran transports. */
	asoc->peer.active_path = trans_pri;
	asoc->peer.retran_path = trans_sec;
}

struct sctp_transport *
sctp_assoc_choose_alter_transport(struct sctp_association *asoc,
				  struct sctp_transport *last_sent_to)
{
	/* If this is the first time a packet is sent, use the active path,
	 * else use the retran path. If the last packet was sent over the
	 * retran path, update the retran path and use it.
	 */
	if (last_sent_to == NULL) {
		return asoc->peer.active_path;
	} else {
		if (last_sent_to == asoc->peer.retran_path)
			sctp_assoc_update_retran_path(asoc);

		return asoc->peer.retran_path;
	}
}

void sctp_assoc_update_frag_point(struct sctp_association *asoc)
{
	int frag = sctp_mtu_payload(sctp_sk(asoc->base.sk), asoc->pathmtu,
				    sctp_datachk_len(&asoc->stream));

	if (asoc->user_frag)
		frag = min_t(int, frag, asoc->user_frag);

	frag = min_t(int, frag, SCTP_MAX_CHUNK_LEN -
				sctp_datachk_len(&asoc->stream));

	asoc->frag_point = SCTP_TRUNC4(frag);
}

void sctp_assoc_set_pmtu(struct sctp_association *asoc, __u32 pmtu)
{
	if (asoc->pathmtu != pmtu) {
		asoc->pathmtu = pmtu;
		sctp_assoc_update_frag_point(asoc);
	}

	pr_debug("%s: asoc:%p, pmtu:%d, frag_point:%d\n", __func__, asoc,
		 asoc->pathmtu, asoc->frag_point);
}

/* Update the association's pmtu and frag_point by going through all the
 * transports. This routine is called when a transport's PMTU has changed.
 */
void sctp_assoc_sync_pmtu(struct sctp_association *asoc)
{
	struct sctp_transport *t;
	__u32 pmtu = 0;

	if (!asoc)
		return;

	/* Get the lowest pmtu of all the transports. */
	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) {
		if (t->pmtu_pending && t->dst) {
			sctp_transport_update_pmtu(t,
						   atomic_read(&t->mtu_info));
			t->pmtu_pending = 0;
		}
		if (!pmtu || (t->pathmtu < pmtu))
			pmtu = t->pathmtu;
	}

	sctp_assoc_set_pmtu(asoc, pmtu);
}

/* Should we send a SACK to update our peer? */
static inline bool sctp_peer_needs_update(struct sctp_association *asoc)
{
	struct net *net = asoc->base.net;

	switch (asoc->state) {
	case SCTP_STATE_ESTABLISHED:
	case SCTP_STATE_SHUTDOWN_PENDING:
	case SCTP_STATE_SHUTDOWN_RECEIVED:
	case SCTP_STATE_SHUTDOWN_SENT:
		if ((asoc->rwnd > asoc->a_rwnd) &&
		    ((asoc->rwnd - asoc->a_rwnd) >= max_t(__u32,
			   (asoc->base.sk->sk_rcvbuf >> net->sctp.rwnd_upd_shift),
			   asoc->pathmtu)))
			return true;
		break;
	default:
		break;
	}
	return false;
}
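
/* Worked example for sctp_peer_needs_update() above (editorial note,
 * assuming rwnd_upd_shift is at its usual default of 4): with
 * sk_rcvbuf = 200000 and a path MTU of 1500, a window update SACK is
 * warranted once rwnd exceeds a_rwnd by at least
 * max(200000 >> 4, 1500) = max(12500, 1500) = 12500 bytes.
 */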

/* Increase asoc's rwnd by len and send a window update SACK if needed. */
void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
{
	struct sctp_chunk *sack;
	struct timer_list *timer;

	if (asoc->rwnd_over) {
		if (asoc->rwnd_over >= len) {
			asoc->rwnd_over -= len;
		} else {
			asoc->rwnd += (len - asoc->rwnd_over);
			asoc->rwnd_over = 0;
		}
	} else {
		asoc->rwnd += len;
	}

	/* If we had window pressure, start recovering it
	 * once our rwnd had reached the accumulated pressure
	 * threshold. The idea is to recover slowly, but up
	 * to the initial advertised window.
	 */
	if (asoc->rwnd_press) {
		int change = min(asoc->pathmtu, asoc->rwnd_press);

		asoc->rwnd += change;
		asoc->rwnd_press -= change;
	}

	pr_debug("%s: asoc:%p rwnd increased by %d to (%u, %u) - %u\n",
		 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
		 asoc->a_rwnd);

	/* Send a window update SACK if the rwnd has increased by at least the
	 * minimum of the association's PMTU and half of the receive buffer.
	 * The algorithm used is similar to the one described in
	 * Section 4.2.3.3 of RFC 1122.
	 */
	if (sctp_peer_needs_update(asoc)) {
		asoc->a_rwnd = asoc->rwnd;

		pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u "
			 "a_rwnd:%u\n", __func__, asoc, asoc->rwnd,
			 asoc->a_rwnd);

		sack = sctp_make_sack(asoc);
		if (!sack)
			return;

		asoc->peer.sack_needed = 0;

		sctp_outq_tail(&asoc->outqueue, sack, GFP_ATOMIC);

		/* Stop the SACK timer. */
		timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
		if (del_timer(timer))
			sctp_association_put(asoc);
	}
}

/* Decrease asoc's rwnd by len. */
void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
{
	int rx_count;
	int over = 0;

	if (unlikely(!asoc->rwnd || asoc->rwnd_over))
		pr_debug("%s: association:%p has asoc->rwnd:%u, "
			 "asoc->rwnd_over:%u!\n", __func__, asoc,
			 asoc->rwnd, asoc->rwnd_over);

	if (asoc->ep->rcvbuf_policy)
		rx_count = atomic_read(&asoc->rmem_alloc);
	else
		rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);

	/* If we've reached or overflowed our receive buffer, announce
	 * a 0 rwnd if rwnd would still be positive. Store the
	 * potential pressure overflow so that the window can be restored
	 * back to its original value.
	 */
	if (rx_count >= asoc->base.sk->sk_rcvbuf)
		over = 1;

	if (asoc->rwnd >= len) {
		asoc->rwnd -= len;
		if (over) {
			asoc->rwnd_press += asoc->rwnd;
			asoc->rwnd = 0;
		}
	} else {
		asoc->rwnd_over += len - asoc->rwnd;
		asoc->rwnd = 0;
	}

	pr_debug("%s: asoc:%p rwnd decreased by %d to (%u, %u, %u)\n",
		 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
		 asoc->rwnd_press);
}
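
/* Worked example of the rwnd accounting above (editorial note): with
 * rwnd = 1000 and the receive buffer already full, a decrease by 600
 * announces a zero window and parks the remaining 400 bytes in
 * rwnd_press; a further decrease by 300 accumulates in rwnd_over
 * instead. Later increases first pay off rwnd_over and then recover
 * rwnd_press in steps of at most one path MTU per call.
 */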

/* Build the bind address list for the association based on info from the
 * local endpoint and the remote peer.
 */
int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
				     enum sctp_scope scope, gfp_t gfp)
{
	struct sock *sk = asoc->base.sk;
	int flags;

	/* Use scoping rules to determine the subset of addresses from
	 * the endpoint.
	 */
	flags = (PF_INET6 == sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
	if (!inet_v6_ipv6only(sk))
		flags |= SCTP_ADDR4_ALLOWED;
	if (asoc->peer.ipv4_address)
		flags |= SCTP_ADDR4_PEERSUPP;
	if (asoc->peer.ipv6_address)
		flags |= SCTP_ADDR6_PEERSUPP;

	return sctp_bind_addr_copy(asoc->base.net,
				   &asoc->base.bind_addr,
				   &asoc->ep->base.bind_addr,
				   scope, gfp, flags);
}

/* Build the association's bind address list from the cookie. */
int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc,
					 struct sctp_cookie *cookie,
					 gfp_t gfp)
{
	struct sctp_init_chunk *peer_init = (struct sctp_init_chunk *)(cookie + 1);
	int var_size2 = ntohs(peer_init->chunk_hdr.length);
	int var_size3 = cookie->raw_addr_list_len;
	__u8 *raw = (__u8 *)peer_init + var_size2;

	return sctp_raw_to_bind_addrs(&asoc->base.bind_addr, raw, var_size3,
				      asoc->ep->base.bind_addr.port, gfp);
}

/* Lookup laddr in the bind address list of an association. */
int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
			    const union sctp_addr *laddr)
{
	int found = 0;

	if ((asoc->base.bind_addr.port == ntohs(laddr->v4.sin_port)) &&
	    sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
				 sctp_sk(asoc->base.sk)))
		found = 1;

	return found;
}

/* Set an association id for a given association. */
int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp)
{
	bool preload = gfpflags_allow_blocking(gfp);
	int ret;

	/* If the id is already assigned, keep it. */
	if (asoc->assoc_id)
		return 0;

	if (preload)
		idr_preload(gfp);
	spin_lock_bh(&sctp_assocs_id_lock);
	/* 0, 1, 2 are used as SCTP_FUTURE_ASSOC, SCTP_CURRENT_ASSOC and
	 * SCTP_ALL_ASSOC, so an available id must be > SCTP_ALL_ASSOC.
	 */
	ret = idr_alloc_cyclic(&sctp_assocs_id, asoc, SCTP_ALL_ASSOC + 1, 0,
			       GFP_NOWAIT);
	spin_unlock_bh(&sctp_assocs_id_lock);
	if (preload)
		idr_preload_end();
	if (ret < 0)
		return ret;

	asoc->assoc_id = (sctp_assoc_t)ret;
	return 0;
}

/* Free the ASCONF queue. */
static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc)
{
	struct sctp_chunk *asconf;
	struct sctp_chunk *tmp;

	list_for_each_entry_safe(asconf, tmp, &asoc->addip_chunk_list, list) {
		list_del_init(&asconf->list);
		sctp_chunk_free(asconf);
	}
}

/* Free the asconf_ack cache. */
static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc)
{
	struct sctp_chunk *ack;
	struct sctp_chunk *tmp;

	list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
				 transmitted_list) {
		list_del_init(&ack->transmitted_list);
		sctp_chunk_free(ack);
	}
}

/* Clean up the ASCONF_ACK queue. */
void sctp_assoc_clean_asconf_ack_cache(const struct sctp_association *asoc)
{
	struct sctp_chunk *ack;
	struct sctp_chunk *tmp;

	/* We can remove all the entries from the queue up to
	 * the "Peer-Sequence-Number".
	 */
	list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
				 transmitted_list) {
		if (ack->subh.addip_hdr->serial ==
		    htonl(asoc->peer.addip_serial))
			break;

		list_del_init(&ack->transmitted_list);
		sctp_chunk_free(ack);
	}
}

/* Find the ASCONF_ACK whose serial number matches ASCONF. */
struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
					const struct sctp_association *asoc,
					__be32 serial)
{
	struct sctp_chunk *ack;

	/* Walk through the list of cached ASCONF-ACKs and find the
	 * ack chunk whose serial number matches that of the request.
	 */
	list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) {
		if (sctp_chunk_pending(ack))
			continue;
		if (ack->subh.addip_hdr->serial == serial) {
			sctp_chunk_hold(ack);
			return ack;
		}
	}

	return NULL;
}

void sctp_asconf_queue_teardown(struct sctp_association *asoc)
{
	/* Free any cached ASCONF_ACK chunk. */
	sctp_assoc_free_asconf_acks(asoc);

	/* Free the ASCONF queue. */
	sctp_assoc_free_asconf_queue(asoc);

	/* Free any cached ASCONF chunk. */
	if (asoc->addip_last_asconf)
		sctp_chunk_free(asoc->addip_last_asconf);
}