
TOMOYO Linux Cross Reference
Linux/net/sctp/ulpqueue.c


  1 // SPDX-License-Identifier: GPL-2.0-or-later
  2 /* SCTP kernel implementation
  3  * (C) Copyright IBM Corp. 2001, 2004
  4  * Copyright (c) 1999-2000 Cisco, Inc.
  5  * Copyright (c) 1999-2001 Motorola, Inc.
  6  * Copyright (c) 2001 Intel Corp.
  7  * Copyright (c) 2001 Nokia, Inc.
  8  * Copyright (c) 2001 La Monte H.P. Yarroll
  9  *
 10  * This abstraction carries sctp events to the ULP (sockets).
 11  *
 12  * Please send any bug reports or fixes you make to the
 13  * email address(es):
 14  *    lksctp developers <linux-sctp@vger.kernel.org>
 15  *
 16  * Written or modified by:
 17  *    Jon Grimm             <jgrimm@us.ibm.com>
 18  *    La Monte H.P. Yarroll <piggy@acm.org>
 19  *    Sridhar Samudrala     <sri@us.ibm.com>
 20  */
 21 
 22 #include <linux/slab.h>
 23 #include <linux/types.h>
 24 #include <linux/skbuff.h>
 25 #include <net/sock.h>
 26 #include <net/busy_poll.h>
 27 #include <net/sctp/structs.h>
 28 #include <net/sctp/sctp.h>
 29 #include <net/sctp/sm.h>
 30 
 31 /* Forward declarations for internal helpers.  */
 32 static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
 33                                               struct sctp_ulpevent *);
 34 static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
 35                                               struct sctp_ulpevent *);
 36 static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);
 37 
 38 /* 1st Level Abstractions */
 39 
 40 /* Initialize a ULP queue from a block of memory.  */
 41 void sctp_ulpq_init(struct sctp_ulpq *ulpq, struct sctp_association *asoc)
 42 {
 43         memset(ulpq, 0, sizeof(struct sctp_ulpq));
 44 
 45         ulpq->asoc = asoc;
 46         skb_queue_head_init(&ulpq->reasm);
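            /* reasm_uo holds fragments of unordered messages handled by the
             * stream interleaving (I-DATA) code.
             */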
 47         skb_queue_head_init(&ulpq->reasm_uo);
 48         skb_queue_head_init(&ulpq->lobby);
 49         ulpq->pd_mode  = 0;
 50 }
 51 
 52 
 53 /* Flush the reassembly and ordering queues.  */
 54 void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
 55 {
 56         struct sk_buff *skb;
 57         struct sctp_ulpevent *event;
 58 
 59         while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
 60                 event = sctp_skb2event(skb);
 61                 sctp_ulpevent_free(event);
 62         }
 63 
 64         while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
 65                 event = sctp_skb2event(skb);
 66                 sctp_ulpevent_free(event);
 67         }
 68 
 69         while ((skb = __skb_dequeue(&ulpq->reasm_uo)) != NULL) {
 70                 event = sctp_skb2event(skb);
 71                 sctp_ulpevent_free(event);
 72         }
 73 }
 74 
 75 /* Dispose of a ulpqueue.  */
 76 void sctp_ulpq_free(struct sctp_ulpq *ulpq)
 77 {
 78         sctp_ulpq_flush(ulpq);
 79 }
 80 
 81 /* Process an incoming DATA chunk.  */
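    /* Returns 1 if a complete message (MSG_EOR set) was delivered to the
     * ULP, 0 if the data was queued or delivered without reaching
     * end-of-record, or -ENOMEM if no event could be allocated.
     */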
 82 int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 83                         gfp_t gfp)
 84 {
 85         struct sk_buff_head temp;
 86         struct sctp_ulpevent *event;
 87         int event_eor = 0;
 88 
 89         /* Create an event from the incoming chunk. */
 90         event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
 91         if (!event)
 92                 return -ENOMEM;
 93 
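            /* Record the stream sequence number and payload protocol id
             * from the DATA chunk header; ordering and delivery rely on
             * them below.
             */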
 94         event->ssn = ntohs(chunk->subh.data_hdr->ssn);
 95         event->ppid = chunk->subh.data_hdr->ppid;
 96 
 97         /* Do reassembly if needed.  */
 98         event = sctp_ulpq_reasm(ulpq, event);
 99 
100         /* Do ordering if needed.  */
101         if (event) {
102                 /* Create a temporary list to collect chunks on.  */
103                 skb_queue_head_init(&temp);
104                 __skb_queue_tail(&temp, sctp_event2skb(event));
105 
106                 if (event->msg_flags & MSG_EOR)
107                         event = sctp_ulpq_order(ulpq, event);
108         }
109 
 110         /* Send event to the ULP.  'event' is the sctp_ulpevent for the
111          * very first SKB on the 'temp' list.
112          */
113         if (event) {
114                 event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
115                 sctp_ulpq_tail_event(ulpq, &temp);
116         }
117 
118         return event_eor;
119 }
120 
 122 /* Clear the partial delivery mode for this socket.  Note: This
123  * assumes that no association is currently in partial delivery mode.
124  */
125 int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
126 {
127         struct sctp_sock *sp = sctp_sk(sk);
128 
129         if (atomic_dec_and_test(&sp->pd_mode)) {
130                 /* This means there are no other associations in PD, so
131                  * we can go ahead and clear out the lobby in one shot
132                  */
133                 if (!skb_queue_empty(&sp->pd_lobby)) {
134                         skb_queue_splice_tail_init(&sp->pd_lobby,
135                                                    &sk->sk_receive_queue);
136                         return 1;
137                 }
138         } else {
139                 /* There are other associations in PD, so we only need to
140                  * pull stuff out of the lobby that belongs to the
 141                  * association that is exiting PD (all of its notifications
142                  * are posted here).
143                  */
144                 if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
145                         struct sk_buff *skb, *tmp;
146                         struct sctp_ulpevent *event;
147 
148                         sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
149                                 event = sctp_skb2event(skb);
150                                 if (event->asoc == asoc) {
151                                         __skb_unlink(skb, &sp->pd_lobby);
152                                         __skb_queue_tail(&sk->sk_receive_queue,
153                                                          skb);
154                                 }
155                         }
156                 }
157         }
158 
159         return 0;
160 }
161 
162 /* Set the pd_mode on the socket and ulpq */
163 static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
164 {
165         struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);
166 
167         atomic_inc(&sp->pd_mode);
168         ulpq->pd_mode = 1;
169 }
170 
171 /* Clear the pd_mode and restart any pending messages waiting for delivery. */
172 static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
173 {
174         ulpq->pd_mode = 0;
175         sctp_ulpq_reasm_drain(ulpq);
176         return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
177 }
178 
    /* Add a new event for propagation to the ULP.  */
 179 int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sk_buff_head *skb_list)
180 {
181         struct sock *sk = ulpq->asoc->base.sk;
182         struct sctp_sock *sp = sctp_sk(sk);
183         struct sctp_ulpevent *event;
184         struct sk_buff_head *queue;
185         struct sk_buff *skb;
186         int clear_pd = 0;
187 
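            /* The caller guarantees that skb_list is non-empty; delivery
             * decisions below are based on the first event on the list.
             */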
188         skb = __skb_peek(skb_list);
189         event = sctp_skb2event(skb);
190 
191         /* If the socket is just going to throw this away, do not
192          * even try to deliver it.
193          */
194         if (sk->sk_shutdown & RCV_SHUTDOWN &&
195             (sk->sk_shutdown & SEND_SHUTDOWN ||
196              !sctp_ulpevent_is_notification(event)))
197                 goto out_free;
198 
199         if (!sctp_ulpevent_is_notification(event)) {
200                 sk_mark_napi_id(sk, skb);
201                 sk_incoming_cpu_update(sk);
202         }
203         /* Check if the user wishes to receive this event.  */
204         if (!sctp_ulpevent_is_enabled(event, ulpq->asoc->subscribe))
205                 goto out_free;
206 
207         /* If we are in partial delivery mode, post to the lobby until
 208          * partial delivery is cleared, unless, of course, _this_
 209          * association is the cause of the partial delivery.
210          */
211 
212         if (atomic_read(&sp->pd_mode) == 0) {
213                 queue = &sk->sk_receive_queue;
214         } else {
215                 if (ulpq->pd_mode) {
216                         /* If the association is in partial delivery, we
217                          * need to finish delivering the partially processed
218                          * packet before passing any other data.  This is
219                          * because we don't truly support stream interleaving.
220                          */
221                         if ((event->msg_flags & MSG_NOTIFICATION) ||
222                             (SCTP_DATA_NOT_FRAG ==
223                                     (event->msg_flags & SCTP_DATA_FRAG_MASK)))
224                                 queue = &sp->pd_lobby;
225                         else {
226                                 clear_pd = event->msg_flags & MSG_EOR;
227                                 queue = &sk->sk_receive_queue;
228                         }
229                 } else {
230                         /*
231                          * If fragment interleave is enabled, we
232                          * can queue this to the receive queue instead
233                          * of the lobby.
234                          */
235                         if (sp->frag_interleave)
236                                 queue = &sk->sk_receive_queue;
237                         else
238                                 queue = &sp->pd_lobby;
239                 }
240         }
241 
242         skb_queue_splice_tail_init(skb_list, queue);
243 
244         /* Did we just complete partial delivery and need to get
245          * rolling again?  Move pending data to the receive
246          * queue.
247          */
248         if (clear_pd)
249                 sctp_ulpq_clear_pd(ulpq);
250 
251         if (queue == &sk->sk_receive_queue && !sp->data_ready_signalled) {
252                 if (!sock_owned_by_user(sk))
253                         sp->data_ready_signalled = 1;
254                 sk->sk_data_ready(sk);
255         }
256         return 1;
257 
258 out_free:
259         sctp_queue_purge_ulpevents(skb_list);
260 
261         return 0;
262 }
263 
264 /* 2nd Level Abstractions */
265 
266 /* Helper function to store chunks that need to be reassembled.  */
267 static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
268                                          struct sctp_ulpevent *event)
269 {
270         struct sk_buff *pos;
271         struct sctp_ulpevent *cevent;
272         __u32 tsn, ctsn;
273 
274         tsn = event->tsn;
275 
276         /* See if it belongs at the end. */
277         pos = skb_peek_tail(&ulpq->reasm);
278         if (!pos) {
279                 __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
280                 return;
281         }
282 
 283         /* Short circuit: if it sorts after the current tail, just append it. */
284         cevent = sctp_skb2event(pos);
285         ctsn = cevent->tsn;
286         if (TSN_lt(ctsn, tsn)) {
287                 __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
288                 return;
289         }
290 
291         /* Find the right place in this list. We store them by TSN.  */
292         skb_queue_walk(&ulpq->reasm, pos) {
293                 cevent = sctp_skb2event(pos);
294                 ctsn = cevent->tsn;
295 
296                 if (TSN_lt(tsn, ctsn))
297                         break;
298         }
299 
300         /* Insert before pos. */
301         __skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
302 
303 }
304 
305 /* Helper function to return an event corresponding to the reassembled
306  * datagram.
 307  * This routine creates a re-assembled skb given the first and last skbs
 308  * as stored in the reassembly queue.  The skbs may be non-linear if the
 309  * SCTP payload was fragmented on the way and IP had to reassemble them.
 310  * We add the rest of the skbs to the first skb's frag_list.
311  */
312 struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
313                                                   struct sk_buff_head *queue,
314                                                   struct sk_buff *f_frag,
315                                                   struct sk_buff *l_frag)
316 {
317         struct sk_buff *pos;
318         struct sk_buff *new = NULL;
319         struct sctp_ulpevent *event;
320         struct sk_buff *pnext, *last;
321         struct sk_buff *list = skb_shinfo(f_frag)->frag_list;
322 
323         /* Store the pointer to the 2nd skb */
324         if (f_frag == l_frag)
325                 pos = NULL;
326         else
327                 pos = f_frag->next;
328 
329         /* Get the last skb in the f_frag's frag_list if present. */
330         for (last = list; list; last = list, list = list->next)
331                 ;
332 
 333         /* Add the list of remaining fragments to the first fragment's
334          * frag_list.
335          */
336         if (last)
337                 last->next = pos;
338         else {
339                 if (skb_cloned(f_frag)) {
 340                         /* This is a cloned skb; we can't just modify
341                          * the frag_list.  We need a new skb to do that.
342                          * Instead of calling skb_unshare(), we'll do it
343                          * ourselves since we need to delay the free.
344                          */
345                         new = skb_copy(f_frag, GFP_ATOMIC);
346                         if (!new)
347                                 return NULL;    /* try again later */
348 
349                         sctp_skb_set_owner_r(new, f_frag->sk);
350 
351                         skb_shinfo(new)->frag_list = pos;
352                 } else
353                         skb_shinfo(f_frag)->frag_list = pos;
354         }
355 
356         /* Remove the first fragment from the reassembly queue.  */
357         __skb_unlink(f_frag, queue);
358 
359         /* if we did unshare, then free the old skb and re-assign */
360         if (new) {
361                 kfree_skb(f_frag);
362                 f_frag = new;
363         }
364 
365         while (pos) {
366 
367                 pnext = pos->next;
368 
369                 /* Update the len and data_len fields of the first fragment. */
370                 f_frag->len += pos->len;
371                 f_frag->data_len += pos->len;
372 
373                 /* Remove the fragment from the reassembly queue.  */
374                 __skb_unlink(pos, queue);
375 
376                 /* Break if we have reached the last fragment.  */
377                 if (pos == l_frag)
378                         break;
379                 pos->next = pnext;
380                 pos = pnext;
381         }
382 
383         event = sctp_skb2event(f_frag);
384         SCTP_INC_STATS(net, SCTP_MIB_REASMUSRMSGS);
385 
386         return event;
387 }
388 
389 
390 /* Helper function to check if an incoming chunk has filled up the last
 391  * missing fragment in an SCTP datagram and, if so, return the corresponding event.
392  */
393 static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
394 {
395         struct sk_buff *pos;
396         struct sctp_ulpevent *cevent;
397         struct sk_buff *first_frag = NULL;
398         __u32 ctsn, next_tsn;
399         struct sctp_ulpevent *retval = NULL;
400         struct sk_buff *pd_first = NULL;
401         struct sk_buff *pd_last = NULL;
402         size_t pd_len = 0;
403         struct sctp_association *asoc;
404         u32 pd_point;
405 
 406         /* Initialized to 0 just to avoid a compiler warning.  It will
407          * never be used with this value. It is referenced only after it
408          * is set when we find the first fragment of a message.
409          */
410         next_tsn = 0;
411 
412         /* The chunks are held in the reasm queue sorted by TSN.
413          * Walk through the queue sequentially and look for a sequence of
414          * fragmented chunks that complete a datagram.
415          * 'first_frag' and next_tsn are reset when we find a chunk which
416          * is the first fragment of a datagram. Once these 2 fields are set
417          * we expect to find the remaining middle fragments and the last
418          * fragment in order. If not, first_frag is reset to NULL and we
419          * start the next pass when we find another first fragment.
420          *
 421          * There is a potential to do partial delivery if the user sets the
 422          * SCTP_PARTIAL_DELIVERY_POINT option.  Let's count some things here
 423          * to see if we can do PD.
424          */
425         skb_queue_walk(&ulpq->reasm, pos) {
426                 cevent = sctp_skb2event(pos);
427                 ctsn = cevent->tsn;
428 
429                 switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
430                 case SCTP_DATA_FIRST_FRAG:
431                         /* If this "FIRST_FRAG" is the first
432                          * element in the queue, then count it towards
433                          * possible PD.
434                          */
435                         if (skb_queue_is_first(&ulpq->reasm, pos)) {
436                             pd_first = pos;
437                             pd_last = pos;
438                             pd_len = pos->len;
439                         } else {
440                             pd_first = NULL;
441                             pd_last = NULL;
442                             pd_len = 0;
443                         }
444 
445                         first_frag = pos;
446                         next_tsn = ctsn + 1;
447                         break;
448 
449                 case SCTP_DATA_MIDDLE_FRAG:
450                         if ((first_frag) && (ctsn == next_tsn)) {
451                                 next_tsn++;
452                                 if (pd_first) {
453                                     pd_last = pos;
454                                     pd_len += pos->len;
455                                 }
456                         } else
457                                 first_frag = NULL;
458                         break;
459 
460                 case SCTP_DATA_LAST_FRAG:
461                         if (first_frag && (ctsn == next_tsn))
462                                 goto found;
463                         else
464                                 first_frag = NULL;
465                         break;
466                 }
467         }
468 
469         asoc = ulpq->asoc;
470         if (pd_first) {
 471                 /* Make sure we can enter partial delivery.
 472                  * We can trigger partial delivery only if fragment
 473                  * interleave is set, or the socket is not already
 474                  * in partial delivery.
475                  */
476                 if (!sctp_sk(asoc->base.sk)->frag_interleave &&
477                     atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
478                         goto done;
479 
480                 cevent = sctp_skb2event(pd_first);
481                 pd_point = sctp_sk(asoc->base.sk)->pd_point;
482                 if (pd_point && pd_point <= pd_len) {
483                         retval = sctp_make_reassembled_event(asoc->base.net,
484                                                              &ulpq->reasm,
485                                                              pd_first, pd_last);
486                         if (retval)
487                                 sctp_ulpq_set_pd(ulpq);
488                 }
489         }
490 done:
491         return retval;
492 found:
493         retval = sctp_make_reassembled_event(ulpq->asoc->base.net,
494                                              &ulpq->reasm, first_frag, pos);
495         if (retval)
496                 retval->msg_flags |= MSG_EOR;
497         goto done;
498 }
499 
500 /* Retrieve the next set of fragments of a partial message. */
501 static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
502 {
503         struct sk_buff *pos, *last_frag, *first_frag;
504         struct sctp_ulpevent *cevent;
505         __u32 ctsn, next_tsn;
506         int is_last;
507         struct sctp_ulpevent *retval;
508 
509         /* The chunks are held in the reasm queue sorted by TSN.
510          * Walk through the queue sequentially and look for the first
511          * sequence of fragmented chunks.
512          */
513 
514         if (skb_queue_empty(&ulpq->reasm))
515                 return NULL;
516 
517         last_frag = first_frag = NULL;
518         retval = NULL;
519         next_tsn = 0;
520         is_last = 0;
521 
522         skb_queue_walk(&ulpq->reasm, pos) {
523                 cevent = sctp_skb2event(pos);
524                 ctsn = cevent->tsn;
525 
526                 switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
527                 case SCTP_DATA_FIRST_FRAG:
528                         if (!first_frag)
529                                 return NULL;
530                         goto done;
531                 case SCTP_DATA_MIDDLE_FRAG:
532                         if (!first_frag) {
533                                 first_frag = pos;
534                                 next_tsn = ctsn + 1;
535                                 last_frag = pos;
536                         } else if (next_tsn == ctsn) {
537                                 next_tsn++;
538                                 last_frag = pos;
539                         } else
540                                 goto done;
541                         break;
542                 case SCTP_DATA_LAST_FRAG:
543                         if (!first_frag)
544                                 first_frag = pos;
545                         else if (ctsn != next_tsn)
546                                 goto done;
547                         last_frag = pos;
548                         is_last = 1;
549                         goto done;
550                 default:
551                         return NULL;
552                 }
553         }
554 
555         /* We have the reassembled event. There is no need to look
556          * further.
557          */
558 done:
559         retval = sctp_make_reassembled_event(ulpq->asoc->base.net, &ulpq->reasm,
560                                              first_frag, last_frag);
561         if (retval && is_last)
562                 retval->msg_flags |= MSG_EOR;
563 
564         return retval;
565 }
566 
567 
568 /* Helper function to reassemble chunks.  Hold chunks on the reasm queue that
569  * need reassembling.
570  */
571 static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
572                                                 struct sctp_ulpevent *event)
573 {
574         struct sctp_ulpevent *retval = NULL;
575 
576         /* Check if this is part of a fragmented message.  */
577         if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
578                 event->msg_flags |= MSG_EOR;
579                 return event;
580         }
581 
582         sctp_ulpq_store_reasm(ulpq, event);
583         if (!ulpq->pd_mode)
584                 retval = sctp_ulpq_retrieve_reassembled(ulpq);
585         else {
586                 __u32 ctsn, ctsnap;
587 
588                 /* Do not even bother unless this is the next tsn to
589                  * be delivered.
590                  */
591                 ctsn = event->tsn;
592                 ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
593                 if (TSN_lte(ctsn, ctsnap))
594                         retval = sctp_ulpq_retrieve_partial(ulpq);
595         }
596 
597         return retval;
598 }
599 
600 /* Retrieve the first part (sequential fragments) for partial delivery.  */
601 static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
602 {
603         struct sk_buff *pos, *last_frag, *first_frag;
604         struct sctp_ulpevent *cevent;
605         __u32 ctsn, next_tsn;
606         struct sctp_ulpevent *retval;
607 
608         /* The chunks are held in the reasm queue sorted by TSN.
609          * Walk through the queue sequentially and look for a sequence of
610          * fragmented chunks that start a datagram.
611          */
612 
613         if (skb_queue_empty(&ulpq->reasm))
614                 return NULL;
615 
616         last_frag = first_frag = NULL;
617         retval = NULL;
618         next_tsn = 0;
619 
620         skb_queue_walk(&ulpq->reasm, pos) {
621                 cevent = sctp_skb2event(pos);
622                 ctsn = cevent->tsn;
623 
624                 switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
625                 case SCTP_DATA_FIRST_FRAG:
626                         if (!first_frag) {
627                                 first_frag = pos;
628                                 next_tsn = ctsn + 1;
629                                 last_frag = pos;
630                         } else
631                                 goto done;
632                         break;
633 
634                 case SCTP_DATA_MIDDLE_FRAG:
635                         if (!first_frag)
636                                 return NULL;
637                         if (ctsn == next_tsn) {
638                                 next_tsn++;
639                                 last_frag = pos;
640                         } else
641                                 goto done;
642                         break;
643 
644                 case SCTP_DATA_LAST_FRAG:
645                         if (!first_frag)
646                                 return NULL;
647                         else
648                                 goto done;
649                         break;
650 
651                 default:
652                         return NULL;
653                 }
654         }
655 
656         /* We have the reassembled event. There is no need to look
657          * further.
658          */
659 done:
660         retval = sctp_make_reassembled_event(ulpq->asoc->base.net, &ulpq->reasm,
661                                              first_frag, last_frag);
662         return retval;
663 }
664 
665 /*
666  * Flush out stale fragments from the reassembly queue when processing
667  * a Forward TSN.
668  *
669  * RFC 3758, Section 3.6
670  *
671  * After receiving and processing a FORWARD TSN, the data receiver MUST
672  * take cautions in updating its re-assembly queue.  The receiver MUST
673  * remove any partially reassembled message, which is still missing one
674  * or more TSNs earlier than or equal to the new cumulative TSN point.
675  * In the event that the receiver has invoked the partial delivery API,
676  * a notification SHOULD also be generated to inform the upper layer API
677  * that the message being partially delivered will NOT be completed.
678  */
679 void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
680 {
681         struct sk_buff *pos, *tmp;
682         struct sctp_ulpevent *event;
683         __u32 tsn;
684 
685         if (skb_queue_empty(&ulpq->reasm))
686                 return;
687 
688         skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
689                 event = sctp_skb2event(pos);
690                 tsn = event->tsn;
691 
692                 /* Since the entire message must be abandoned by the
693                  * sender (item A3 in Section 3.5, RFC 3758), we can
 694                  * free all fragments on the list that are less than
 695                  * or equal to the ctsn_point.
696                  */
697                 if (TSN_lte(tsn, fwd_tsn)) {
698                         __skb_unlink(pos, &ulpq->reasm);
699                         sctp_ulpevent_free(event);
700                 } else
701                         break;
702         }
703 }
704 
705 /*
 706  * Drain the reassembly queue.  If we just cleared partial delivery, it
707  * is possible that the reassembly queue will contain already reassembled
708  * messages.  Retrieve any such messages and give them to the user.
709  */
710 static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
711 {
712         struct sctp_ulpevent *event = NULL;
713 
714         if (skb_queue_empty(&ulpq->reasm))
715                 return;
716 
717         while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
718                 struct sk_buff_head temp;
719 
720                 skb_queue_head_init(&temp);
721                 __skb_queue_tail(&temp, sctp_event2skb(event));
722 
723                 /* Do ordering if needed.  */
724                 if (event->msg_flags & MSG_EOR)
725                         event = sctp_ulpq_order(ulpq, event);
726 
727                 /* Send event to the ULP.  'event' is the
 728                  * sctp_ulpevent for the very first SKB on the 'temp' list.
729                  */
730                 if (event)
731                         sctp_ulpq_tail_event(ulpq, &temp);
732         }
733 }
734 
735 
736 /* Helper function to gather skbs that have possibly become
737  * ordered by an incoming chunk.
738  */
739 static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
740                                               struct sctp_ulpevent *event)
741 {
742         struct sk_buff_head *event_list;
743         struct sk_buff *pos, *tmp;
744         struct sctp_ulpevent *cevent;
745         struct sctp_stream *stream;
746         __u16 sid, csid, cssn;
747 
748         sid = event->stream;
749         stream  = &ulpq->asoc->stream;
750 
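            /* The event's skb is the first entry on the caller's temporary
             * list, so its ->prev pointer still points at that list's
             * sk_buff_head; recover the list so gathered skbs can be
             * appended to it.
             */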
751         event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;
752 
753         /* We are holding the chunks by stream, by SSN.  */
754         sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
755                 cevent = (struct sctp_ulpevent *) pos->cb;
756                 csid = cevent->stream;
757                 cssn = cevent->ssn;
758 
759                 /* Have we gone too far?  */
760                 if (csid > sid)
761                         break;
762 
763                 /* Have we not gone far enough?  */
764                 if (csid < sid)
765                         continue;
766 
767                 if (cssn != sctp_ssn_peek(stream, in, sid))
768                         break;
769 
770                 /* Found it, so mark in the stream. */
771                 sctp_ssn_next(stream, in, sid);
772 
773                 __skb_unlink(pos, &ulpq->lobby);
774 
775                 /* Attach all gathered skbs to the event.  */
776                 __skb_queue_tail(event_list, pos);
777         }
778 }
779 
780 /* Helper function to store chunks needing ordering.  */
781 static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
782                                            struct sctp_ulpevent *event)
783 {
784         struct sk_buff *pos;
785         struct sctp_ulpevent *cevent;
786         __u16 sid, csid;
787         __u16 ssn, cssn;
788 
789         pos = skb_peek_tail(&ulpq->lobby);
790         if (!pos) {
791                 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
792                 return;
793         }
794 
795         sid = event->stream;
796         ssn = event->ssn;
797 
798         cevent = (struct sctp_ulpevent *) pos->cb;
799         csid = cevent->stream;
800         cssn = cevent->ssn;
801         if (sid > csid) {
802                 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
803                 return;
804         }
805 
806         if ((sid == csid) && SSN_lt(cssn, ssn)) {
807                 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
808                 return;
809         }
810 
811         /* Find the right place in this list.  We store them by
812          * stream ID and then by SSN.
813          */
814         skb_queue_walk(&ulpq->lobby, pos) {
815                 cevent = (struct sctp_ulpevent *) pos->cb;
816                 csid = cevent->stream;
817                 cssn = cevent->ssn;
818 
819                 if (csid > sid)
820                         break;
821                 if (csid == sid && SSN_lt(ssn, cssn))
822                         break;
823         }
824 
825 
826         /* Insert before pos. */
827         __skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
828 }
829 
830 static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
831                                              struct sctp_ulpevent *event)
832 {
833         __u16 sid, ssn;
834         struct sctp_stream *stream;
835 
836         /* Check if this message needs ordering.  */
837         if (event->msg_flags & SCTP_DATA_UNORDERED)
838                 return event;
839 
840         /* Note: The stream ID must be verified before this routine.  */
841         sid = event->stream;
842         ssn = event->ssn;
843         stream  = &ulpq->asoc->stream;
844 
845         /* Is this the expected SSN for this stream ID?  */
846         if (ssn != sctp_ssn_peek(stream, in, sid)) {
847                 /* We've received something out of order, so find where it
848                  * needs to be placed.  We order by stream and then by SSN.
849                  */
850                 sctp_ulpq_store_ordered(ulpq, event);
851                 return NULL;
852         }
853 
854         /* Mark that the next chunk has been found.  */
855         sctp_ssn_next(stream, in, sid);
856 
857         /* Go find any other chunks that were waiting for
858          * ordering.
859          */
860         sctp_ulpq_retrieve_ordered(ulpq, event);
861 
862         return event;
863 }
864 
865 /* Helper function to gather skbs that have possibly become
 866  * ordered by a Forward TSN skipping over their dependencies.
867  */
868 static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
869 {
870         struct sk_buff *pos, *tmp;
871         struct sctp_ulpevent *cevent;
872         struct sctp_ulpevent *event;
873         struct sctp_stream *stream;
874         struct sk_buff_head temp;
875         struct sk_buff_head *lobby = &ulpq->lobby;
876         __u16 csid, cssn;
877 
878         stream = &ulpq->asoc->stream;
879 
880         /* We are holding the chunks by stream, by SSN.  */
881         skb_queue_head_init(&temp);
882         event = NULL;
883         sctp_skb_for_each(pos, lobby, tmp) {
884                 cevent = (struct sctp_ulpevent *) pos->cb;
885                 csid = cevent->stream;
886                 cssn = cevent->ssn;
887 
888                 /* Have we gone too far?  */
889                 if (csid > sid)
890                         break;
891 
892                 /* Have we not gone far enough?  */
893                 if (csid < sid)
894                         continue;
895 
896                 /* see if this ssn has been marked by skipping */
897                 if (!SSN_lt(cssn, sctp_ssn_peek(stream, in, csid)))
898                         break;
899 
900                 __skb_unlink(pos, lobby);
901                 if (!event)
 902                         /* Remember the event of the first reaped skb. */
903                         event = sctp_skb2event(pos);
904 
905                 /* Attach all gathered skbs to the event.  */
906                 __skb_queue_tail(&temp, pos);
907         }
908 
909         /* If we didn't reap any data, see if the next expected SSN
910          * is next on the queue and if so, use that.
911          */
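            /* 'pos' may now point at the lobby head itself; the cast below
             * is safe because sk_buff_head begins with the same next/prev
             * pointers as sk_buff.
             */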
912         if (event == NULL && pos != (struct sk_buff *)lobby) {
913                 cevent = (struct sctp_ulpevent *) pos->cb;
914                 csid = cevent->stream;
915                 cssn = cevent->ssn;
916 
917                 if (csid == sid && cssn == sctp_ssn_peek(stream, in, csid)) {
918                         sctp_ssn_next(stream, in, csid);
919                         __skb_unlink(pos, lobby);
920                         __skb_queue_tail(&temp, pos);
921                         event = sctp_skb2event(pos);
922                 }
923         }
924 
 925         /* Send event to the ULP.  'event' is the sctp_ulpevent for the
926          * very first SKB on the 'temp' list.
927          */
928         if (event) {
 929                 /* see if we have more ordered data that we can deliver */
930                 sctp_ulpq_retrieve_ordered(ulpq, event);
931                 sctp_ulpq_tail_event(ulpq, &temp);
932         }
933 }
934 
 935 /* Skip over an SSN.  This is used during the processing of a
 936  * Forward TSN chunk to skip over the abandoned ordered data.
937  */
938 void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
939 {
940         struct sctp_stream *stream;
941 
942         /* Note: The stream ID must be verified before this routine.  */
943         stream  = &ulpq->asoc->stream;
944 
 945         /* Is this an old SSN?  If so, ignore it. */
946         if (SSN_lt(ssn, sctp_ssn_peek(stream, in, sid)))
947                 return;
948 
949         /* Mark that we are no longer expecting this SSN or lower. */
950         sctp_ssn_skip(stream, in, sid, ssn);
951 
952         /* Go find any other chunks that were waiting for
953          * ordering and deliver them if needed.
954          */
955         sctp_ulpq_reap_ordered(ulpq, sid);
956 }
957 
958 __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq, struct sk_buff_head *list,
959                             __u16 needed)
960 {
961         __u16 freed = 0;
962         __u32 tsn, last_tsn;
963         struct sk_buff *skb, *flist, *last;
964         struct sctp_ulpevent *event;
965         struct sctp_tsnmap *tsnmap;
966 
967         tsnmap = &ulpq->asoc->peer.tsn_map;
968 
969         while ((skb = skb_peek_tail(list)) != NULL) {
970                 event = sctp_skb2event(skb);
971                 tsn = event->tsn;
972 
973                 /* Don't renege below the Cumulative TSN ACK Point. */
974                 if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
975                         break;
976 
 977                 /* Events in the ordering queue may have multiple fragments
978                  * corresponding to additional TSNs.  Sum the total
979                  * freed space; find the last TSN.
980                  */
981                 freed += skb_headlen(skb);
982                 flist = skb_shinfo(skb)->frag_list;
983                 for (last = flist; flist; flist = flist->next) {
984                         last = flist;
985                         freed += skb_headlen(last);
986                 }
987                 if (last)
988                         last_tsn = sctp_skb2event(last)->tsn;
989                 else
990                         last_tsn = tsn;
991 
992                 /* Unlink the event, then renege all applicable TSNs. */
993                 __skb_unlink(skb, list);
994                 sctp_ulpevent_free(event);
995                 while (TSN_lte(tsn, last_tsn)) {
996                         sctp_tsnmap_renege(tsnmap, tsn);
997                         tsn++;
998                 }
999                 if (freed >= needed)
1000                         return freed;
1001         }
1002 
1003         return freed;
1004 }
1005 
1006 /* Renege 'needed' bytes from the ordering queue. */
1007 static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
1008 {
1009         return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
1010 }
1011 
1012 /* Renege 'needed' bytes from the reassembly queue. */
1013 static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
1014 {
1015         return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
1016 }
1017 
1018 /* Partially deliver the first message as there is pressure on rwnd. */
1019 void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
1020                                 gfp_t gfp)
1021 {
1022         struct sctp_ulpevent *event;
1023         struct sctp_association *asoc;
1024         struct sctp_sock *sp;
1025         __u32 ctsn;
1026         struct sk_buff *skb;
1027 
1028         asoc = ulpq->asoc;
1029         sp = sctp_sk(asoc->base.sk);
1030 
1031         /* If the association is already in Partial Delivery mode
1032          * we have nothing to do.
1033          */
1034         if (ulpq->pd_mode)
1035                 return;
1036 
1037         /* Data must be at or below the Cumulative TSN ACK Point to
1038          * start partial delivery.
1039          */
1040         skb = skb_peek(&asoc->ulpq.reasm);
1041         if (skb != NULL) {
1042                 ctsn = sctp_skb2event(skb)->tsn;
1043                 if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
1044                         return;
1045         }
1046 
1047         /* If the user enabled the fragment interleave socket option,
1048          * multiple associations can enter partial delivery.
1049          * Otherwise, we can only enter partial delivery if the
1050          * socket is not already in partial delivery mode.
1051          */
1052         if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
1053                 /* Is partial delivery possible?  */
1054                 event = sctp_ulpq_retrieve_first(ulpq);
1055                 /* Send event to the ULP.   */
1056                 if (event) {
1057                         struct sk_buff_head temp;
1058 
1059                         skb_queue_head_init(&temp);
1060                         __skb_queue_tail(&temp, sctp_event2skb(event));
1061                         sctp_ulpq_tail_event(ulpq, &temp);
1062                         sctp_ulpq_set_pd(ulpq);
1063                         return;
1064                 }
1065         }
1066 }
1067 
1068 /* Renege some packets to make room for an incoming chunk.  */
1069 void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
1070                       gfp_t gfp)
1071 {
1072         struct sctp_association *asoc = ulpq->asoc;
1073         __u32 freed = 0;
1074         __u16 needed;
1075 
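            /* Bytes needed to hold the incoming chunk: its payload size,
             * i.e. the chunk length minus the DATA chunk header.
             */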
1076         needed = ntohs(chunk->chunk_hdr->length) -
1077                  sizeof(struct sctp_data_chunk);
1078 
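            /* Renege only while nothing is waiting on the socket receive
             * queue; data already readable by the application is never
             * reneged.
             */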
1079         if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
1080                 freed = sctp_ulpq_renege_order(ulpq, needed);
1081                 if (freed < needed)
1082                         freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
1083         }
1084         /* If able to free enough room, accept this chunk. */
1085         if (sk_rmem_schedule(asoc->base.sk, chunk->skb, needed) &&
1086             freed >= needed) {
1087                 int retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
1088                 /*
1089                  * Enter partial delivery if chunk has not been
1090                  * delivered; otherwise, drain the reassembly queue.
1091                  */
1092                 if (retval <= 0)
1093                         sctp_ulpq_partial_delivery(ulpq, gfp);
1094                 else if (retval == 1)
1095                         sctp_ulpq_reasm_drain(ulpq);
1096         }
1097 }
1098 
1099 /* Notify the application if an association is aborted and in
1100  * partial delivery mode.  Send up any pending received messages.
1101  */
1102 void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
1103 {
1104         struct sctp_ulpevent *ev = NULL;
1105         struct sctp_sock *sp;
1106         struct sock *sk;
1107 
1108         if (!ulpq->pd_mode)
1109                 return;
1110 
1111         sk = ulpq->asoc->base.sk;
1112         sp = sctp_sk(sk);
1113         if (sctp_ulpevent_type_enabled(ulpq->asoc->subscribe,
1114                                        SCTP_PARTIAL_DELIVERY_EVENT))
1115                 ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
1116                                               SCTP_PARTIAL_DELIVERY_ABORTED,
1117                                               0, 0, 0, gfp);
1118         if (ev)
1119                 __skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
1120 
1121         /* If there is data waiting, send it up the socket now. */
1122         if ((sctp_ulpq_clear_pd(ulpq) || ev) && !sp->data_ready_signalled) {
1123                 sp->data_ready_signalled = 1;
1124                 sk->sk_data_ready(sk);
1125         }
1126 }
1127 
