FreeBSD kernel IPv4 code
sctp_indata.c
1/*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
5 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * a) Redistributions of source code must retain the above copyright notice,
12 * this list of conditions and the following disclaimer.
13 *
14 * b) Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the distribution.
17 *
18 * c) Neither the name of Cisco Systems, Inc. nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35#include <sys/cdefs.h>
36__FBSDID("$FreeBSD$");
37
38#include <netinet/sctp_os.h>
39#include <sys/proc.h>
40#include <netinet/sctp_var.h>
41#include <netinet/sctp_sysctl.h>
42#include <netinet/sctp_header.h>
43#include <netinet/sctp_pcb.h>
44#include <netinet/sctputil.h>
45#include <netinet/sctp_output.h>
46#include <netinet/sctp_uio.h>
47#include <netinet/sctp_auth.h>
48#include <netinet/sctp_timer.h>
49#include <netinet/sctp_asconf.h>
50#include <netinet/sctp_indata.h>
51#include <netinet/sctp_bsd_addr.h>
52#include <netinet/sctp_input.h>
53#include <netinet/sctp_crc32.h>
54#include <netinet/sctp_lock_bsd.h>
55/*
56 * NOTES: On the outbound side of things I need to check the sack timer to
57 * see if I should generate a SACK into the chunk queue (if I have data to
58 * send, that is, and will be sending it) for bundling.
59 *
60 * The callback in sctp_usrreq.c will get called when the socket is read from.
61 * This will cause sctp_service_queues() to get called on the top entry in
62 * the list.
63 */
64static uint32_t
65sctp_add_chk_to_control(struct sctp_queued_to_read *control,
66 struct sctp_stream_in *strm,
67 struct sctp_tcb *stcb,
68 struct sctp_association *asoc,
69 struct sctp_tmit_chunk *chk, int hold_rlock);
70
71void
72sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
73{
74 asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
75}
76
77/* Calculate what the rwnd would be */
78uint32_t
79sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
80{
81 uint32_t calc = 0;
82
83 /*
84 * This is really set wrong with respect to a 1-2-m socket. Since
85 * the sb_cc is the count that everyone has put up. When we re-write
86 * sctp_soreceive then we will fix this so that ONLY this
87 * association's data is taken into account.
88 */
89 if (stcb->sctp_socket == NULL) {
90 return (calc);
91 }
92
93 KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
94 ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
95 KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
96 ("size_on_all_streams is %u", asoc->size_on_all_streams));
97 if (stcb->asoc.sb_cc == 0 &&
98 asoc->cnt_on_reasm_queue == 0 &&
99 asoc->cnt_on_all_streams == 0) {
100 /* Full rwnd granted */
101 calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
102 return (calc);
103 }
104 /* get actual space */
105 calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
106 /*
107 * take out what has NOT been put on the socket queue and what we
108 * still hold for putting up.
109 */
110 calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
111 asoc->cnt_on_reasm_queue * MSIZE));
112 calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
113 asoc->cnt_on_all_streams * MSIZE));
114 if (calc == 0) {
115 /* out of space */
116 return (calc);
117 }
118
119 /* what is the overhead of all these rwnd's */
120 calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
121 /*
122 * If the window gets too small due to ctrl-stuff, reduce it to 1,
123 * even if it is 0. SWS (silly window syndrome) avoidance engaged.
124 */
125 if (calc < stcb->asoc.my_rwnd_control_len) {
126 calc = 1;
127 }
128 return (calc);
129}
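The window arithmetic above reduces to a subtract-and-clamp. A minimal standalone sketch of the same logic, with illustrative parameter names (sb_space, queued_bytes, ctrl_overhead are not kernel APIs):

#include <stdint.h>

static uint32_t
calc_rwnd_sketch(uint32_t sb_space, uint32_t queued_bytes,
    uint32_t ctrl_overhead)
{
	uint32_t calc = sb_space;

	/* Data received but not yet handed to the socket buffer. */
	calc = (calc > queued_bytes) ? calc - queued_bytes : 0;
	if (calc == 0)
		return (0);	/* out of space */
	/* Overhead consumed by rwnd control information. */
	calc = (calc > ctrl_overhead) ? calc - ctrl_overhead : 0;
	/* SWS avoidance: never advertise a window smaller than 1 byte. */
	if (calc < ctrl_overhead)
		calc = 1;
	return (calc);
}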
130
131/*
132 * Build out our readq entry based on the incoming packet.
133 */
134struct sctp_queued_to_read *
135sctp_build_readq_entry(struct sctp_tcb *stcb,
136 struct sctp_nets *net,
137 uint32_t tsn, uint32_t ppid,
138 uint32_t context, uint16_t sid,
139 uint32_t mid, uint8_t flags,
140 struct mbuf *dm)
141{
142 struct sctp_queued_to_read *read_queue_e = NULL;
143
144 sctp_alloc_a_readq(stcb, read_queue_e);
145 if (read_queue_e == NULL) {
146 goto failed_build;
147 }
148 memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
149 read_queue_e->sinfo_stream = sid;
150 read_queue_e->sinfo_flags = (flags << 8);
151 read_queue_e->sinfo_ppid = ppid;
152 read_queue_e->sinfo_context = context;
153 read_queue_e->sinfo_tsn = tsn;
154 read_queue_e->sinfo_cumtsn = tsn;
155 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
156 read_queue_e->mid = mid;
157 read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
158 TAILQ_INIT(&read_queue_e->reasm);
159 read_queue_e->whoFrom = net;
160 atomic_add_int(&net->ref_count, 1);
161 read_queue_e->data = dm;
162 read_queue_e->stcb = stcb;
163 read_queue_e->port_from = stcb->rport;
164 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
165 read_queue_e->do_not_ref_stcb = 1;
166 }
167failed_build:
168 return (read_queue_e);
169}
170
171struct mbuf *
172sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
173{
174 struct sctp_extrcvinfo *seinfo;
175 struct sctp_sndrcvinfo *outinfo;
176 struct sctp_rcvinfo *rcvinfo;
177 struct sctp_nxtinfo *nxtinfo;
178 struct cmsghdr *cmh;
179 struct mbuf *ret;
180 int len;
181 int use_extended;
182 int provide_nxt;
183
184 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
185     sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
186     sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
187 /* user does not want any ancillary data */
188 return (NULL);
189 }
190
191 len = 0;
192 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
193 len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
194 }
195 seinfo = (struct sctp_extrcvinfo *)sinfo;
196 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
197     (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
198 provide_nxt = 1;
199 len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
200 } else {
201 provide_nxt = 0;
202 }
203 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
204 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
205 use_extended = 1;
206 len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
207 } else {
208 use_extended = 0;
209 len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
210 }
211 } else {
212 use_extended = 0;
213 }
214
215 ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
216 if (ret == NULL) {
217 /* No space */
218 return (ret);
219 }
220 SCTP_BUF_LEN(ret) = 0;
221
222 /* We need a CMSG header followed by the struct */
223 cmh = mtod(ret, struct cmsghdr *);
224 /*
225 * Make sure that there is no un-initialized padding between the
226 * cmsg header and cmsg data and after the cmsg data.
227 */
228 memset(cmh, 0, len);
229 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
230 cmh->cmsg_level = IPPROTO_SCTP;
231 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
232 cmh->cmsg_type = SCTP_RCVINFO;
233 rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
234 rcvinfo->rcv_sid = sinfo->sinfo_stream;
235 rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
236 rcvinfo->rcv_flags = sinfo->sinfo_flags;
237 rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
238 rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
239 rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
240 rcvinfo->rcv_context = sinfo->sinfo_context;
241 rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
242 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
243 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
244 }
245 if (provide_nxt) {
246 cmh->cmsg_level = IPPROTO_SCTP;
247 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
248 cmh->cmsg_type = SCTP_NXTINFO;
249 nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
250 nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
251 nxtinfo->nxt_flags = 0;
252 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
253 nxtinfo->nxt_flags |= SCTP_UNORDERED;
254 }
255 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
256 nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
257 }
258 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
259 nxtinfo->nxt_flags |= SCTP_COMPLETE;
260 }
261 nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
262 nxtinfo->nxt_length = seinfo->serinfo_next_length;
263 nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
264 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
265 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
266 }
267 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
268 cmh->cmsg_level = IPPROTO_SCTP;
269 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
270 if (use_extended) {
271 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
272 cmh->cmsg_type = SCTP_EXTRCV;
273 memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
274 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
275 } else {
276 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
277 cmh->cmsg_type = SCTP_SNDRCV;
278 *outinfo = *sinfo;
279 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
280 }
281 }
282 return (ret);
283}
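For context, this is how a userland receiver would walk the ancillary data assembled above. A hedged sketch; it assumes SCTP_RECVRCVINFO was enabled on the socket with setsockopt():

#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>
#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>

static void
print_rcvinfo(struct msghdr *msg)
{
	struct cmsghdr *cmh;
	struct sctp_rcvinfo rcv;

	for (cmh = CMSG_FIRSTHDR(msg); cmh != NULL;
	    cmh = CMSG_NXTHDR(msg, cmh)) {
		if (cmh->cmsg_level != IPPROTO_SCTP ||
		    cmh->cmsg_type != SCTP_RCVINFO)
			continue;
		memcpy(&rcv, CMSG_DATA(cmh), sizeof(rcv));
		/* ppid travels in network byte order. */
		printf("sid=%u ssn=%u tsn=%u ppid=%u\n",
		    (unsigned)rcv.rcv_sid, (unsigned)rcv.rcv_ssn,
		    (unsigned)rcv.rcv_tsn, (unsigned)ntohl(rcv.rcv_ppid));
	}
}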
284
285static void
286sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
287{
288 uint32_t gap, i;
289 int in_r, in_nr;
290
291 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
292 return;
293 }
294 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
295 /*
296 * This tsn is behind the cum ack and thus we don't need to
297 * worry about it being moved from one to the other.
298 */
299 return;
300 }
301 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
302 in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
303 in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
304 KASSERT(in_r || in_nr, ("%s: Things are really messed up now", __func__));
305 if (!in_nr) {
306 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
307 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
308 asoc->highest_tsn_inside_nr_map = tsn;
309 }
310 }
311 if (in_r) {
312 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
313 if (tsn == asoc->highest_tsn_inside_map) {
314 /* We must back down to see what the new highest is. */
315 for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
316 SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
317 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
318 asoc->highest_tsn_inside_map = i;
319 break;
320 }
321 }
322 if (!SCTP_TSN_GE(i, asoc->mapping_array_base_tsn)) {
323 asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
324 }
325 }
326 }
327}
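The gap arithmetic used throughout this file reduces to modular subtraction plus a bit test. A standalone sketch of the SCTP_CALC_TSN_TO_GAP / SCTP_IS_TSN_PRESENT idea (not the kernel macros themselves):

#include <stdint.h>

static uint32_t
tsn_to_gap(uint32_t tsn, uint32_t base_tsn)
{
	/* uint32_t subtraction is modular, so this survives TSN wrap. */
	return (tsn - base_tsn);
}

static int
tsn_present(const uint8_t *map, uint32_t gap)
{
	/* One bit per TSN, eight TSNs per byte of mapping array. */
	return ((map[gap >> 3] >> (gap & 0x7)) & 0x1);
}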
328
329static int
330sctp_place_control_in_stream(struct sctp_stream_in *strm,
331 struct sctp_association *asoc,
332 struct sctp_queued_to_read *control)
333{
334 struct sctp_queued_to_read *at;
335 struct sctp_readhead *q;
336 uint8_t flags, unordered;
337
338 flags = (control->sinfo_flags >> 8);
339 unordered = flags & SCTP_DATA_UNORDERED;
340 if (unordered) {
341 q = &strm->uno_inqueue;
342 if (asoc->idata_supported == 0) {
343 if (!TAILQ_EMPTY(q)) {
344 /*
345 * Only one stream can be here in old style
346 * -- abort
347 */
348 return (-1);
349 }
350 TAILQ_INSERT_TAIL(q, control, next_instrm);
351 control->on_strm_q = SCTP_ON_UNORDERED;
352 return (0);
353 }
354 } else {
355 q = &strm->inqueue;
356 }
357 if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
358 control->end_added = 1;
359 control->first_frag_seen = 1;
360 control->last_frag_seen = 1;
361 }
362 if (TAILQ_EMPTY(q)) {
363 /* Empty queue */
364 TAILQ_INSERT_HEAD(q, control, next_instrm);
365 if (unordered) {
366 control->on_strm_q = SCTP_ON_UNORDERED;
367 } else {
368 control->on_strm_q = SCTP_ON_ORDERED;
369 }
370 return (0);
371 } else {
372 TAILQ_FOREACH(at, q, next_instrm) {
373 if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
374 /*
375 * one in queue is bigger than the new one,
376 * insert before this one
377 */
378 TAILQ_INSERT_BEFORE(at, control, next_instrm);
379 if (unordered) {
380 control->on_strm_q = SCTP_ON_UNORDERED;
381 } else {
382 control->on_strm_q = SCTP_ON_ORDERED;
383 }
384 break;
385 } else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
386 /*
387 * Gak, He sent me a duplicate msg id
388 * number?? return -1 to abort.
389 */
390 return (-1);
391 } else {
392 if (TAILQ_NEXT(at, next_instrm) == NULL) {
393 /*
394 * We are at the end, insert it
395 * after this one
396 */
397 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
398 sctp_log_strm_del(control, at,
399     SCTP_STR_LOG_FROM_INSERT_TL);
400 }
401 TAILQ_INSERT_AFTER(q, at, control, next_instrm);
402 if (unordered) {
403 control->on_strm_q = SCTP_ON_UNORDERED;
404 } else {
405 control->on_strm_q = SCTP_ON_ORDERED;
406 }
407 break;
408 }
409 }
410 }
411 }
412 return (0);
413}
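SCTP_MID_GT and SCTP_MID_EQ above compare message identifiers with serial-number semantics (RFC 1982 style), switching width on I-DATA support. A sketch of that comparison, not the kernel macros:

#include <stdint.h>

static int
mid_gt(int idata_supported, uint32_t a, uint32_t b)
{
	if (idata_supported)
		return ((int32_t)(a - b) > 0);		/* 32-bit MID */
	return ((int16_t)(uint16_t)(a - b) > 0);	/* 16-bit SSN */
}

static int
mid_eq(int idata_supported, uint32_t a, uint32_t b)
{
	return (idata_supported ? (a == b) : ((uint16_t)a == (uint16_t)b));
}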
414
415static void
416sctp_abort_in_reasm(struct sctp_tcb *stcb,
417 struct sctp_queued_to_read *control,
418 struct sctp_tmit_chunk *chk,
419 int *abort_flag, int opspot)
420{
421 char msg[SCTP_DIAG_INFO_LEN];
422 struct mbuf *oper;
423
424 if (stcb->asoc.idata_supported) {
425 SCTP_SNPRINTF(msg, sizeof(msg),
426 "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
427 opspot,
428 control->fsn_included,
429 chk->rec.data.tsn,
430 chk->rec.data.sid,
431 chk->rec.data.fsn, chk->rec.data.mid);
432 } else {
433 SCTP_SNPRINTF(msg, sizeof(msg),
434 "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
435 opspot,
436 control->fsn_included,
437 chk->rec.data.tsn,
438 chk->rec.data.sid,
439 chk->rec.data.fsn,
440 (uint16_t)chk->rec.data.mid);
441 }
442 oper = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), msg);
443 sctp_m_freem(chk->data);
444 chk->data = NULL;
445 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
446 stcb->sctp_ep->last_abort_code = opspot;
447 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, false, SCTP_SO_NOT_LOCKED);
448 *abort_flag = 1;
449}
450
451static void
452sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
453{
454 /*
455 * The control could not be placed and must be cleaned.
456 */
457 struct sctp_tmit_chunk *chk, *nchk;
458
459 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
460 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
461 if (chk->data)
462 sctp_m_freem(chk->data);
463 chk->data = NULL;
464 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
465 }
466 sctp_free_remote_addr(control->whoFrom);
467 if (control->data) {
468 sctp_m_freem(control->data);
469 control->data = NULL;
470 }
471 sctp_free_a_readq(stcb, control);
472}
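The loop above is the canonical <sys/queue.h> safe-drain idiom: the _SAFE iterator caches the next pointer so the current node can be freed mid-walk. A self-contained sketch of the same pattern:

#include <sys/queue.h>
#include <stdlib.h>

struct item {
	TAILQ_ENTRY(item) link;
	void *payload;
};
TAILQ_HEAD(item_head, item);

static void
drain(struct item_head *q)
{
	struct item *it, *tmp;

	TAILQ_FOREACH_SAFE(it, q, link, tmp) {
		TAILQ_REMOVE(q, it, link);
		free(it->payload);	/* safe: 'tmp' already holds the next node */
		free(it);
	}
}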
473
474/*
475 * Queue the chunk either right into the socket buffer if it is the next one
476 * to go OR put it in the correct place in the delivery queue. If we do
477 * append to the so_buf, keep doing so until we are out of order as
478 * long as the control's entered are non-fragmented.
479 */
480static void
481sctp_queue_data_to_stream(struct sctp_tcb *stcb,
482 struct sctp_association *asoc,
483 struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
484{
485 /*
486 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
487 * all the data in one stream this could happen quite rapidly. One
488 * could use the TSN to keep track of things, but this scheme breaks
489 * down in the other type of stream usage that could occur. Send a
490 * single msg to stream 0, send 4Billion messages to stream 1, now
491 * send a message to stream 0. You have a situation where the TSN
492 * has wrapped but not in the stream. Is this worth worrying about
493 * or should we just change our queue sort at the bottom to be by
494 * TSN.
495 *
496 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
497 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
498 * assignment this could happen... and I don't see how this would be
499 * a violation. So for now I am undecided and will leave the sort by
500 * SSN alone. Maybe a hybrid approach is the answer.
501 *
502 */
503 struct sctp_queued_to_read *at;
504 int queue_needed;
505 uint32_t nxt_todel;
506 struct mbuf *op_err;
507 struct sctp_stream_in *strm;
508 char msg[SCTP_DIAG_INFO_LEN];
509
510 strm = &asoc->strmin[control->sinfo_stream];
511 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
512 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
513 }
514 if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
515 /* The incoming sseq is behind where we last delivered? */
516 SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
517 strm->last_mid_delivered, control->mid);
518 /*
519 * throw it in the stream so it gets cleaned up in
520 * association destruction
521 */
522 TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
523 if (asoc->idata_supported) {
524 SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
525 strm->last_mid_delivered, control->sinfo_tsn,
526 control->sinfo_stream, control->mid);
527 } else {
528 SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
529 (uint16_t)strm->last_mid_delivered,
530 control->sinfo_tsn,
531 control->sinfo_stream,
532 (uint16_t)control->mid);
533 }
534 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
536 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
537 *abort_flag = 1;
538 return;
539 }
540 queue_needed = 1;
541 asoc->size_on_all_streams += control->length;
542 sctp_ucount_incr(asoc->cnt_on_all_streams);
543 nxt_todel = strm->last_mid_delivered + 1;
544 if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
545 /* can be delivered right away? */
546 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
547 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
548 }
549 /* EY it wont be queued if it could be delivered directly */
550 queue_needed = 0;
551 if (asoc->size_on_all_streams >= control->length) {
552 asoc->size_on_all_streams -= control->length;
553 } else {
554#ifdef INVARIANTS
555 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
556#else
557 asoc->size_on_all_streams = 0;
558#endif
559 }
560 sctp_ucount_decr(asoc->cnt_on_all_streams);
561 strm->last_mid_delivered++;
562 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
563 sctp_add_to_readq(stcb->sctp_ep, stcb,
564 control,
565 &stcb->sctp_socket->so_rcv, 1,
566 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
567 TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
568 /* all delivered */
569 nxt_todel = strm->last_mid_delivered + 1;
570 if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
571 (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
572 if (control->on_strm_q == SCTP_ON_ORDERED) {
573 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
574 if (asoc->size_on_all_streams >= control->length) {
575 asoc->size_on_all_streams -= control->length;
576 } else {
577#ifdef INVARIANTS
578 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
579#else
580 asoc->size_on_all_streams = 0;
581#endif
582 }
583 sctp_ucount_decr(asoc->cnt_on_all_streams);
584#ifdef INVARIANTS
585 } else {
586 panic("Huh control: %p is on_strm_q: %d",
587 control, control->on_strm_q);
588#endif
589 }
590 control->on_strm_q = 0;
591 strm->last_mid_delivered++;
592 /*
593 * We ignore the return of deliver_data here
594 * since we always can hold the chunk on the
595 * d-queue. And we have a finite number that
596 * can be delivered from the strq.
597 */
598 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
599 sctp_log_strm_del(control, NULL,
600     SCTP_STR_LOG_FROM_IMMED_DEL);
601 }
602 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
603 sctp_add_to_readq(stcb->sctp_ep, stcb,
604 control,
605 &stcb->sctp_socket->so_rcv, 1,
606 SCTP_READ_LOCK_NOT_HELD,
607 SCTP_SO_LOCKED);
608 continue;
609 } else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
610 *need_reasm = 1;
611 }
612 break;
613 }
614 }
615 if (queue_needed) {
616 /*
617 * Ok, we did not deliver this guy, find the correct place
618 * to put it on the queue.
619 */
620 if (sctp_place_control_in_stream(strm, asoc, control)) {
621 SCTP_SNPRINTF(msg, sizeof(msg),
622 "Queue to str MID: %u duplicate", control->mid);
623 sctp_clean_up_control(stcb, control);
624 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
626 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
627 *abort_flag = 1;
628 }
629 }
630}
631
632static void
633sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
634{
635 struct mbuf *m, *prev = NULL;
636 struct sctp_tcb *stcb;
637
638 stcb = control->stcb;
639 control->held_length = 0;
640 control->length = 0;
641 m = control->data;
642 while (m) {
643 if (SCTP_BUF_LEN(m) == 0) {
644 /* Skip mbufs with NO length */
645 if (prev == NULL) {
646 /* First one */
647 control->data = sctp_m_free(m);
648 m = control->data;
649 } else {
650 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
651 m = SCTP_BUF_NEXT(prev);
652 }
653 if (m == NULL) {
654 control->tail_mbuf = prev;
655 }
656 continue;
657 }
658 prev = m;
659 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
660 if (control->on_read_q) {
661 /*
662 * On read queue so we must increment the SB stuff,
663 * we assume caller has done any locks of SB.
664 */
665 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
666 }
667 m = SCTP_BUF_NEXT(m);
668 }
669 if (prev) {
670 control->tail_mbuf = prev;
671 }
672}
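The shape of sctp_setup_tail_pointer() on a plain singly linked list: unlink and free zero-length nodes, total the payload, remember the tail so later appends are O(1). struct buf_node is illustrative, not an mbuf:

#include <stdint.h>
#include <stdlib.h>

struct buf_node {
	struct buf_node *next;
	uint32_t len;
};

static struct buf_node *
setup_tail(struct buf_node **head, uint32_t *total)
{
	struct buf_node *m = *head, *prev = NULL, *next;

	*total = 0;
	while (m != NULL) {
		if (m->len == 0) {		/* drop empty nodes */
			next = m->next;
			free(m);
			if (prev == NULL)
				*head = next;
			else
				prev->next = next;
			m = next;
			continue;
		}
		*total += m->len;
		prev = m;
		m = m->next;
	}
	return (prev);	/* tail, or NULL for an empty chain */
}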
673
674static void
675sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
676{
677 struct mbuf *prev = NULL;
678 struct sctp_tcb *stcb;
679
680 stcb = control->stcb;
681 if (stcb == NULL) {
682#ifdef INVARIANTS
683 panic("Control broken");
684#else
685 return;
686#endif
687 }
688 if (control->tail_mbuf == NULL) {
689 /* TSNH */
690 sctp_m_freem(control->data);
691 control->data = m;
692 sctp_setup_tail_pointer(control);
693 return;
694 }
695 control->tail_mbuf->m_next = m;
696 while (m) {
697 if (SCTP_BUF_LEN(m) == 0) {
698 /* Skip mbufs with NO length */
699 if (prev == NULL) {
700 /* First one */
701 control->tail_mbuf->m_next = sctp_m_free(m);
702 m = control->tail_mbuf->m_next;
703 } else {
704 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
705 m = SCTP_BUF_NEXT(prev);
706 }
707 if (m == NULL) {
708 control->tail_mbuf = prev;
709 }
710 continue;
711 }
712 prev = m;
713 if (control->on_read_q) {
714 /*
715 * On read queue so we must increment the SB stuff,
716 * we assume caller has done any locks of SB.
717 */
718 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
719 }
720 *added += SCTP_BUF_LEN(m);
721 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
722 m = SCTP_BUF_NEXT(m);
723 }
724 if (prev) {
725 control->tail_mbuf = prev;
726 }
727}
728
729static void
730sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
731{
732 memset(nc, 0, sizeof(struct sctp_queued_to_read));
733 nc->sinfo_stream = control->sinfo_stream;
734 nc->mid = control->mid;
735 TAILQ_INIT(&nc->reasm);
736 nc->top_fsn = control->top_fsn;
737 nc->mid = control->mid;
738 nc->sinfo_flags = control->sinfo_flags;
739 nc->sinfo_ppid = control->sinfo_ppid;
740 nc->sinfo_context = control->sinfo_context;
741 nc->fsn_included = 0xffffffff;
742 nc->sinfo_tsn = control->sinfo_tsn;
743 nc->sinfo_cumtsn = control->sinfo_cumtsn;
744 nc->sinfo_assoc_id = control->sinfo_assoc_id;
745 nc->whoFrom = control->whoFrom;
746 atomic_add_int(&nc->whoFrom->ref_count, 1);
747 nc->stcb = control->stcb;
748 nc->port_from = control->port_from;
749 nc->do_not_ref_stcb = control->do_not_ref_stcb;
750}
751
752static void
753sctp_reset_a_control(struct sctp_queued_to_read *control,
754 struct sctp_inpcb *inp, uint32_t tsn)
755{
756 control->fsn_included = tsn;
757 if (control->on_read_q) {
758 /*
759 * We have to purge it from there, hopefully this will work
760 * :-)
761 */
762 TAILQ_REMOVE(&inp->read_queue, control, next);
763 control->on_read_q = 0;
764 }
765}
766
767static int
768sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
769 struct sctp_association *asoc,
770 struct sctp_stream_in *strm,
771 struct sctp_queued_to_read *control,
772 uint32_t pd_point,
773 int inp_read_lock_held)
774{
775 /*
776 * Special handling for the old un-ordered data chunk. All the
777 * chunks/TSN's go to mid 0. So we have to do the old style watching
778 * to see if we have it all. If you return one, no other control
779 * entries on the un-ordered queue will be looked at. In theory
780 * there should be no other entries in reality, unless the guy is
781 * sending both unordered NDATA and unordered DATA...
782 */
783 struct sctp_tmit_chunk *chk, *lchk, *tchk;
784 uint32_t fsn;
785 struct sctp_queued_to_read *nc;
786 int cnt_added;
787
788 if (control->first_frag_seen == 0) {
789 /* Nothing we can do, we have not seen the first piece yet */
790 return (1);
791 }
792 /* Collapse any we can */
793 cnt_added = 0;
794restart:
795 fsn = control->fsn_included + 1;
796 /* Now what can we add? */
797 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
798 if (chk->rec.data.fsn == fsn) {
799 /* Ok lets add it */
800 sctp_alloc_a_readq(stcb, nc);
801 if (nc == NULL) {
802 break;
803 }
804 memset(nc, 0, sizeof(struct sctp_queued_to_read));
805 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
806 sctp_add_chk_to_control(control, strm, stcb, asoc, chk, inp_read_lock_held);
807 fsn++;
808 cnt_added++;
809 chk = NULL;
810 if (control->end_added) {
811 /* We are done */
812 if (!TAILQ_EMPTY(&control->reasm)) {
813 /*
814 * Ok we have to move anything left
815 * on the control queue to a new
816 * control.
817 */
818 sctp_build_readq_entry_from_ctl(nc, control);
819 tchk = TAILQ_FIRST(&control->reasm);
820 if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
821 TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
822 if (asoc->size_on_reasm_queue >= tchk->send_size) {
823 asoc->size_on_reasm_queue -= tchk->send_size;
824 } else {
825#ifdef INVARIANTS
826 panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
827#else
828 asoc->size_on_reasm_queue = 0;
829#endif
830 }
831 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
832 nc->first_frag_seen = 1;
833 nc->fsn_included = tchk->rec.data.fsn;
834 nc->data = tchk->data;
835 nc->sinfo_ppid = tchk->rec.data.ppid;
836 nc->sinfo_tsn = tchk->rec.data.tsn;
837 sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
838 tchk->data = NULL;
839 sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
840 }
841 tchk = TAILQ_FIRST(&control->reasm);
842 }
843 /* Spin the rest onto the queue */
844 while (tchk) {
845 TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
846 TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
847 tchk = TAILQ_FIRST(&control->reasm);
848 }
849 /*
850 * Now lets add it to the queue
851 * after removing control
852 */
853 TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
854 nc->on_strm_q = SCTP_ON_UNORDERED;
855 if (control->on_strm_q) {
856 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
857 control->on_strm_q = 0;
858 }
859 }
860 if (control->pdapi_started) {
861 strm->pd_api_started = 0;
862 control->pdapi_started = 0;
863 }
864 if (control->on_strm_q) {
865 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
866 control->on_strm_q = 0;
867 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
868 }
869 if (control->on_read_q == 0) {
870 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
871 &stcb->sctp_socket->so_rcv, control->end_added,
872 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
873 }
874 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
875 if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
876 /*
877 * Switch to the new guy and
878 * continue
879 */
880 control = nc;
881 goto restart;
882 } else {
883 if (nc->on_strm_q == 0) {
884 sctp_free_a_readq(stcb, nc);
885 }
886 }
887 return (1);
888 } else {
889 sctp_free_a_readq(stcb, nc);
890 }
891 } else {
892 /* Can't add more */
893 break;
894 }
895 }
896 if (cnt_added && strm->pd_api_started) {
897 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
898 }
899 if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
900 strm->pd_api_started = 1;
901 control->pdapi_started = 1;
902 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
903 &stcb->sctp_socket->so_rcv, control->end_added,
904 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
905 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
906 return (0);
907 } else {
908 return (1);
909 }
910}
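A sketch of the partial-delivery (PD-API) gate used above: hand an incomplete message to the reader only once it holds more than pd_point bytes, and allow only one in-progress partial delivery per stream. Illustrative, not the kernel's state machine:

#include <stdint.h>

static int
should_start_pd(uint32_t assembled_len, uint32_t pd_point,
    int stream_pd_in_progress)
{
	if (stream_pd_in_progress)
		return (0);	/* one partially delivered message at a time */
	return (assembled_len > pd_point);
}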
911
912static void
913sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
914 struct sctp_association *asoc,
915 struct sctp_queued_to_read *control,
916 struct sctp_tmit_chunk *chk,
917 int *abort_flag)
918{
919 struct sctp_tmit_chunk *at;
920 int inserted;
921
922 /*
923 * Here we need to place the chunk into the control structure sorted
924 * in the correct order.
925 */
926 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
927 /* Its the very first one. */
928 SCTPDBG(SCTP_DEBUG_XXX,
929 "chunk is a first fsn: %u becomes fsn_included\n",
930 chk->rec.data.fsn);
931 at = TAILQ_FIRST(&control->reasm);
932 if (at && SCTP_TSN_GT(chk->rec.data.fsn, at->rec.data.fsn)) {
933 /*
934 * The first chunk in the reassembly is a smaller
935 * TSN than this one, even though this has a first,
936 * it must be from a subsequent msg.
937 */
938 goto place_chunk;
939 }
940 if (control->first_frag_seen) {
941 /*
942 * In old un-ordered we can reassemble multiple
943 * messages on one control. As long as the next
944 * FIRST is greater than the old first (TSN i.e. FSN
945 * wise)
946 */
947 struct mbuf *tdata;
948 uint32_t tmp;
949
950 if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
951 /*
952 * Easy way the start of a new guy beyond
953 * the lowest
954 */
955 goto place_chunk;
956 }
957 if ((chk->rec.data.fsn == control->fsn_included) ||
958 (control->pdapi_started)) {
959 /*
960 * Ok this should not happen, if it does we
961 * started the pd-api on the higher TSN
962 * (since the equals part is a TSN failure
963 * it must be that).
964 *
965 * We are completely hosed in that case since
966 * I have no way to recover. This really
967 * will only happen if we can get more TSN's
968 * higher before the pd-api-point.
969 */
970 sctp_abort_in_reasm(stcb, control, chk,
971 abort_flag,
973
974 return;
975 }
976 /*
977 * Ok we have two firsts and the one we just got is
978 * smaller than the one we previously placed.. yuck!
979 * We must swap them out.
980 */
981 /* swap the mbufs */
982 tdata = control->data;
983 control->data = chk->data;
984 chk->data = tdata;
985 /* Save the lengths */
986 chk->send_size = control->length;
987 /* Recompute length of control and tail pointer */
988 sctp_setup_tail_pointer(control);
989 /* Fix the FSN included */
990 tmp = control->fsn_included;
991 control->fsn_included = chk->rec.data.fsn;
992 chk->rec.data.fsn = tmp;
993 /* Fix the TSN included */
994 tmp = control->sinfo_tsn;
995 control->sinfo_tsn = chk->rec.data.tsn;
996 chk->rec.data.tsn = tmp;
997 /* Fix the PPID included */
998 tmp = control->sinfo_ppid;
999 control->sinfo_ppid = chk->rec.data.ppid;
1000 chk->rec.data.ppid = tmp;
1001 /* Fix tail pointer */
1002 goto place_chunk;
1003 }
1004 control->first_frag_seen = 1;
1005 control->fsn_included = chk->rec.data.fsn;
1006 control->top_fsn = chk->rec.data.fsn;
1007 control->sinfo_tsn = chk->rec.data.tsn;
1008 control->sinfo_ppid = chk->rec.data.ppid;
1009 control->data = chk->data;
1010 sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1011 chk->data = NULL;
1012 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1013 sctp_setup_tail_pointer(control);
1014 return;
1015 }
1016place_chunk:
1017 inserted = 0;
1018 TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1019 if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1020 /*
1021 * This one in queue is bigger than the new one,
1022 * insert the new one before at.
1023 */
1024 asoc->size_on_reasm_queue += chk->send_size;
1025 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1026 inserted = 1;
1027 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1028 break;
1029 } else if (at->rec.data.fsn == chk->rec.data.fsn) {
1030 /*
1031 * They sent a duplicate fsn number. This really
1032 * should not happen since the FSN is a TSN and it
1033 * should have been dropped earlier.
1034 */
1035 sctp_abort_in_reasm(stcb, control, chk,
1036 abort_flag,
1038 return;
1039 }
1040 }
1041 if (inserted == 0) {
1042 /* Its at the end */
1043 asoc->size_on_reasm_queue += chk->send_size;
1044 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1045 control->top_fsn = chk->rec.data.fsn;
1046 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1047 }
1048}
1049
1050static int
1051sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
1052 struct sctp_stream_in *strm, int inp_read_lock_held)
1053{
1054 /*
1055 * Given a stream, strm, see if any of the SSN's on it that are
1056 * fragmented are ready to deliver. If so go ahead and place them on
1057 * the read queue. In so placing if we have hit the end, then we
1058 * need to remove them from the stream's queue.
1059 */
1060 struct sctp_queued_to_read *control, *nctl = NULL;
1061 uint32_t next_to_del;
1062 uint32_t pd_point;
1063 int ret = 0;
1064
1065 if (stcb->sctp_socket) {
1066 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
1067     stcb->sctp_ep->partial_delivery_point);
1068 } else {
1069 pd_point = stcb->sctp_ep->partial_delivery_point;
1070 }
1071 control = TAILQ_FIRST(&strm->uno_inqueue);
1072
1073 if ((control != NULL) &&
1074 (asoc->idata_supported == 0)) {
1075 /* Special handling needed for "old" data format */
1076 if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
1077 goto done_un;
1078 }
1079 }
1080 if (strm->pd_api_started) {
1081 /* Can't add more */
1082 return (0);
1083 }
1084 while (control) {
1085 SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
1086 control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
1087 nctl = TAILQ_NEXT(control, next_instrm);
1088 if (control->end_added) {
1089 /* We just put the last bit on */
1090 if (control->on_strm_q) {
1091#ifdef INVARIANTS
1092 if (control->on_strm_q != SCTP_ON_UNORDERED) {
1093 panic("Huh control: %p on_q: %d -- not unordered?",
1094 control, control->on_strm_q);
1095 }
1096#endif
1097 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1098 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1099 if (asoc->size_on_all_streams >= control->length) {
1100 asoc->size_on_all_streams -= control->length;
1101 } else {
1102#ifdef INVARIANTS
1103 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1104#else
1105 asoc->size_on_all_streams = 0;
1106#endif
1107 }
1108 sctp_ucount_decr(asoc->cnt_on_all_streams);
1109 control->on_strm_q = 0;
1110 }
1111 if (control->on_read_q == 0) {
1112 sctp_add_to_readq(stcb->sctp_ep, stcb,
1113 control,
1114 &stcb->sctp_socket->so_rcv, control->end_added,
1115 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1116 }
1117 } else {
1118 /* Can we do a PD-API for this un-ordered guy? */
1119 if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
1120 strm->pd_api_started = 1;
1121 control->pdapi_started = 1;
1122 sctp_add_to_readq(stcb->sctp_ep, stcb,
1123 control,
1124 &stcb->sctp_socket->so_rcv, control->end_added,
1125 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1126
1127 break;
1128 }
1129 }
1130 control = nctl;
1131 }
1132done_un:
1133 control = TAILQ_FIRST(&strm->inqueue);
1134 if (strm->pd_api_started) {
1135 /* Can't add more */
1136 return (0);
1137 }
1138 if (control == NULL) {
1139 return (ret);
1140 }
1141 if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
1142 /*
1143 * Ok the guy at the top was being partially delivered
1144 * completed, so we remove it. Note the pd_api flag was
1145 * taken off when the chunk was merged on in
1146 * sctp_queue_data_for_reasm below.
1147 */
1148 nctl = TAILQ_NEXT(control, next_instrm);
1149 SCTPDBG(SCTP_DEBUG_XXX,
1150 "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
1151 control, control->end_added, control->mid,
1152 control->top_fsn, control->fsn_included,
1153 strm->last_mid_delivered);
1154 if (control->end_added) {
1155 if (control->on_strm_q) {
1156#ifdef INVARIANTS
1157 if (control->on_strm_q != SCTP_ON_ORDERED) {
1158 panic("Huh control: %p on_q: %d -- not ordered?",
1159 control, control->on_strm_q);
1160 }
1161#endif
1162 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1163 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1164 if (asoc->size_on_all_streams >= control->length) {
1165 asoc->size_on_all_streams -= control->length;
1166 } else {
1167#ifdef INVARIANTS
1168 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1169#else
1170 asoc->size_on_all_streams = 0;
1171#endif
1172 }
1173 sctp_ucount_decr(asoc->cnt_on_all_streams);
1174 control->on_strm_q = 0;
1175 }
1176 if (strm->pd_api_started && control->pdapi_started) {
1177 control->pdapi_started = 0;
1178 strm->pd_api_started = 0;
1179 }
1180 if (control->on_read_q == 0) {
1181 sctp_add_to_readq(stcb->sctp_ep, stcb,
1182 control,
1183 &stcb->sctp_socket->so_rcv, control->end_added,
1184 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1185 }
1186 control = nctl;
1187 }
1188 }
1189 if (strm->pd_api_started) {
1190 /*
1191 * Can't add more must have gotten an un-ordered above being
1192 * partially delivered.
1193 */
1194 return (0);
1195 }
1196deliver_more:
1197 next_to_del = strm->last_mid_delivered + 1;
1198 if (control) {
1199 SCTPDBG(SCTP_DEBUG_XXX,
1200 "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
1201 control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
1202 next_to_del);
1203 nctl = TAILQ_NEXT(control, next_instrm);
1204 if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
1205 (control->first_frag_seen)) {
1206 int done;
1207
1208 /* Ok we can deliver it onto the stream. */
1209 if (control->end_added) {
1210 /* We are done with it afterwards */
1211 if (control->on_strm_q) {
1212#ifdef INVARIANTS
1213 if (control->on_strm_q != SCTP_ON_ORDERED) {
1214 panic("Huh control: %p on_q: %d -- not ordered?",
1215 control, control->on_strm_q);
1216 }
1217#endif
1218 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1219 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1220 if (asoc->size_on_all_streams >= control->length) {
1221 asoc->size_on_all_streams -= control->length;
1222 } else {
1223#ifdef INVARIANTS
1224 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1225#else
1226 asoc->size_on_all_streams = 0;
1227#endif
1228 }
1229 sctp_ucount_decr(asoc->cnt_on_all_streams);
1230 control->on_strm_q = 0;
1231 }
1232 ret++;
1233 }
1234 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1235 /*
1236 * A singleton now slipping through - mark
1237 * it non-revokable too
1238 */
1239 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1240 } else if (control->end_added == 0) {
1241 /*
1242 * Check if we can defer adding until its
1243 * all there
1244 */
1245 if ((control->length < pd_point) || (strm->pd_api_started)) {
1246 /*
1247 * Don't need it or cannot add more
1248 * (one being delivered that way)
1249 */
1250 goto out;
1251 }
1252 }
1253 done = (control->end_added) && (control->last_frag_seen);
1254 if (control->on_read_q == 0) {
1255 if (!done) {
1256 if (asoc->size_on_all_streams >= control->length) {
1257 asoc->size_on_all_streams -= control->length;
1258 } else {
1259#ifdef INVARIANTS
1260 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1261#else
1262 asoc->size_on_all_streams = 0;
1263#endif
1264 }
1265 strm->pd_api_started = 1;
1266 control->pdapi_started = 1;
1267 }
1268 sctp_add_to_readq(stcb->sctp_ep, stcb,
1269 control,
1270 &stcb->sctp_socket->so_rcv, control->end_added,
1271 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1272 }
1273 strm->last_mid_delivered = next_to_del;
1274 if (done) {
1275 control = nctl;
1276 goto deliver_more;
1277 }
1278 }
1279 }
1280out:
1281 return (ret);
1282}
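Userland view of the partial delivery started above: recvmsg() may return a large message in pieces, and MSG_EOR is only set on the final piece. A hedged sketch with minimal error handling:

#include <sys/types.h>
#include <sys/uio.h>
#include <sys/socket.h>
#include <string.h>

static ssize_t
read_whole_msg(int sd, char *buf, size_t len)
{
	struct msghdr msg;
	struct iovec iov;
	ssize_t n, total = 0;

	do {
		iov.iov_base = buf + total;
		iov.iov_len = len - (size_t)total;
		memset(&msg, 0, sizeof(msg));
		msg.msg_iov = &iov;
		msg.msg_iovlen = 1;
		n = recvmsg(sd, &msg, 0);
		if (n <= 0)
			return (n);	/* error or EOF */
		total += n;
		/* Loop until the kernel flags the end of the record. */
	} while ((msg.msg_flags & MSG_EOR) == 0 && (size_t)total < len);
	return (total);
}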
1283
1284static uint32_t
1285sctp_add_chk_to_control(struct sctp_queued_to_read *control,
1286 struct sctp_stream_in *strm,
1287 struct sctp_tcb *stcb, struct sctp_association *asoc,
1288 struct sctp_tmit_chunk *chk, int hold_rlock)
1289{
1290 /*
1291 * Given a control and a chunk, merge the data from the chk onto the
1292 * control and free up the chunk resources.
1293 */
1294 uint32_t added = 0;
1295 int i_locked = 0;
1296
1297 if (control->on_read_q && (hold_rlock == 0)) {
1298 /*
1299 * Its being pd-api'd so we must do some locks.
1300 */
1301 SCTP_INP_READ_LOCK(stcb->sctp_ep);
1302 i_locked = 1;
1303 }
1304 if (control->data == NULL) {
1305 control->data = chk->data;
1306 sctp_setup_tail_pointer(control);
1307 } else {
1308 sctp_add_to_tail_pointer(control, chk->data, &added);
1309 }
1310 control->fsn_included = chk->rec.data.fsn;
1311 asoc->size_on_reasm_queue -= chk->send_size;
1312 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
1313 sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1314 chk->data = NULL;
1315 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1316 control->first_frag_seen = 1;
1317 control->sinfo_tsn = chk->rec.data.tsn;
1318 control->sinfo_ppid = chk->rec.data.ppid;
1319 }
1320 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1321 /* Its complete */
1322 if ((control->on_strm_q) && (control->on_read_q)) {
1323 if (control->pdapi_started) {
1324 control->pdapi_started = 0;
1325 strm->pd_api_started = 0;
1326 }
1327 if (control->on_strm_q == SCTP_ON_UNORDERED) {
1328 /* Unordered */
1329 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1330 control->on_strm_q = 0;
1331 } else if (control->on_strm_q == SCTP_ON_ORDERED) {
1332 /* Ordered */
1333 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1334 /*
1335 * Don't need to decrement
1336 * size_on_all_streams, since control is on
1337 * the read queue.
1338 */
1339 sctp_ucount_decr(asoc->cnt_on_all_streams);
1340 control->on_strm_q = 0;
1341#ifdef INVARIANTS
1342 } else if (control->on_strm_q) {
1343 panic("Unknown state on ctrl: %p on_strm_q: %d", control,
1344 control->on_strm_q);
1345#endif
1346 }
1347 }
1348 control->end_added = 1;
1349 control->last_frag_seen = 1;
1350 }
1351 if (i_locked) {
1352 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
1353 }
1354 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1355 return (added);
1356}
1357
1358/*
1359 * Dump onto the re-assembly queue, in its proper place. After dumping on the
1360 * queue, see if anything can be delivered. If so pull it off (or as much as
1361 * we can). If we run out of space then we must dump what we can and set the
1362 * appropriate flag to say we queued what we could.
1363 */
1364static void
1365sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1366 struct sctp_queued_to_read *control,
1367 struct sctp_tmit_chunk *chk,
1368 int created_control,
1369 int *abort_flag, uint32_t tsn)
1370{
1371 uint32_t next_fsn;
1372 struct sctp_tmit_chunk *at, *nat;
1373 struct sctp_stream_in *strm;
1374 int do_wakeup, unordered;
1375 uint32_t lenadded;
1376
1377 strm = &asoc->strmin[control->sinfo_stream];
1378 /*
1379 * For old un-ordered data chunks.
1380 */
1381 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
1382 unordered = 1;
1383 } else {
1384 unordered = 0;
1385 }
1386 /* Must be added to the stream-in queue */
1387 if (created_control) {
1388 if ((unordered == 0) || (asoc->idata_supported)) {
1389 sctp_ucount_incr(asoc->cnt_on_all_streams);
1390 }
1391 if (sctp_place_control_in_stream(strm, asoc, control)) {
1392 /* Duplicate SSN? */
1393 sctp_abort_in_reasm(stcb, control, chk,
1394 abort_flag,
1396 sctp_clean_up_control(stcb, control);
1397 return;
1398 }
1399 if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
1400 /*
1401 * Ok we created this control and now lets validate
1402 * that its legal i.e. there is a B bit set, if not
1403 * and we have up to the cum-ack then its invalid.
1404 */
1405 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1406 sctp_abort_in_reasm(stcb, control, chk,
1407 abort_flag,
1409 return;
1410 }
1411 }
1412 }
1413 if ((asoc->idata_supported == 0) && (unordered == 1)) {
1414 sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
1415 return;
1416 }
1417 /*
1418 * Ok we must queue the chunk into the reassembly portion: o if its
1419 * the first it goes to the control mbuf. o if its not first but the
1420 * next in sequence it goes to the control, and each succeeding one
1421 * in order also goes. o if its not in order we place it on the list
1422 * in its place.
1423 */
1424 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1425 /* Its the very first one. */
1426 SCTPDBG(SCTP_DEBUG_XXX,
1427 "chunk is a first fsn: %u becomes fsn_included\n",
1428 chk->rec.data.fsn);
1429 if (control->first_frag_seen) {
1430 /*
1431 * Error on senders part, they either sent us two
1432 * data chunks with FIRST, or they sent two
1433 * un-ordered chunks that were fragmented at the
1434 * same time in the same stream.
1435 */
1436 sctp_abort_in_reasm(stcb, control, chk,
1437 abort_flag,
1439 return;
1440 }
1441 control->first_frag_seen = 1;
1442 control->sinfo_ppid = chk->rec.data.ppid;
1443 control->sinfo_tsn = chk->rec.data.tsn;
1444 control->fsn_included = chk->rec.data.fsn;
1445 control->data = chk->data;
1446 sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1447 chk->data = NULL;
1448 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1449 sctp_setup_tail_pointer(control);
1450 asoc->size_on_all_streams += control->length;
1451 } else {
1452 /* Place the chunk in our list */
1453 int inserted = 0;
1454
1455 if (control->last_frag_seen == 0) {
1456 /* Still willing to raise highest FSN seen */
1457 if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1458 SCTPDBG(SCTP_DEBUG_XXX,
1459 "We have a new top_fsn: %u\n",
1460 chk->rec.data.fsn);
1461 control->top_fsn = chk->rec.data.fsn;
1462 }
1463 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1464 SCTPDBG(SCTP_DEBUG_XXX,
1465 "The last fsn is now in place fsn: %u\n",
1466 chk->rec.data.fsn);
1467 control->last_frag_seen = 1;
1468 if (SCTP_TSN_GT(control->top_fsn, chk->rec.data.fsn)) {
1469 SCTPDBG(SCTP_DEBUG_XXX,
1470 "New fsn: %u is not at top_fsn: %u -- abort\n",
1471 chk->rec.data.fsn,
1472 control->top_fsn);
1473 sctp_abort_in_reasm(stcb, control, chk,
1474 abort_flag,
1476 return;
1477 }
1478 }
1479 if (asoc->idata_supported || control->first_frag_seen) {
1480 /*
1481 * For IDATA we always check since we know
1482 * that the first fragment is 0. For old
1483 * DATA we have to receive the first before
1484 * we know the first FSN (which is the TSN).
1485 */
1486 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1487 /*
1488 * We have already delivered up to
1489 * this so its a dup
1490 */
1491 sctp_abort_in_reasm(stcb, control, chk,
1492 abort_flag,
1494 return;
1495 }
1496 }
1497 } else {
1498 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1499 /* Second last? huh? */
1500 SCTPDBG(SCTP_DEBUG_XXX,
1501 "Duplicate last fsn: %u (top: %u) -- abort\n",
1502 chk->rec.data.fsn, control->top_fsn);
1503 sctp_abort_in_reasm(stcb, control,
1504 chk, abort_flag,
1506 return;
1507 }
1508 if (asoc->idata_supported || control->first_frag_seen) {
1509 /*
1510 * For IDATA we always check since we know
1511 * that the first fragment is 0. For old
1512 * DATA we have to receive the first before
1513 * we know the first FSN (which is the TSN).
1514 */
1515
1516 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1517 /*
1518 * We have already delivered up to
1519 * this so its a dup
1520 */
1521 SCTPDBG(SCTP_DEBUG_XXX,
1522 "New fsn: %u is already seen in included_fsn: %u -- abort\n",
1523 chk->rec.data.fsn, control->fsn_included);
1524 sctp_abort_in_reasm(stcb, control, chk,
1525 abort_flag,
1527 return;
1528 }
1529 }
1530 /*
1531 * validate not beyond top FSN if we have seen last
1532 * one
1533 */
1534 if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1535 SCTPDBG(SCTP_DEBUG_XXX,
1536 "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
1537 chk->rec.data.fsn,
1538 control->top_fsn);
1539 sctp_abort_in_reasm(stcb, control, chk,
1540 abort_flag,
1542 return;
1543 }
1544 }
1545 /*
1546 * If we reach here, we need to place the new chunk in the
1547 * reassembly for this control.
1548 */
1549 SCTPDBG(SCTP_DEBUG_XXX,
1550 "chunk is a not first fsn: %u needs to be inserted\n",
1551 chk->rec.data.fsn);
1552 TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1553 if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1554 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1555 /* Last not at the end? huh? */
1556 SCTPDBG(SCTP_DEBUG_XXX,
1557 "Last fragment not last in list: -- abort\n");
1558 sctp_abort_in_reasm(stcb, control,
1559 chk, abort_flag,
1561 return;
1562 }
1563 /*
1564 * This one in queue is bigger than the new
1565 * one, insert the new one before at.
1566 */
1567 SCTPDBG(SCTP_DEBUG_XXX,
1568 "Insert it before fsn: %u\n",
1569 at->rec.data.fsn);
1570 asoc->size_on_reasm_queue += chk->send_size;
1571 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1572 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1573 inserted = 1;
1574 break;
1575 } else if (at->rec.data.fsn == chk->rec.data.fsn) {
1576 /*
1577 * Gak, He sent me a duplicate str seq
1578 * number
1579 */
1580 /*
1581 * foo bar, I guess I will just free this
1582 * new guy, should we abort too? FIX ME
1583 * MAYBE? Or it COULD be that the SSN's have
1584 * wrapped. Maybe I should compare to TSN
1585 * somehow... sigh for now just blow away
1586 * the chunk!
1587 */
1588 SCTPDBG(SCTP_DEBUG_XXX,
1589 "Duplicate to fsn: %u -- abort\n",
1590 at->rec.data.fsn);
1591 sctp_abort_in_reasm(stcb, control,
1592 chk, abort_flag,
1594 return;
1595 }
1596 }
1597 if (inserted == 0) {
1598 /* Goes on the end */
1599 SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
1600 chk->rec.data.fsn);
1601 asoc->size_on_reasm_queue += chk->send_size;
1602 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1603 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1604 }
1605 }
1606 /*
1607 * Ok lets see if we can suck any up into the control structure that
1608 * are in seq if it makes sense.
1609 */
1610 do_wakeup = 0;
1611 /*
1612 * If the first fragment has not been seen there is no sense in
1613 * looking.
1614 */
1615 if (control->first_frag_seen) {
1616 next_fsn = control->fsn_included + 1;
1617 TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1618 if (at->rec.data.fsn == next_fsn) {
1619 /* We can add this one now to the control */
1620 SCTPDBG(SCTP_DEBUG_XXX,
1621 "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
1622 control, at,
1623 at->rec.data.fsn,
1624 next_fsn, control->fsn_included);
1625 TAILQ_REMOVE(&control->reasm, at, sctp_next);
1626 lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
1627 if (control->on_read_q) {
1628 do_wakeup = 1;
1629 } else {
1630 /*
1631 * We only add to the
1632 * size-on-all-streams if its not on
1633 * the read q. The read q flag will
1634 * cause a sballoc so its accounted
1635 * for there.
1636 */
1637 asoc->size_on_all_streams += lenadded;
1638 }
1639 next_fsn++;
1640 if (control->end_added && control->pdapi_started) {
1641 if (strm->pd_api_started) {
1642 strm->pd_api_started = 0;
1643 control->pdapi_started = 0;
1644 }
1645 if (control->on_read_q == 0) {
1646 sctp_add_to_readq(stcb->sctp_ep, stcb,
1647 control,
1648 &stcb->sctp_socket->so_rcv, control->end_added,
1649 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1650 }
1651 break;
1652 }
1653 } else {
1654 break;
1655 }
1656 }
1657 }
1658 if (do_wakeup) {
1659 /* Need to wakeup the reader */
1660 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
1661 }
1662}
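The in-sequence "suck up" loop above, reduced to its core: starting from the last FSN merged into the message, pull consecutive fragments off the front of a sorted collection until the first gap. A sketch over a plain array standing in for the TAILQ:

#include <stddef.h>
#include <stdint.h>

/*
 * fsn[0..n) holds fragment sequence numbers in ascending order.
 * Returns how many of them extend the message whose last included
 * FSN is *included, and advances *included past each one merged.
 */
static size_t
merge_in_sequence(const uint32_t *fsn, size_t n, uint32_t *included)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (fsn[i] != *included + 1)
			break;		/* gap: stop merging */
		(*included)++;
	}
	return (i);
}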
1663
1664static struct sctp_queued_to_read *
1665sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
1666{
1667 struct sctp_queued_to_read *control;
1668
1669 if (ordered) {
1670 TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
1671 if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1672 break;
1673 }
1674 }
1675 } else {
1676 if (idata_supported) {
1677 TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1678 if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1679 break;
1680 }
1681 }
1682 } else {
1683 control = TAILQ_FIRST(&strm->uno_inqueue);
1684 }
1685 }
1686 return (control);
1687}
1688
1689static int
1690sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1691 struct mbuf **m, int offset, int chk_length,
1692 struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
1693 int *break_flag, int last_chunk, uint8_t chk_type)
1694{
1695 struct sctp_tmit_chunk *chk = NULL; /* make gcc happy */
1696 struct sctp_stream_in *strm;
1697 uint32_t tsn, fsn, gap, mid;
1698 struct mbuf *dmbuf;
1699 int the_len;
1700 int need_reasm_check = 0;
1701 uint16_t sid;
1702 struct mbuf *op_err;
1703 char msg[SCTP_DIAG_INFO_LEN];
1704 struct sctp_queued_to_read *control, *ncontrol;
1705 uint32_t ppid;
1706 uint8_t chk_flags;
1707 struct sctp_stream_reset_list *liste;
1708 int ordered;
1709 size_t clen;
1710 int created_control = 0;
1711
1712 if (chk_type == SCTP_IDATA) {
1713 struct sctp_idata_chunk *chunk, chunk_buf;
1714
1715 chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1716 sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
1717 chk_flags = chunk->ch.chunk_flags;
1718 clen = sizeof(struct sctp_idata_chunk);
1719 tsn = ntohl(chunk->dp.tsn);
1720 sid = ntohs(chunk->dp.sid);
1721 mid = ntohl(chunk->dp.mid);
1722 if (chk_flags & SCTP_DATA_FIRST_FRAG) {
1723 fsn = 0;
1724 ppid = chunk->dp.ppid_fsn.ppid;
1725 } else {
1726 fsn = ntohl(chunk->dp.ppid_fsn.fsn);
1727 ppid = 0xffffffff; /* Use as an invalid value. */
1728 }
1729 } else {
1730 struct sctp_data_chunk *chunk, chunk_buf;
1731
1732 chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1733 sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
1734 chk_flags = chunk->ch.chunk_flags;
1735 clen = sizeof(struct sctp_data_chunk);
1736 tsn = ntohl(chunk->dp.tsn);
1737 sid = ntohs(chunk->dp.sid);
1738 mid = (uint32_t)(ntohs(chunk->dp.ssn));
1739 fsn = tsn;
1740 ppid = chunk->dp.ppid;
1741 }
1742 if ((size_t)chk_length == clen) {
1743 /*
1744 * Need to send an abort since we had an empty data chunk.
1745 */
1746 op_err = sctp_generate_no_user_data_cause(tsn);
1748 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
1749 *abort_flag = 1;
1750 return (0);
1751 }
1752 if (chk_flags & SCTP_DATA_SACK_IMMEDIATELY) {
1753 asoc->send_sack = 1;
1754 }
1755 ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
1756 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1757 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1758 }
1759 if (stcb == NULL) {
1760 return (0);
1761 }
1762 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
1763 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1764 /* It is a duplicate */
1765 SCTP_STAT_INCR(sctps_recvdupdata);
1766 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1767 /* Record a dup for the next outbound sack */
1768 asoc->dup_tsns[asoc->numduptsns] = tsn;
1769 asoc->numduptsns++;
1770 }
1771 asoc->send_sack = 1;
1772 return (0);
1773 }
1774 /* Calculate the number of TSN's between the base and this TSN */
1775 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1776 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1777 /* Can't hold the bit in the mapping at max array, toss it */
1778 return (0);
1779 }
1780 if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
1782 if (sctp_expand_mapping_array(asoc, gap)) {
1783 /* Can't expand, drop it */
1784 return (0);
1785 }
1786 }
1787 if (SCTP_TSN_GT(tsn, *high_tsn)) {
1788 *high_tsn = tsn;
1789 }
1790 /* See if we have received this one already */
1791 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1792     SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1793 SCTP_STAT_INCR(sctps_recvdupdata);
1794 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1795 /* Record a dup for the next outbound sack */
1796 asoc->dup_tsns[asoc->numduptsns] = tsn;
1797 asoc->numduptsns++;
1798 }
1799 asoc->send_sack = 1;
1800 return (0);
1801 }
1802 /*
1803 * Check to see about the GONE flag, duplicates would cause a sack
1804 * to be sent up above
1805 */
1806 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1807     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1808 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1809 /*
1810 * wait a minute, this guy is gone, there is no longer a
1811 * receiver. Send peer an ABORT!
1812 */
1813 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1814 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
1815 *abort_flag = 1;
1816 return (0);
1817 }
1818 /*
1819 * Now before going further we see if there is room. If NOT then we
1820 * MAY let one through only IF this TSN is the one we are waiting
1821 * for on a partial delivery API.
1822 */
1823
1824 /* Is the stream valid? */
1825 if (sid >= asoc->streamincnt) {
1826 struct sctp_error_invalid_stream *cause;
1827
1828 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1829 0, M_NOWAIT, 1, MT_DATA);
1830 if (op_err != NULL) {
1831 /* add some space up front so prepend will work well */
1832 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1833 cause = mtod(op_err, struct sctp_error_invalid_stream *);
1834 /*
1835 * Error causes are just param's and this one has
1836 * two back to back phdr, one with the error type
1837 * and size, the other with the streamid and a rsvd
1838 */
1839 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1840 cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1841 cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1842 cause->stream_id = htons(sid);
1843 cause->reserved = htons(0);
1844 sctp_queue_op_err(stcb, op_err);
1845 }
1846 SCTP_STAT_INCR(sctps_badsid);
1847 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1848 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1849 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1850 asoc->highest_tsn_inside_nr_map = tsn;
1851 }
1852 if (tsn == (asoc->cumulative_tsn + 1)) {
1853 /* Update cum-ack */
1854 asoc->cumulative_tsn = tsn;
1855 }
1856 return (0);
1857 }
1858 /*
1859 * If its a fragmented message, lets see if we can find the control
1860 * on the reassembly queues.
1861 */
1862 if ((chk_type == SCTP_IDATA) &&
1863 ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
1864 (fsn == 0)) {
1865 /*
1866 * The first *must* be fsn 0, and other (middle/end) pieces
1867 * can *not* be fsn 0. XXX: This can happen in case of a
1868 * wrap around. Ignored for now.
1869 */
1870 SCTP_SNPRINTF(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x", mid, chk_flags);
1871 goto err_out;
1872 }
1873 control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
1874 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1875 chk_flags, control);
1876 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1877 /* See if we can find the re-assembly entity */
1878 if (control != NULL) {
1879 /* We found something, does it belong? */
1880 if (ordered && (mid != control->mid)) {
1881 SCTP_SNPRINTF(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
1882 err_out:
1883 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1885 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
1886 *abort_flag = 1;
1887 return (0);
1888 }
1889 if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1890 /*
1891 * We can't have a switched order with an
1892 * unordered chunk
1893 */
1894 SCTP_SNPRINTF(msg, sizeof(msg),
1895 "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1896 tsn);
1897 goto err_out;
1898 }
1899 if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1900 /*
1901 * We can't have a switched unordered with a
1902 * ordered chunk
1903 */
1904 SCTP_SNPRINTF(msg, sizeof(msg),
1905 "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1906 tsn);
1907 goto err_out;
1908 }
1909 }
1910 } else {
1911 /*
1912 * Its a complete segment. Lets validate we don't have a
1913 * re-assembly going on with the same Stream/Seq (for
1914 * ordered) or in the same Stream for unordered.
1915 */
1916 if (control != NULL) {
1917 if (ordered || asoc->idata_supported) {
1918 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
1919 chk_flags, mid);
1920 SCTP_SNPRINTF(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
1921 goto err_out;
1922 } else {
1923 if ((tsn == control->fsn_included + 1) &&
1924 (control->end_added == 0)) {
1925 SCTP_SNPRINTF(msg, sizeof(msg),
1926 "Illegal message sequence, missing end for MID: %8.8x",
1927 control->fsn_included);
1928 goto err_out;
1929 } else {
1930 control = NULL;
1931 }
1932 }
1933 }
1934 }
1935 /* now do the tests */
1936 if (((asoc->cnt_on_all_streams +
1937 asoc->cnt_on_reasm_queue +
1938 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1939 (((int)asoc->my_rwnd) <= 0)) {
1940 /*
1941 * When we have NO room in the rwnd we check to make sure
1942 * the reader is doing its job...
1943 */
1944 if (stcb->sctp_socket->so_rcv.sb_cc) {
1945 /* some to read, wake-up */
1946 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1947 }
1948 /* now is it in the mapping array of what we have accepted? */
1949 if (chk_type == SCTP_DATA) {
1950 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1951     SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1952 /* Nope not in the valid range dump it */
1953 dump_packet:
1954 sctp_set_rwnd(stcb, asoc);
1955 if ((asoc->cnt_on_all_streams +
1956 asoc->cnt_on_reasm_queue +
1957 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1958 SCTP_STAT_INCR(sctps_datadropchklmt);
1959 } else {
1960 SCTP_STAT_INCR(sctps_datadroprwnd);
1961 }
1962 *break_flag = 1;
1963 return (0);
1964 }
1965 } else {
1966 if (control == NULL) {
1967 goto dump_packet;
1968 }
1969 if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1970 goto dump_packet;
1971 }
1972 }
1973 }
1974#ifdef SCTP_ASOCLOG_OF_TSNS
1975 SCTP_TCB_LOCK_ASSERT(stcb);
1976 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1977 asoc->tsn_in_at = 0;
1978 asoc->tsn_in_wrapped = 1;
1979 }
1980 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1981 asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
1982 asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
1983 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1984 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1985 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1986 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1987 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1988 asoc->tsn_in_at++;
1989#endif
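/*
 * Editor's note: illustrative-only helper showing how a TSN is turned into
 * a bit position in the (nr_)mapping_array; the kernel uses the
 * SCTP_CALC_TSN_TO_GAP and SCTP_SET_TSN_PRESENT macros for this. One byte
 * of the map covers eight consecutive TSNs above mapping_array_base_tsn.
 */
static void
example_mark_tsn_present(uint8_t *map, uint32_t base_tsn, uint32_t tsn)
{
	uint32_t gap;

	/* wrap-safe distance of tsn from the base of the window */
	if (tsn >= base_tsn)
		gap = tsn - base_tsn;
	else
		gap = (0xffffffffU - base_tsn) + tsn + 1;
	map[gap >> 3] |= (uint8_t)(1 << (gap & 0x07));
}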
1990 /*
1991 * Before we continue, let's validate that we are not being fooled by
1992 * an evil attacker. We can only have N*8 chunks, based on the TSN
1993 * spread allowed by the mapping array (N bytes of 8 bits each), so there is no
1994 * way our stream sequence numbers could have wrapped. We of course
1995 * only validate the FIRST fragment, so the bit must be set.
1996 */
1997 if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
1998 (TAILQ_EMPTY(&asoc->resetHead)) &&
1999 (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
2000 SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
2001 /* The incoming sseq is behind where we last delivered? */
2002 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
2003 mid, asoc->strmin[sid].last_mid_delivered);
2004
2005 if (asoc->idata_supported) {
2006 SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
2007 asoc->strmin[sid].last_mid_delivered,
2008 tsn,
2009 sid,
2010 mid);
2011 } else {
2012 SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
2013 (uint16_t)asoc->strmin[sid].last_mid_delivered,
2014 tsn,
2015 sid,
2016 (uint16_t)mid);
2017 }
2018 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2020 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
2021 *abort_flag = 1;
2022 return (0);
2023 }
2024 if (chk_type == SCTP_IDATA) {
2025 the_len = (chk_length - sizeof(struct sctp_idata_chunk));
2026 } else {
2027 the_len = (chk_length - sizeof(struct sctp_data_chunk));
2028 }
2029 if (last_chunk == 0) {
2030 if (chk_type == SCTP_IDATA) {
2031 dmbuf = SCTP_M_COPYM(*m,
2032 (offset + sizeof(struct sctp_idata_chunk)),
2033 the_len, M_NOWAIT);
2034 } else {
2035 dmbuf = SCTP_M_COPYM(*m,
2036 (offset + sizeof(struct sctp_data_chunk)),
2037 the_len, M_NOWAIT);
2038 }
2039#ifdef SCTP_MBUF_LOGGING
2040 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2041 sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
2042 }
2043#endif
2044 } else {
2045 /* We can steal the last chunk */
2046 int l_len;
2047
2048 dmbuf = *m;
2049 /* lop off the top part */
2050 if (chk_type == SCTP_IDATA) {
2051 m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
2052 } else {
2053 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
2054 }
2055 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
2056 l_len = SCTP_BUF_LEN(dmbuf);
2057 } else {
2058 /*
2059 * need to count up the size; hopefully we do not hit
2060 * this too often :-0
2061 */
2062 struct mbuf *lat;
2063
2064 l_len = 0;
2065 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
2066 l_len += SCTP_BUF_LEN(lat);
2067 }
2068 }
2069 if (l_len > the_len) {
2070 /* Trim the end round bytes off too */
2071 m_adj(dmbuf, -(l_len - the_len));
2072 }
2073 }
2074 if (dmbuf == NULL) {
2075 SCTP_STAT_INCR(sctps_nomem);
2076 return (0);
2077 }
2078 /*
2079 * Now no matter what, we need a control, get one if we don't have
2080 * one (we may have gotten it above when we found the message was
2081 * fragmented
2082 */
2083 if (control == NULL) {
2084 sctp_alloc_a_readq(stcb, control);
2085 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2086 ppid,
2087 sid,
2088 chk_flags,
2089 NULL, fsn, mid);
2090 if (control == NULL) {
2091 SCTP_STAT_INCR(sctps_nomem);
2092 return (0);
2093 }
2094 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2095 struct mbuf *mm;
2096
2097 control->data = dmbuf;
2098 control->tail_mbuf = NULL;
2099 for (mm = control->data; mm; mm = mm->m_next) {
2100 control->length += SCTP_BUF_LEN(mm);
2101 if (SCTP_BUF_NEXT(mm) == NULL) {
2102 control->tail_mbuf = mm;
2103 }
2104 }
2105 control->end_added = 1;
2106 control->last_frag_seen = 1;
2107 control->first_frag_seen = 1;
2108 control->fsn_included = fsn;
2109 control->top_fsn = fsn;
2110 }
2111 created_control = 1;
2112 }
2113 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
2114 chk_flags, ordered, mid, control);
2115 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
2116 TAILQ_EMPTY(&asoc->resetHead) &&
2117 ((ordered == 0) ||
2118 (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
2119 TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
2120 /* Candidate for express delivery */
2121 /*
2122 * It's not fragmented, no PD-API is up, nothing is in the
2123 * delivery queue, it's un-ordered OR ordered and the next to
2124 * deliver AND nothing else is stuck on the stream queue,
2125 * And there is room for it in the socket buffer. Lets just
2126 * stuff it up the buffer....
2127 */
2128 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2129 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2130 asoc->highest_tsn_inside_nr_map = tsn;
2131 }
2132 SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2133 control, mid);
2134
2135 sctp_add_to_readq(stcb->sctp_ep, stcb,
2136 control, &stcb->sctp_socket->so_rcv,
2137 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2138
2139 if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
2140 /* for ordered, bump what we delivered */
2141 asoc->strmin[sid].last_mid_delivered++;
2142 }
2143 SCTP_STAT_INCR(sctps_recvexpress);
2144 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2145 sctp_log_strm_del_alt(stcb, tsn, mid, sid,
2146 SCTP_STR_LOG_FROM_EXPRS_DEL);
2147 }
2148 control = NULL;
2149 goto finish_express_del;
2150 }
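/*
 * Editor's note: hypothetical predicate restating the express-delivery
 * test above; all parameter names are illustrative. A chunk may bypass
 * the reassembly/reordering machinery entirely when it is a complete
 * message, no stream reset is pending, and it is either unordered or
 * exactly the next ordered MID with nothing queued ahead of it.
 */
static int
example_express_ok(int not_frag, int reset_pending, int ordered,
    uint32_t mid, uint32_t last_mid_delivered, int inqueue_empty)
{
	return (not_frag && !reset_pending &&
	    (!ordered || ((mid == last_mid_delivered + 1) && inqueue_empty)));
}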
2151
2152 /* Now will we need a chunk too? */
2153 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2154 sctp_alloc_a_chunk(stcb, chk);
2155 if (chk == NULL) {
2156 /* No memory so we drop the chunk */
2157 SCTP_STAT_INCR(sctps_nomem);
2158 if (last_chunk == 0) {
2159 /* we copied it, free the copy */
2160 sctp_m_freem(dmbuf);
2161 }
2162 return (0);
2163 }
2164 chk->rec.data.tsn = tsn;
2165 chk->no_fr_allowed = 0;
2166 chk->rec.data.fsn = fsn;
2167 chk->rec.data.mid = mid;
2168 chk->rec.data.sid = sid;
2169 chk->rec.data.ppid = ppid;
2170 chk->rec.data.context = stcb->asoc.context;
2171 chk->rec.data.doing_fast_retransmit = 0;
2172 chk->rec.data.rcv_flags = chk_flags;
2173 chk->asoc = asoc;
2174 chk->send_size = the_len;
2175 chk->whoTo = net;
2176 SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
2177 chk,
2178 control, mid);
2179 atomic_add_int(&net->ref_count, 1);
2180 chk->data = dmbuf;
2181 }
2182 /* Set the appropriate TSN mark */
2183 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2184 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2185 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2186 asoc->highest_tsn_inside_nr_map = tsn;
2187 }
2188 } else {
2189 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2190 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2191 asoc->highest_tsn_inside_map = tsn;
2192 }
2193 }
2194 /* Now is it complete (i.e. not fragmented)? */
2195 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2196 /*
2197 * Special check for when streams are resetting. We could be
2198 * smarter about this and check the actual stream to see
2199 * if it is not being reset; that way we would not create a
2200 * HOLB when amongst streams being reset and those not being
2201 * reset.
2202 *
2203 */
2204 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2205 SCTP_TSN_GT(tsn, liste->tsn)) {
2206 /*
2207 * yes, it's past where we need to reset... go ahead
2208 * and queue it.
2209 */
2210 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2211 /* first one on */
2212 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2213 } else {
2214 struct sctp_queued_to_read *lcontrol, *nlcontrol;
2215 unsigned char inserted = 0;
2216
2217 TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
2218 if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
2219 continue;
2220 } else {
2221 /* found it */
2222 TAILQ_INSERT_BEFORE(lcontrol, control, next);
2223 inserted = 1;
2224 break;
2225 }
2226 }
2227 if (inserted == 0) {
2228 /*
2229 * must be put at end, use prevP
2230 * (all setup from loop) to setup
2231 * nextP.
2232 */
2233 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2234 }
2235 }
2236 goto finish_express_del;
2237 }
2238 if (chk_flags & SCTP_DATA_UNORDERED) {
2239 /* queue directly into socket buffer */
2240 SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2241 control, mid);
2242 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2244 control,
2245 &stcb->sctp_socket->so_rcv, 1,
2247
2248 } else {
2249 SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2250 mid);
2251 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2252 if (*abort_flag) {
2253 if (last_chunk) {
2254 *m = NULL;
2255 }
2256 return (0);
2257 }
2258 }
2259 goto finish_express_del;
2260 }
2261 /* If we reach here its a reassembly */
2262 need_reasm_check = 1;
2263 SCTPDBG(SCTP_DEBUG_XXX,
2264 "Queue data to stream for reasm control: %p MID: %u\n",
2265 control, mid);
2266 sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
2267 if (*abort_flag) {
2268 /*
2269 * the assoc is now gone and chk was put onto the reasm
2270 * queue, which has all been freed.
2271 */
2272 if (last_chunk) {
2273 *m = NULL;
2274 }
2275 return (0);
2276 }
2277finish_express_del:
2278 /* Here we tidy up things */
2279 if (tsn == (asoc->cumulative_tsn + 1)) {
2280 /* Update cum-ack */
2281 asoc->cumulative_tsn = tsn;
2282 }
2283 if (last_chunk) {
2284 *m = NULL;
2285 }
2286 if (ordered) {
2287 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2288 } else {
2289 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2290 }
2291 SCTP_STAT_INCR(sctps_recvdata);
2292 /* Set it present please */
2293 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2294 sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
2295 }
2296 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2297 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2298 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2299 }
2300 if (need_reasm_check) {
2301 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
2302 need_reasm_check = 0;
2303 }
2304 /* check the special flag for stream resets */
2305 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2306 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2307 /*
2308 * we have finished working through the backlogged TSNs; now it is
2309 * time to reset streams. 1: call reset function. 2: free
2310 * pending_reply space. 3: distribute any chunks in
2311 * pending_reply_queue.
2312 */
2313 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2314 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2315 sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2316 SCTP_FREE(liste, SCTP_M_STRESET);
2317 /* sa_ignore FREED_MEMORY */
2318 liste = TAILQ_FIRST(&asoc->resetHead);
2319 if (TAILQ_EMPTY(&asoc->resetHead)) {
2320 /* All can be removed */
2321 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2322 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2323 strm = &asoc->strmin[control->sinfo_stream];
2324 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2325 if (*abort_flag) {
2326 return (0);
2327 }
2328 if (need_reasm_check) {
2329 (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
2330 need_reasm_check = 0;
2331 }
2332 }
2333 } else {
2334 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2335 if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
2336 break;
2337 }
2338 /*
2339 * if control->sinfo_tsn is <= liste->tsn we
2340 * can process it which is the NOT of
2341 * control->sinfo_tsn > liste->tsn
2342 */
2343 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2344 strm = &asoc->strmin[control->sinfo_stream];
2345 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2346 if (*abort_flag) {
2347 return (0);
2348 }
2349 if (need_reasm_check) {
2350 (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
2351 need_reasm_check = 0;
2352 }
2353 }
2354 }
2355 }
2356 return (1);
2357}
2358
2359static const int8_t sctp_map_lookup_tab[256] = {
2360 0, 1, 0, 2, 0, 1, 0, 3,
2361 0, 1, 0, 2, 0, 1, 0, 4,
2362 0, 1, 0, 2, 0, 1, 0, 3,
2363 0, 1, 0, 2, 0, 1, 0, 5,
2364 0, 1, 0, 2, 0, 1, 0, 3,
2365 0, 1, 0, 2, 0, 1, 0, 4,
2366 0, 1, 0, 2, 0, 1, 0, 3,
2367 0, 1, 0, 2, 0, 1, 0, 6,
2368 0, 1, 0, 2, 0, 1, 0, 3,
2369 0, 1, 0, 2, 0, 1, 0, 4,
2370 0, 1, 0, 2, 0, 1, 0, 3,
2371 0, 1, 0, 2, 0, 1, 0, 5,
2372 0, 1, 0, 2, 0, 1, 0, 3,
2373 0, 1, 0, 2, 0, 1, 0, 4,
2374 0, 1, 0, 2, 0, 1, 0, 3,
2375 0, 1, 0, 2, 0, 1, 0, 7,
2376 0, 1, 0, 2, 0, 1, 0, 3,
2377 0, 1, 0, 2, 0, 1, 0, 4,
2378 0, 1, 0, 2, 0, 1, 0, 3,
2379 0, 1, 0, 2, 0, 1, 0, 5,
2380 0, 1, 0, 2, 0, 1, 0, 3,
2381 0, 1, 0, 2, 0, 1, 0, 4,
2382 0, 1, 0, 2, 0, 1, 0, 3,
2383 0, 1, 0, 2, 0, 1, 0, 6,
2384 0, 1, 0, 2, 0, 1, 0, 3,
2385 0, 1, 0, 2, 0, 1, 0, 4,
2386 0, 1, 0, 2, 0, 1, 0, 3,
2387 0, 1, 0, 2, 0, 1, 0, 5,
2388 0, 1, 0, 2, 0, 1, 0, 3,
2389 0, 1, 0, 2, 0, 1, 0, 4,
2390 0, 1, 0, 2, 0, 1, 0, 3,
2391 0, 1, 0, 2, 0, 1, 0, 8
2392};
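/*
 * Editor's note: the table above maps a byte of the OR'd mapping arrays to
 * the number of consecutive 1-bits starting at bit 0, i.e. how many TSNs
 * at the front of that byte have been received. A small userland check of
 * that property (illustrative only):
 */
#include <assert.h>

static int
example_trailing_ones(unsigned int v)
{
	int n = 0;

	while (v & 1) {
		n++;
		v >>= 1;
	}
	return (n);
}

/* for (v = 0; v < 256; v++) assert(sctp_map_lookup_tab[v] == example_trailing_ones(v)); */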
2393
2394void
2395 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2396{
2397 /*
2398 * Now we also need to check the mapping array in a couple of ways.
2399 * 1) Did we move the cum-ack point?
2400 *
2401 * When you first glance at this you might think that all entries
2402 * that make up the position of the cum-ack would be in the
2403 * nr-mapping array only.. i.e. things up to the cum-ack are always
2404 * deliverable. That's true with one exception: when it's a fragmented
2405 * message we may not deliver the data until some threshold (or all
2406 * of it) is in place. So we must OR the nr_mapping_array and
2407 * mapping_array to get a true picture of the cum-ack.
2408 */
2409 struct sctp_association *asoc;
2410 int at;
2411 uint8_t val;
2412 int slide_from, slide_end, lgap, distance;
2413 uint32_t old_cumack, old_base, old_highest, highest_tsn;
2414
2415 asoc = &stcb->asoc;
2416
2417 old_cumack = asoc->cumulative_tsn;
2418 old_base = asoc->mapping_array_base_tsn;
2419 old_highest = asoc->highest_tsn_inside_map;
2420 /*
2421 * We could probably improve this a small bit by calculating the
2422 * offset of the current cum-ack as the starting point.
2423 */
2424 at = 0;
2425 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2426 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2427 if (val == 0xff) {
2428 at += 8;
2429 } else {
2430 /* there is a 0 bit */
2431 at += sctp_map_lookup_tab[val];
2432 break;
2433 }
2434 }
2435 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
2436
2437 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2438 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2439#ifdef INVARIANTS
2440 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2441 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2442#else
2443 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2444 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2445 sctp_print_mapping_array(asoc);
2446 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2447 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2448 }
2449 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2450 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2451#endif
2452 }
2453 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2454 highest_tsn = asoc->highest_tsn_inside_nr_map;
2455 } else {
2456 highest_tsn = asoc->highest_tsn_inside_map;
2457 }
2458 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2459 /* The complete array was completed by a single FR */
2460 /* highest becomes the cum-ack */
2461 int clr;
2462#ifdef INVARIANTS
2463 unsigned int i;
2464#endif
2465
2466 /* clear the array */
2467 clr = ((at + 7) >> 3);
2468 if (clr > asoc->mapping_array_size) {
2469 clr = asoc->mapping_array_size;
2470 }
2471 memset(asoc->mapping_array, 0, clr);
2472 memset(asoc->nr_mapping_array, 0, clr);
2473#ifdef INVARIANTS
2474 for (i = 0; i < asoc->mapping_array_size; i++) {
2475 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2476 SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2477 sctp_print_mapping_array(asoc);
2478 }
2479 }
2480#endif
2481 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2482 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2483 } else if (at >= 8) {
2484 /* we can slide the mapping array down */
2485 /* slide_from holds where we hit the first NON 0xff byte */
2486
2487 /*
2488 * now calculate the ceiling of the move using our highest
2489 * TSN value
2490 */
2491 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2492 slide_end = (lgap >> 3);
2493 if (slide_end < slide_from) {
2494 sctp_print_mapping_array(asoc);
2495#ifdef INVARIANTS
2496 panic("impossible slide");
2497#else
2498 SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2499 lgap, slide_end, slide_from, at);
2500 return;
2501#endif
2502 }
2503 if (slide_end > asoc->mapping_array_size) {
2504#ifdef INVARIANTS
2505 panic("would overrun buffer");
2506#else
2507 SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2508 asoc->mapping_array_size, slide_end);
2509 slide_end = asoc->mapping_array_size;
2510#endif
2511 }
2512 distance = (slide_end - slide_from) + 1;
2513 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2514 sctp_log_map(old_base, old_cumack, old_highest,
2515 SCTP_MAP_PREPARE_SLIDE);
2516 sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2517 (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
2518 }
2519 if (distance + slide_from > asoc->mapping_array_size ||
2520 distance < 0) {
2521 /*
2522 * Here we do NOT slide forward the array so that
2523 * hopefully when more data comes in to fill it up
2524 * we will be able to slide it forward. Really I
2525 * don't think this should happen :-0
2526 */
2527 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2528 sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2529 (uint32_t)asoc->mapping_array_size,
2530 SCTP_MAP_SLIDE_NONE);
2531 }
2532 } else {
2533 int ii;
2534
2535 for (ii = 0; ii < distance; ii++) {
2536 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2537 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2538 }
2539 for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2540 asoc->mapping_array[ii] = 0;
2541 asoc->nr_mapping_array[ii] = 0;
2542 }
2543 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2544 asoc->highest_tsn_inside_map += (slide_from << 3);
2545 }
2546 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2547 asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2548 }
2549 asoc->mapping_array_base_tsn += (slide_from << 3);
2550 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2551 sctp_log_map(asoc->mapping_array_base_tsn,
2552 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2553 SCTP_MAP_SLIDE_RESULT);
2554 }
2555 }
2556 }
2557}
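/*
 * Editor's note: userland sketch of the slide performed above. Once every
 * TSN covered by the first slide_from bytes is at or below the cum-ack,
 * those bytes carry no information, so the window is shifted down by whole
 * bytes and the base TSN advanced by 8 TSNs per byte. Illustrative only;
 * the kernel also bounds the copy by the highest TSN seen.
 */
#include <stdint.h>
#include <stddef.h>

static void
example_slide_map(uint8_t *map, size_t size, size_t slide_from, uint32_t *base_tsn)
{
	size_t ii;

	for (ii = 0; ii + slide_from < size; ii++)
		map[ii] = map[ii + slide_from];
	for (; ii < size; ii++)
		map[ii] = 0;
	*base_tsn += (uint32_t)(slide_from << 3);	/* 8 TSNs per byte */
}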
2558
2559void
2560sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2561{
2562 struct sctp_association *asoc;
2563 uint32_t highest_tsn;
2564 int is_a_gap;
2565
2566 SCTP_TCB_LOCK_ASSERT(stcb);
2567 asoc = &stcb->asoc;
2568 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2569 highest_tsn = asoc->highest_tsn_inside_nr_map;
2570 } else {
2571 highest_tsn = asoc->highest_tsn_inside_map;
2572 }
2573 /* Is there a gap now? */
2574 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2575
2576 /*
2577 * Now we need to see if we need to queue a sack or just start the
2578 * timer (if allowed).
2579 */
2580 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2581 /*
2582 * Ok, special case: in the SHUTDOWN-SENT case, we make
2583 * sure the SACK timer is off and instead send a SHUTDOWN and a
2584 * SACK.
2585 */
2586 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2587 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2588 stcb->sctp_ep, stcb, NULL,
2589 SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2590 }
2591 sctp_send_shutdown(stcb,
2592 ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2593 if (is_a_gap) {
2594 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2595 }
2596 } else {
2597 /*
2598 * CMT DAC algorithm: increase number of packets received
2599 * since last ack
2600 */
2601 stcb->asoc.cmt_dac_pkts_rcvd++;
2602
2603 if ((stcb->asoc.send_sack == 1) || /* We need to send a
2604 * SACK */
2605 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
2606 * longer is one */
2607 (stcb->asoc.numduptsns) || /* we have dup's */
2608 (is_a_gap) || /* is still a gap */
2609 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
2610 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)) { /* hit limit of pkts */
2611 if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2612 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2613 (stcb->asoc.send_sack == 0) &&
2614 (stcb->asoc.numduptsns == 0) &&
2615 (stcb->asoc.delayed_ack) &&
2616 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2617 /*
2618 * CMT DAC algorithm: With CMT, delay acks
2619 * even in the face of reordering.
2620 * Therefore, acks that do not have to be
2621 * sent because of the above reasons will
2622 * be delayed. That is, acks that would have
2623 * been sent due to gap reports will be
2624 * delayed with DAC. Start the delayed ack
2625 * timer.
2626 */
2627 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2628 stcb->sctp_ep, stcb, NULL);
2629 } else {
2630 /*
2631 * Ok we must build a SACK since the timer
2632 * is pending, we got our first packet OR
2633 * there are gaps or duplicates.
2634 */
2635 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
2636 SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2637 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2638 }
2639 } else {
2640 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2641 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2642 stcb->sctp_ep, stcb, NULL);
2643 }
2644 }
2645 }
2646}
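/*
 * Editor's note: condensed restatement (illustrative only) of the decision
 * just made in sctp_sack_check(): a SACK goes out immediately when any of
 * these hold; otherwise the delayed-SACK timer is (re)armed.
 */
static int
example_must_sack_now(int send_sack, int was_a_gap, int is_a_gap,
    uint32_t numduptsns, uint32_t delayed_ack,
    uint32_t data_pkts_seen, uint32_t sack_freq)
{
	return (send_sack ||				/* explicitly requested */
	    (was_a_gap && !is_a_gap) ||			/* a gap just closed */
	    (numduptsns != 0) ||			/* duplicates to report */
	    is_a_gap ||					/* a gap still exists */
	    (delayed_ack == 0) ||			/* delayed SACK disabled */
	    (data_pkts_seen >= sack_freq));		/* packet-count limit hit */
}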
2647
2648int
2649sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2650 struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2651 struct sctp_nets *net, uint32_t *high_tsn)
2652{
2653 struct sctp_chunkhdr *ch, chunk_buf;
2654 struct sctp_association *asoc;
2655 int num_chunks = 0; /* number of control chunks processed */
2656 int stop_proc = 0;
2657 int break_flag, last_chunk;
2658 int abort_flag = 0, was_a_gap;
2659 struct mbuf *m;
2660 uint32_t highest_tsn;
2661 uint16_t chk_length;
2662
2663 /* set the rwnd */
2664 sctp_set_rwnd(stcb, &stcb->asoc);
2665
2666 m = *mm;
2667 SCTP_TCB_LOCK_ASSERT(stcb);
2668 asoc = &stcb->asoc;
2669 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2670 highest_tsn = asoc->highest_tsn_inside_nr_map;
2671 } else {
2672 highest_tsn = asoc->highest_tsn_inside_map;
2673 }
2674 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2675 /*
2676 * setup where we got the last DATA packet from for any SACK that
2677 * may need to go out. Don't bump the net. This is done ONLY when a
2678 * chunk is assigned.
2679 */
2680 asoc->last_data_chunk_from = net;
2681
2682 /*-
2683 * Now before we proceed we must figure out if this is a wasted
2684 * cluster... i.e. it is a small packet sent in and yet the driver
2685 * underneath allocated a full cluster for it. If so we must copy it
2686 * to a smaller mbuf and free up the cluster mbuf. This will help
2687 * with cluster starvation.
2688 */
2689 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2690 /* we only handle mbufs that are singletons, not chains */
2691 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2692 if (m) {
2693 /* ok lets see if we can copy the data up */
2694 caddr_t *from, *to;
2695
2696 /* get the pointers and copy */
2697 to = mtod(m, caddr_t *);
2698 from = mtod((*mm), caddr_t *);
2699 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2700 /* copy the length and free up the old */
2701 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2702 sctp_m_freem(*mm);
2703 /* success, back copy */
2704 *mm = m;
2705 } else {
2706 /* We are in trouble in the mbuf world .. yikes */
2707 m = *mm;
2708 }
2709 }
2710 /* get pointer to the first chunk header */
2711 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2712 sizeof(struct sctp_chunkhdr),
2713 (uint8_t *)&chunk_buf);
2714 if (ch == NULL) {
2715 return (1);
2716 }
2717 /*
2718 * process all DATA chunks...
2719 */
2720 *high_tsn = asoc->cumulative_tsn;
2721 break_flag = 0;
2722 asoc->data_pkts_seen++;
2723 while (stop_proc == 0) {
2724 /* validate chunk length */
2725 chk_length = ntohs(ch->chunk_length);
2726 if (length - *offset < chk_length) {
2727 /* all done, mutilated chunk */
2728 stop_proc = 1;
2729 continue;
2730 }
2731 if ((asoc->idata_supported == 1) &&
2732 (ch->chunk_type == SCTP_DATA)) {
2733 struct mbuf *op_err;
2734 char msg[SCTP_DIAG_INFO_LEN];
2735
2736 SCTP_SNPRINTF(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2737 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2739 sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
2740 return (2);
2741 }
2742 if ((asoc->idata_supported == 0) &&
2743 (ch->chunk_type == SCTP_IDATA)) {
2744 struct mbuf *op_err;
2745 char msg[SCTP_DIAG_INFO_LEN];
2746
2747 SCTP_SNPRINTF(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2748 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2750 sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
2751 return (2);
2752 }
2753 if ((ch->chunk_type == SCTP_DATA) ||
2754 (ch->chunk_type == SCTP_IDATA)) {
2755 uint16_t clen;
2756
2757 if (ch->chunk_type == SCTP_DATA) {
2758 clen = sizeof(struct sctp_data_chunk);
2759 } else {
2760 clen = sizeof(struct sctp_idata_chunk);
2761 }
2762 if (chk_length < clen) {
2763 /*
2764 * Need to send an abort since we had an
2765 * invalid data chunk.
2766 */
2767 struct mbuf *op_err;
2768 char msg[SCTP_DIAG_INFO_LEN];
2769
2770 SCTP_SNPRINTF(msg, sizeof(msg), "%s chunk of length %u",
2771 ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
2772 chk_length);
2773 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2775 sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
2776 return (2);
2777 }
2778#ifdef SCTP_AUDITING_ENABLED
2779 sctp_audit_log(0xB1, 0);
2780#endif
2781 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2782 last_chunk = 1;
2783 } else {
2784 last_chunk = 0;
2785 }
2786 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2787 chk_length, net, high_tsn, &abort_flag, &break_flag,
2788 last_chunk, ch->chunk_type)) {
2789 num_chunks++;
2790 }
2791 if (abort_flag)
2792 return (2);
2793
2794 if (break_flag) {
2795 /*
2796 * Set because of out of rwnd space and no
2797 * drop rep space left.
2798 */
2799 stop_proc = 1;
2800 continue;
2801 }
2802 } else {
2803 /* not a data chunk in the data region */
2804 switch (ch->chunk_type) {
2805 case SCTP_INITIATION:
2806 case SCTP_INITIATION_ACK:
2807 case SCTP_SELECTIVE_ACK:
2808 case SCTP_NR_SELECTIVE_ACK:
2809 case SCTP_HEARTBEAT_REQUEST:
2810 case SCTP_HEARTBEAT_ACK:
2811 case SCTP_ABORT_ASSOCIATION:
2812 case SCTP_SHUTDOWN:
2813 case SCTP_SHUTDOWN_ACK:
2814 case SCTP_OPERATION_ERROR:
2815 case SCTP_COOKIE_ECHO:
2816 case SCTP_COOKIE_ACK:
2817 case SCTP_ECN_ECHO:
2818 case SCTP_ECN_CWR:
2819 case SCTP_SHUTDOWN_COMPLETE:
2820 case SCTP_AUTHENTICATION:
2821 case SCTP_ASCONF_ACK:
2822 case SCTP_PACKET_DROPPED:
2823 case SCTP_STREAM_RESET:
2824 case SCTP_FORWARD_CUM_TSN:
2825 case SCTP_ASCONF:
2826 {
2827 /*
2828 * Now, what do we do with KNOWN
2829 * chunks that are NOT in the right
2830 * place?
2831 *
2832 * For now, I do nothing but ignore
2833 * them. We may later want to add
2834 * sysctl stuff to switch out and do
2835 * either an ABORT() or possibly
2836 * process them.
2837 */
2838 struct mbuf *op_err;
2839 char msg[SCTP_DIAG_INFO_LEN];
2840
2841 SCTP_SNPRINTF(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2842 ch->chunk_type);
2843 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2844 sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
2845 return (2);
2846 }
2847 default:
2848 /*
2849 * Unknown chunk type: use bit rules after
2850 * checking length
2851 */
2852 if (chk_length < sizeof(struct sctp_chunkhdr)) {
2853 /*
2854 * Need to send an abort since we
2855 * had an invalid chunk.
2856 */
2857 struct mbuf *op_err;
2858 char msg[SCTP_DIAG_INFO_LEN];
2859
2860 SCTP_SNPRINTF(msg, sizeof(msg), "Chunk of length %u", chk_length);
2861 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2863 sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
2864 return (2);
2865 }
2866 if (ch->chunk_type & 0x40) {
2867 /* Add an error report to the queue */
2868 struct mbuf *op_err;
2869 struct sctp_gen_error_cause *cause;
2870
2871 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2872 0, M_NOWAIT, 1, MT_DATA);
2873 if (op_err != NULL) {
2874 cause = mtod(op_err, struct sctp_gen_error_cause *);
2875 cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2876 cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
2877 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2878 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2879 if (SCTP_BUF_NEXT(op_err) != NULL) {
2880 sctp_queue_op_err(stcb, op_err);
2881 } else {
2882 sctp_m_freem(op_err);
2883 }
2884 }
2885 }
2886 if ((ch->chunk_type & 0x80) == 0) {
2887 /* discard the rest of this packet */
2888 stop_proc = 1;
2889 } /* else skip this bad chunk and
2890 * continue... */
2891 break;
2892 } /* switch of chunk type */
2893 }
2894 *offset += SCTP_SIZE32(chk_length);
2895 if ((*offset >= length) || stop_proc) {
2896 /* no more data left in the mbuf chain */
2897 stop_proc = 1;
2898 continue;
2899 }
2900 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2901 sizeof(struct sctp_chunkhdr),
2902 (uint8_t *)&chunk_buf);
2903 if (ch == NULL) {
2904 *offset = length;
2905 stop_proc = 1;
2906 continue;
2907 }
2908 }
2909 if (break_flag) {
2910 /*
2911 * we need to report rwnd overrun drops.
2912 */
2913 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2914 }
2915 if (num_chunks) {
2916 /*
2917 * Did we get data? If so, update the time for auto-close and
2918 * give peer credit for being alive.
2919 */
2920 SCTP_STAT_INCR(sctps_recvpktwithdata);
2921 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2922 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2923 stcb->asoc.overall_error_count,
2924 0,
2925 SCTP_FROM_SCTP_INDATA,
2926 __LINE__);
2927 }
2928 stcb->asoc.overall_error_count = 0;
2929 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2930 }
2931 /* now service all of the reassm queue if needed */
2932 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2933 /* Assure that we ack right away */
2934 stcb->asoc.send_sack = 1;
2935 }
2936 /* Start a sack timer or QUEUE a SACK for sending */
2937 sctp_sack_check(stcb, was_a_gap);
2938 return (0);
2939}
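/*
 * Editor's note: minimal userland sketch of the chunk walk performed by
 * sctp_process_data(). Chunks are type/flags/length TLVs whose on-wire
 * length excludes padding, so the cursor advances by the length rounded up
 * to a multiple of 4, as the SCTP_SIZE32() macro does. Illustrative only;
 * it assumes a contiguous buffer rather than an mbuf chain.
 */
#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <arpa/inet.h>

struct example_chunkhdr {
	uint8_t  chunk_type;
	uint8_t  chunk_flags;
	uint16_t chunk_length;	/* network byte order, excludes padding */
};

static void
example_walk_chunks(const uint8_t *pkt, size_t len)
{
	size_t off = 0;
	struct example_chunkhdr ch;
	uint16_t clen;

	while (off + sizeof(ch) <= len) {
		memcpy(&ch, pkt + off, sizeof(ch));	/* alignment-safe fetch */
		clen = ntohs(ch.chunk_length);
		if (clen < sizeof(ch) || len - off < clen)
			break;	/* mutilated chunk: stop processing */
		/* ... dispatch on ch.chunk_type here ... */
		off += ((size_t)clen + 3) & ~(size_t)3;	/* SCTP_SIZE32-style pad */
	}
}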
2940
2941static int
2942sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2943 uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2944 int *num_frs,
2945 uint32_t *biggest_newly_acked_tsn,
2946 uint32_t *this_sack_lowest_newack,
2947 int *rto_ok)
2948{
2949 struct sctp_tmit_chunk *tp1;
2950 unsigned int theTSN;
2951 int j, wake_him = 0, circled = 0;
2952
2953 /* Recover the tp1 we last saw */
2954 tp1 = *p_tp1;
2955 if (tp1 == NULL) {
2956 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2957 }
2958 for (j = frag_strt; j <= frag_end; j++) {
2959 theTSN = j + last_tsn;
2960 while (tp1) {
2961 if (tp1->rec.data.doing_fast_retransmit)
2962 (*num_frs) += 1;
2963
2964 /*-
2965 * CMT: CUCv2 algorithm. For each TSN being
2966 * processed from the sent queue, track the
2967 * next expected pseudo-cumack, or
2968 * rtx_pseudo_cumack, if required. Separate
2969 * cumack trackers for first transmissions,
2970 * and retransmissions.
2971 */
2972 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2973 (tp1->whoTo->find_pseudo_cumack == 1) &&
2974 (tp1->snd_count == 1)) {
2975 tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
2976 tp1->whoTo->find_pseudo_cumack = 0;
2977 }
2978 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2979 (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2980 (tp1->snd_count > 1)) {
2981 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
2982 tp1->whoTo->find_rtx_pseudo_cumack = 0;
2983 }
2984 if (tp1->rec.data.tsn == theTSN) {
2985 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2986 /*-
2987 * must be held until
2988 * cum-ack passes
2989 */
2990 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2991 /*-
2992 * If it is less than RESEND, it is
2993 * now no-longer in flight.
2994 * Higher values may already be set
2995 * via previous Gap Ack Blocks...
2996 * i.e. ACKED or RESEND.
2997 */
2998 if (SCTP_TSN_GT(tp1->rec.data.tsn,
2999 *biggest_newly_acked_tsn)) {
3000 *biggest_newly_acked_tsn = tp1->rec.data.tsn;
3001 }
3002 /*-
3003 * CMT: SFR algo (and HTNA) - set
3004 * saw_newack to 1 for dest being
3005 * newly acked. update
3006 * this_sack_highest_newack if
3007 * appropriate.
3008 */
3009 if (tp1->rec.data.chunk_was_revoked == 0)
3010 tp1->whoTo->saw_newack = 1;
3011
3012 if (SCTP_TSN_GT(tp1->rec.data.tsn,
3013 tp1->whoTo->this_sack_highest_newack)) {
3014 tp1->whoTo->this_sack_highest_newack =
3015 tp1->rec.data.tsn;
3016 }
3017 /*-
3018 * CMT DAC algo: also update
3019 * this_sack_lowest_newack
3020 */
3021 if (*this_sack_lowest_newack == 0) {
3022 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3023 sctp_log_sack(*this_sack_lowest_newack,
3024 last_tsn,
3025 tp1->rec.data.tsn,
3026 0,
3027 0,
3028 SCTP_LOG_TSN_ACKED);
3029 }
3030 *this_sack_lowest_newack = tp1->rec.data.tsn;
3031 }
3032 /*-
3033 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
3034 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
3035 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
3036 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
3037 * Separate pseudo_cumack trackers for first transmissions and
3038 * retransmissions.
3039 */
3040 if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
3041 if (tp1->rec.data.chunk_was_revoked == 0) {
3042 tp1->whoTo->new_pseudo_cumack = 1;
3043 }
3044 tp1->whoTo->find_pseudo_cumack = 1;
3045 }
3046 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3047 sctp_log_cwnd(stcb, tp1->whoTo, tp1->send_size, SCTP_CWND_LOG_FROM_SACK);
3048 }
3049 if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
3050 if (tp1->rec.data.chunk_was_revoked == 0) {
3051 tp1->whoTo->new_pseudo_cumack = 1;
3052 }
3053 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3054 }
3055 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3056 sctp_log_sack(*biggest_newly_acked_tsn,
3057 last_tsn,
3058 tp1->rec.data.tsn,
3059 frag_strt,
3060 frag_end,
3061 SCTP_LOG_TSN_ACKED);
3062 }
3063 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3064 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3065 tp1->whoTo->flight_size,
3066 tp1->book_size,
3067 (uint32_t)(uintptr_t)tp1->whoTo,
3068 tp1->rec.data.tsn);
3069 }
3070 sctp_flight_size_decrease(tp1);
3071 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3072 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3073 tp1);
3074 }
3075 sctp_total_flight_decrease(stcb, tp1);
3076
3077 tp1->whoTo->net_ack += tp1->send_size;
3078 if (tp1->snd_count < 2) {
3079 /*-
3080 * True non-retransmitted chunk
3081 */
3082 tp1->whoTo->net_ack2 += tp1->send_size;
3083
3084 /*-
3085 * update RTO too ?
3086 */
3087 if (tp1->do_rtt) {
3088 if (*rto_ok &&
3089 sctp_calculate_rto(stcb,
3090 &stcb->asoc,
3091 tp1->whoTo,
3092 &tp1->sent_rcv_time,
3093 SCTP_RTT_FROM_DATA)) {
3094 *rto_ok = 0;
3095 }
3096 if (tp1->whoTo->rto_needed == 0) {
3097 tp1->whoTo->rto_needed = 1;
3098 }
3099 tp1->do_rtt = 0;
3100 }
3101 }
3102 }
3103 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3104 if (SCTP_TSN_GT(tp1->rec.data.tsn,
3105 stcb->asoc.this_sack_highest_gap)) {
3106 stcb->asoc.this_sack_highest_gap =
3107 tp1->rec.data.tsn;
3108 }
3109 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3110 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3111#ifdef SCTP_AUDITING_ENABLED
3112 sctp_audit_log(0xB2,
3113 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3114#endif
3115 }
3116 }
3117 /*-
3118 * All chunks NOT UNSENT fall through here and are marked
3119 * (leave PR-SCTP ones that are to skip alone though)
3120 */
3121 if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
3122 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3123 tp1->sent = SCTP_DATAGRAM_MARKED;
3124 }
3125 if (tp1->rec.data.chunk_was_revoked) {
3126 /* deflate the cwnd */
3127 tp1->whoTo->cwnd -= tp1->book_size;
3128 tp1->rec.data.chunk_was_revoked = 0;
3129 }
3130 /* NR Sack code here */
3131 if (nr_sacking &&
3132 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3133 if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3134 stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
3135#ifdef INVARIANTS
3136 } else {
3137 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
3138#endif
3139 }
3140 if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3141 (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3142 TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3143 stcb->asoc.trigger_reset = 1;
3144 }
3145 tp1->sent = SCTP_DATAGRAM_NR_ACKED;
3146 if (tp1->data) {
3147 /*
3148 * sa_ignore
3149 * NO_NULL_CHK
3150 */
3151 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3152 sctp_m_freem(tp1->data);
3153 tp1->data = NULL;
3154 }
3155 wake_him++;
3156 }
3157 }
3158 break;
3159 } /* if (tp1->tsn == theTSN) */
3160 if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
3161 break;
3162 }
3163 tp1 = TAILQ_NEXT(tp1, sctp_next);
3164 if ((tp1 == NULL) && (circled == 0)) {
3165 circled++;
3166 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3167 }
3168 } /* end while (tp1) */
3169 if (tp1 == NULL) {
3170 circled = 0;
3171 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3172 }
3173 /* In case the fragments were not in order we must reset */
3174 } /* end for (j = fragStart */
3175 *p_tp1 = tp1;
3176 return (wake_him); /* Return value only used for nr-sack */
3177}
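/*
 * Editor's note: a SACK gap-ack block reports the TSN range
 * [last_tsn + start, last_tsn + end] (offsets relative to the cumulative
 * TSN ack) as received. This illustrative helper expands one block into
 * the absolute TSNs that the loop above walks with theTSN = j + last_tsn.
 */
static void
example_gap_block_tsns(uint32_t last_tsn, uint16_t frag_strt, uint16_t frag_end,
    void (*acked_cb)(uint32_t tsn))
{
	unsigned int j;

	for (j = frag_strt; j <= frag_end; j++)
		acked_cb(last_tsn + j);
}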
3178
3179static int
3180sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3181 uint32_t last_tsn, uint32_t *biggest_tsn_acked,
3182 uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
3183 int num_seg, int num_nr_seg, int *rto_ok)
3184{
3185 struct sctp_gap_ack_block *frag, block;
3186 struct sctp_tmit_chunk *tp1;
3187 int i;
3188 int num_frs = 0;
3189 int chunk_freed;
3190 int non_revocable;
3191 uint16_t frag_strt, frag_end, prev_frag_end;
3192
3193 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3194 prev_frag_end = 0;
3195 chunk_freed = 0;
3196
3197 for (i = 0; i < (num_seg + num_nr_seg); i++) {
3198 if (i == num_seg) {
3199 prev_frag_end = 0;
3200 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3201 }
3202 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3203 sizeof(struct sctp_gap_ack_block), (uint8_t *)&block);
3204 *offset += sizeof(block);
3205 if (frag == NULL) {
3206 return (chunk_freed);
3207 }
3208 frag_strt = ntohs(frag->start);
3209 frag_end = ntohs(frag->end);
3210
3211 if (frag_strt > frag_end) {
3212 /* This gap report is malformed, skip it. */
3213 continue;
3214 }
3215 if (frag_strt <= prev_frag_end) {
3216 /* This gap report is not in order, so restart. */
3217 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3218 }
3219 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3220 *biggest_tsn_acked = last_tsn + frag_end;
3221 }
3222 if (i < num_seg) {
3223 non_revocable = 0;
3224 } else {
3225 non_revocable = 1;
3226 }
3227 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3228 non_revocable, &num_frs, biggest_newly_acked_tsn,
3229 this_sack_lowest_newack, rto_ok)) {
3230 chunk_freed = 1;
3231 }
3232 prev_frag_end = frag_end;
3233 }
3234 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3235 if (num_frs)
3236 sctp_log_fr(*biggest_tsn_acked,
3237 *biggest_newly_acked_tsn,
3238 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3239 }
3240 return (chunk_freed);
3241}
3242
3243static void
3244 sctp_check_for_revoked(struct sctp_tcb *stcb,
3245 struct sctp_association *asoc, uint32_t cumack,
3246 uint32_t biggest_tsn_acked)
3247{
3248 struct sctp_tmit_chunk *tp1;
3249
3250 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3251 if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3252 /*
3253 * ok this guy is either ACK or MARKED. If it is
3254 * ACKED it has been previously acked but not this
3255 * time i.e. revoked. If it is MARKED it was ACK'ed
3256 * again.
3257 */
3258 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3259 break;
3260 }
3261 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3262 /* it has been revoked */
3263 tp1->sent = SCTP_DATAGRAM_SENT;
3264 tp1->rec.data.chunk_was_revoked = 1;
3265 /*
3266 * We must add this stuff back in to assure
3267 * timers and such get started.
3268 */
3269 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3270 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3271 tp1->whoTo->flight_size,
3272 tp1->book_size,
3273 (uint32_t)(uintptr_t)tp1->whoTo,
3274 tp1->rec.data.tsn);
3275 }
3276 sctp_flight_size_increase(tp1);
3277 sctp_total_flight_increase(stcb, tp1);
3278 /*
3279 * We inflate the cwnd to compensate for our
3280 * artificial inflation of the flight_size.
3281 */
3282 tp1->whoTo->cwnd += tp1->book_size;
3283 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3284 sctp_log_sack(asoc->last_acked_seq,
3285 cumack,
3286 tp1->rec.data.tsn,
3287 0,
3288 0,
3289 SCTP_LOG_TSN_REVOKED);
3290 }
3291 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3292 /* it has been re-acked in this SACK */
3293 tp1->sent = SCTP_DATAGRAM_ACKED;
3294 }
3295 }
3296 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3297 break;
3298 }
3299}
3300
3301static void
3302 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3303 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3304{
3305 struct sctp_tmit_chunk *tp1;
3306 int strike_flag = 0;
3307 struct timeval now;
3308 int tot_retrans = 0;
3309 uint32_t sending_seq;
3310 struct sctp_nets *net;
3311 int num_dests_sacked = 0;
3312
3313 /*
3314 * select the sending_seq, this is either the next thing ready to be
3315 * sent but not transmitted, OR, the next seq we assign.
3316 */
3317 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3318 if (tp1 == NULL) {
3319 sending_seq = asoc->sending_seq;
3320 } else {
3321 sending_seq = tp1->rec.data.tsn;
3322 }
3323
3324 /* CMT DAC algo: finding out if SACK is a mixed SACK */
3325 if ((asoc->sctp_cmt_on_off > 0) &&
3326 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3327 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3328 if (net->saw_newack)
3329 num_dests_sacked++;
3330 }
3331 }
3332 if (stcb->asoc.prsctp_supported) {
3333 (void)SCTP_GETTIME_TIMEVAL(&now);
3334 }
3335 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3336 strike_flag = 0;
3337 if (tp1->no_fr_allowed) {
3338 /* this one had a timeout or something */
3339 continue;
3340 }
3341 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3342 if (tp1->sent < SCTP_DATAGRAM_RESEND)
3343 sctp_log_fr(biggest_tsn_newly_acked,
3344 tp1->rec.data.tsn,
3345 tp1->sent,
3346 SCTP_FR_LOG_CHECK_STRIKE);
3347 }
3348 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3349 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3350 /* done */
3351 break;
3352 }
3353 if (stcb->asoc.prsctp_supported) {
3354 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3355 /* Is it expired? */
3356 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3357 /* Yes so drop it */
3358 if (tp1->data != NULL) {
3359 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3360 SCTP_SO_NOT_LOCKED);
3361 }
3362 continue;
3363 }
3364 }
3365 }
3366 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) &&
3367 !(accum_moved && asoc->fast_retran_loss_recovery)) {
3368 /* we are beyond the tsn in the sack */
3369 break;
3370 }
3371 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3372 /* either a RESEND, ACKED, or MARKED */
3373 /* skip */
3374 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3375 /* Continue striking FWD-TSN chunks */
3376 tp1->rec.data.fwd_tsn_cnt++;
3377 }
3378 continue;
3379 }
3380 /*
3381 * CMT : SFR algo (covers part of DAC and HTNA as well)
3382 */
3383 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3384 /*
3385 * No new acks were received for data sent to this
3386 * dest. Therefore, according to the SFR algo for
3387 * CMT, no data sent to this dest can be marked for
3388 * FR using this SACK.
3389 */
3390 continue;
3391 } else if (tp1->whoTo &&
3392 SCTP_TSN_GT(tp1->rec.data.tsn,
3393 tp1->whoTo->this_sack_highest_newack) &&
3394 !(accum_moved && asoc->fast_retran_loss_recovery)) {
3395 /*
3396 * CMT: New acks were received for data sent to
3397 * this dest. But no new acks were seen for data
3398 * sent after tp1. Therefore, according to the SFR
3399 * algo for CMT, tp1 cannot be marked for FR using
3400 * this SACK. This step covers part of the DAC algo
3401 * and the HTNA algo as well.
3402 */
3403 continue;
3404 }
3405 /*
3406 * Here we check to see if we have already done a FR
3407 * and if so we see if the biggest TSN we saw in the sack is
3408 * smaller than the recovery point. If so we don't strike
3409 * the tsn... otherwise we CAN strike the TSN.
3410 */
3411 /*
3412 * @@@ JRI: Check for CMT if (accum_moved &&
3413 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3414 * 0)) {
3415 */
3416 if (accum_moved && asoc->fast_retran_loss_recovery) {
3417 /*
3418 * Strike the TSN if in fast-recovery and cum-ack
3419 * moved.
3420 */
3421 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3422 sctp_log_fr(biggest_tsn_newly_acked,
3423 tp1->rec.data.tsn,
3424 tp1->sent,
3425 SCTP_FR_LOG_STRIKE_CHUNK);
3426 }
3427 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3428 tp1->sent++;
3429 }
3430 if ((asoc->sctp_cmt_on_off > 0) &&
3431 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3432 /*
3433 * CMT DAC algorithm: If SACK flag is set to
3434 * 0, then lowest_newack test will not pass
3435 * because it would have been set to the
3436 * cumack earlier. If it is not already to be
3437 * rtx'd, if this is not a mixed sack, and if tp1 is
3438 * not between two sacked TSNs, then mark it by
3439 * one more. NOTE that we are marking by one
3440 * additional time since the SACK DAC flag
3441 * indicates that two packets have been
3442 * received after this missing TSN.
3443 */
3444 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3445 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3446 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3447 sctp_log_fr(16 + num_dests_sacked,
3448 tp1->rec.data.tsn,
3449 tp1->sent,
3450 SCTP_FR_LOG_STRIKE_CHUNK);
3451 }
3452 tp1->sent++;
3453 }
3454 }
3455 } else if ((tp1->rec.data.doing_fast_retransmit) &&
3456 (asoc->sctp_cmt_on_off == 0)) {
3457 /*
3458 * For those that have done a FR we must take
3459 * special consideration if we strike. I.e the
3460 * biggest_newly_acked must be higher than the
3461 * sending_seq at the time we did the FR.
3462 */
3463 if (
3464#ifdef SCTP_FR_TO_ALTERNATE
3465 /*
3466 * If FR's go to new networks, then we must only do
3467 * this for singly homed asoc's. However if the FR's
3469 * go to the same network (Armando's work) then it's
3469 * ok to FR multiple times.
3470 */
3471 (asoc->numnets < 2)
3472#else
3473 (1)
3474#endif
3475 ) {
3476 if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3477 tp1->rec.data.fast_retran_tsn)) {
3478 /*
3479 * Strike the TSN, since this ack is
3480 * beyond where things were when we
3481 * did a FR.
3482 */
3483 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3484 sctp_log_fr(biggest_tsn_newly_acked,
3485 tp1->rec.data.tsn,
3486 tp1->sent,
3487 SCTP_FR_LOG_STRIKE_CHUNK);
3488 }
3489 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3490 tp1->sent++;
3491 }
3492 strike_flag = 1;
3493 if ((asoc->sctp_cmt_on_off > 0) &&
3494 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3495 /*
3496 * CMT DAC algorithm: If
3497 * SACK flag is set to 0,
3498 * then lowest_newack test
3499 * will not pass because it
3500 * would have been set to
3501 * the cumack earlier. If
3502 * it is not already to be rtx'd,
3503 * if this is not a mixed sack, and
3504 * if tp1 is not between two
3505 * sacked TSNs, then mark it by
3506 * one more. NOTE that we
3507 * are marking by one
3508 * additional time since the
3509 * SACK DAC flag indicates
3510 * that two packets have
3511 * been received after this
3512 * missing TSN.
3513 */
3514 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3515 (num_dests_sacked == 1) &&
3516 SCTP_TSN_GT(this_sack_lowest_newack,
3517 tp1->rec.data.tsn)) {
3518 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3519 sctp_log_fr(32 + num_dests_sacked,
3520 tp1->rec.data.tsn,
3521 tp1->sent,
3522 SCTP_FR_LOG_STRIKE_CHUNK);
3523 }
3524 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3525 tp1->sent++;
3526 }
3527 }
3528 }
3529 }
3530 }
3531 /*
3532 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3533 * algo covers HTNA.
3534 */
3535 } else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3536 biggest_tsn_newly_acked)) {
3537 /*
3538 * We don't strike these: This is the HTNA
3539 * algorithm i.e. we don't strike If our TSN is
3540 * larger than the Highest TSN Newly Acked.
3541 */
3542 ;
3543 } else {
3544 /* Strike the TSN */
3545 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3546 sctp_log_fr(biggest_tsn_newly_acked,
3547 tp1->rec.data.tsn,
3548 tp1->sent,
3549 SCTP_FR_LOG_STRIKE_CHUNK);
3550 }
3551 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3552 tp1->sent++;
3553 }
3554 if ((asoc->sctp_cmt_on_off > 0) &&
3555 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3556 /*
3557 * CMT DAC algorithm: If SACK flag is set to
3558 * 0, then lowest_newack test will not pass
3559 * because it would have been set to the
3560 * cumack earlier. If it is not already to be
3561 * rtx'd, if this is not a mixed sack, and if tp1 is
3562 * not between two sacked TSNs, then mark it by
3563 * one more. NOTE that we are marking by one
3564 * additional time since the SACK DAC flag
3565 * indicates that two packets have been
3566 * received after this missing TSN.
3567 */
3568 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3569 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3570 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3571 sctp_log_fr(48 + num_dests_sacked,
3572 tp1->rec.data.tsn,
3573 tp1->sent,
3574 SCTP_FR_LOG_STRIKE_CHUNK);
3575 }
3576 tp1->sent++;
3577 }
3578 }
3579 }
3580 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3581 struct sctp_nets *alt;
3582
3583 /* fix counts and things */
3584 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3585 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3586 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3587 tp1->book_size,
3588 (uint32_t)(uintptr_t)tp1->whoTo,
3589 tp1->rec.data.tsn);
3590 }
3591 if (tp1->whoTo) {
3592 tp1->whoTo->net_ack++;
3593 sctp_flight_size_decrease(tp1);
3594 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3595 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3596 tp1);
3597 }
3598 }
3599
3600 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3601 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3602 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3603 }
3604 /* add back to the rwnd */
3605 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3606
3607 /* remove from the total flight */
3608 sctp_total_flight_decrease(stcb, tp1);
3609
3610 if ((stcb->asoc.prsctp_supported) &&
3611 (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3612 /*
3613 * Has it been retransmitted tv_sec times? -
3614 * we store the retran count there.
3615 */
3616 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3617 /* Yes, so drop it */
3618 if (tp1->data != NULL) {
3619 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3620 SCTP_SO_NOT_LOCKED);
3621 }
3622 /* Make sure to flag we had a FR */
3623 if (tp1->whoTo != NULL) {
3624 tp1->whoTo->net_ack++;
3625 }
3626 continue;
3627 }
3628 }
3629 /*
3630 * SCTP_PRINTF("OK, we are now ready to FR this
3631 * guy\n");
3632 */
3633 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3634 sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3635 0, SCTP_FR_MARKED);
3636 }
3637 if (strike_flag) {
3638 /* This is a subsequent FR */
3639 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3640 }
3641 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3642 if (asoc->sctp_cmt_on_off > 0) {
3643 /*
3644 * CMT: Using RTX_SSTHRESH policy for CMT.
3645 * If CMT is being used, then pick dest with
3646 * largest ssthresh for any retransmission.
3647 */
3648 tp1->no_fr_allowed = 1;
3649 alt = tp1->whoTo;
3650 /* sa_ignore NO_NULL_CHK */
3651 if (asoc->sctp_cmt_pf > 0) {
3652 /*
3653 * JRS 5/18/07 - If CMT PF is on,
3654 * use the PF version of
3655 * find_alt_net()
3656 */
3657 alt = sctp_find_alternate_net(stcb, alt, 2);
3658 } else {
3659 /*
3660 * JRS 5/18/07 - If only CMT is on,
3661 * use the CMT version of
3662 * find_alt_net()
3663 */
3664 /* sa_ignore NO_NULL_CHK */
3665 alt = sctp_find_alternate_net(stcb, alt, 1);
3666 }
3667 if (alt == NULL) {
3668 alt = tp1->whoTo;
3669 }
3670 /*
3671 * CUCv2: If a different dest is picked for
3672 * the retransmission, then new
3673 * (rtx-)pseudo_cumack needs to be tracked
3674 * for orig dest. Let CUCv2 track new (rtx-)
3675 * pseudo-cumack always.
3676 */
3677 if (tp1->whoTo) {
3678 tp1->whoTo->find_pseudo_cumack = 1;
3679 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3680 }
3681 } else { /* CMT is OFF */
3682#ifdef SCTP_FR_TO_ALTERNATE
3683 /* Can we find an alternate? */
3684 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3685#else
3686 /*
3687 * default behavior is to NOT retransmit
3688 * FR's to an alternate. Armando Caro's
3689 * paper details why.
3690 */
3691 alt = tp1->whoTo;
3692#endif
3693 }
3694
3695 tp1->rec.data.doing_fast_retransmit = 1;
3696 tot_retrans++;
3697 /* mark the sending seq for possible subsequent FR's */
3698 /*
3699 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3700 * (uint32_t)tpi->rec.data.tsn);
3701 */
3702 if (TAILQ_EMPTY(&asoc->send_queue)) {
3703 /*
3704 * If the send queue is empty, then it's
3705 * the next sequence number that will be
3706 * assigned so we subtract one from this to
3707 * get the one we last sent.
3708 */
3709 tp1->rec.data.fast_retran_tsn = sending_seq;
3710 } else {
3711 /*
3712 * If there are chunks on the send queue
3713 * (unsent data that has made it from the
3714 * stream queues but not out the door), we
3715 * take the first one (which will have the
3716 * lowest TSN) and subtract one to get the
3717 * one we last sent.
3718 */
3719 struct sctp_tmit_chunk *ttt;
3720
3721 ttt = TAILQ_FIRST(&asoc->send_queue);
3722 tp1->rec.data.fast_retran_tsn =
3723 ttt->rec.data.tsn;
3724 }
3725
3726 if (tp1->do_rtt) {
3727 /*
3728 * this guy had a RTO calculation pending on
3729 * it, cancel it
3730 */
3731 if ((tp1->whoTo != NULL) &&
3732 (tp1->whoTo->rto_needed == 0)) {
3733 tp1->whoTo->rto_needed = 1;
3734 }
3735 tp1->do_rtt = 0;
3736 }
3737 if (alt != tp1->whoTo) {
3738 /* yes, there is an alternate. */
3739 sctp_free_remote_addr(tp1->whoTo);
3740 /* sa_ignore FREED_MEMORY */
3741 tp1->whoTo = alt;
3742 atomic_add_int(&alt->ref_count, 1);
3743 }
3744 }
3745 }
3746}
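/*
 * Editor's note: illustrative sketch of the strike counting done above.
 * Each SACK that implies a chunk is missing bumps tp1->sent by one until
 * it reaches the resend threshold (SCTP_DATAGRAM_RESEND in the kernel; the
 * constant value used here is hypothetical), at which point the chunk is
 * marked for fast retransmission.
 */
#define EXAMPLE_RESEND_THRESHOLD 4	/* illustrative, not the kernel value */

static int
example_strike(int *sent)
{
	if (*sent < EXAMPLE_RESEND_THRESHOLD)
		(*sent)++;
	/* returns nonzero once the chunk should be fast-retransmitted */
	return (*sent == EXAMPLE_RESEND_THRESHOLD);
}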
3747
3748struct sctp_tmit_chunk *
3749 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3750 struct sctp_association *asoc)
3751{
3752 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3753 struct timeval now;
3754 int now_filled = 0;
3755
3756 if (asoc->prsctp_supported == 0) {
3757 return (NULL);
3758 }
3759 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3760 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3761 tp1->sent != SCTP_DATAGRAM_RESEND &&
3762 tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3763 /* no chance to advance, out of here */
3764 break;
3765 }
3766 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3767 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3768 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3769 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3770 asoc->advanced_peer_ack_point,
3771 tp1->rec.data.tsn, 0, 0);
3772 }
3773 }
3774 if (!PR_SCTP_ENABLED(tp1->flags)) {
3775 /*
3776 * We can't fwd-tsn past any that are reliable aka
3777 * retransmitted until the asoc fails.
3778 */
3779 break;
3780 }
3781 if (!now_filled) {
3782 (void)SCTP_GETTIME_TIMEVAL(&now);
3783 now_filled = 1;
3784 }
3785 /*
3786 * now we got a chunk which is marked for another
3787 * retransmission to a PR-stream but has run out its chances
3788 * already maybe OR has been marked to skip now. Can we skip
3789 * it if it's a resend?
3790 */
3791 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3792 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3793 /*
3794 * Now is this one marked for resend and its time is
3795 * now up?
3796 */
3797 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3798 /* Yes so drop it */
3799 if (tp1->data) {
3800 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3801 1, SCTP_SO_NOT_LOCKED);
3802 }
3803 } else {
3804 /*
3805 * No, we are done when we hit one marked for resend
3806 * whose time has not expired.
3807 */
3808 break;
3809 }
3810 }
3811 /*
3812 * Ok now if this chunk is marked to drop it we can clean up
3813 * the chunk, advance our peer ack point and we can check
3814 * the next chunk.
3815 */
3816 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3817 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3818 /* advance PeerAckPoint goes forward */
3819 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
3820 asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3821 a_adv = tp1;
3822 } else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3823 /* No update but we do save the chk */
3824 a_adv = tp1;
3825 }
3826 } else {
3827 /*
3828 * If it is still in RESEND we can advance no
3829 * further
3830 */
3831 break;
3832 }
3833 }
3834 return (a_adv);
3835}
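/*
 * Editor's note: illustrative restatement of the advance rule implemented
 * above for PR-SCTP FORWARD-TSN generation: the Advanced.Peer.Ack.Point
 * may move across consecutively abandoned chunks (skipped or NR-acked)
 * but must stop at the first chunk that can still be retransmitted.
 */
#include <stdint.h>
#include <stddef.h>

static uint32_t
example_advance_ack_point(uint32_t ack_point, const uint32_t *tsns,
    const int *abandoned, size_t cnt)
{
	size_t i;

	for (i = 0; i < cnt; i++) {
		if (!abandoned[i])
			break;		/* still retransmittable: stop here */
		ack_point = tsns[i];	/* safe to tell the peer to skip ahead */
	}
	return (ack_point);
}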
3836
3837static int
3838 sctp_fs_audit(struct sctp_association *asoc)
3839{
3840 struct sctp_tmit_chunk *chk;
3841 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3842 int ret;
3843#ifndef INVARIANTS
3844 int entry_flight, entry_cnt;
3845#endif
3846
3847 ret = 0;
3848#ifndef INVARIANTS
3849 entry_flight = asoc->total_flight;
3850 entry_cnt = asoc->total_flight_count;
3851#endif
3852 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3853 return (0);
3854
3855 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3856 if (chk->sent < SCTP_DATAGRAM_RESEND) {
3857 SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3858 chk->rec.data.tsn,
3859 chk->send_size,
3860 chk->snd_count);
3861 inflight++;
3862 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3863 resend++;
3864 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3865 inbetween++;
3866 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3867 above++;
3868 } else {
3869 acked++;
3870 }
3871 }
3872
3873 if ((inflight > 0) || (inbetween > 0)) {
3874#ifdef INVARIANTS
3875 panic("Flight size-express incorrect? \n");
3876#else
3877 SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3878 entry_flight, entry_cnt);
3879
3880 SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3881 inflight, inbetween, resend, above, acked);
3882 ret = 1;
3883#endif
3884 }
3885 return (ret);
3886}
3887
3888static void
3889 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3890 struct sctp_association *asoc,
3891 struct sctp_tmit_chunk *tp1)
3892{
3893 tp1->window_probe = 0;
3894 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3895 /* TSN's skipped we do NOT move back. */
3896 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3897 tp1->whoTo ? tp1->whoTo->flight_size : 0,
3898 tp1->book_size,
3899 (uint32_t)(uintptr_t)tp1->whoTo,
3900 tp1->rec.data.tsn);
3901 return;
3902 }
3903 /* First setup this by shrinking flight */
3904 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3905 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3906 tp1);
3907 }
3908 sctp_flight_size_decrease(tp1);
3909 sctp_total_flight_decrease(stcb, tp1);
3910 /* Now mark for resend */
3911 tp1->sent = SCTP_DATAGRAM_RESEND;
3912 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3913
3914 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3915 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_WP,
3916 tp1->whoTo->flight_size,
3917 tp1->book_size,
3918 (uint32_t)(uintptr_t)tp1->whoTo,
3919 tp1->rec.data.tsn);
3920 }
3921}
3922
3923void
3924 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3925 uint32_t rwnd, int *abort_now, int ecne_seen)
3926{
3927 struct sctp_nets *net;
3928 struct sctp_association *asoc;
3929 struct sctp_tmit_chunk *tp1, *tp2;
3930 uint32_t old_rwnd;
3931 int win_probe_recovery = 0;
3932 int win_probe_recovered = 0;
3933 int j, done_once = 0;
3934 int rto_ok = 1;
3935 uint32_t send_s;
3936
3937 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3938 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3939 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3940 }
3942#ifdef SCTP_ASOCLOG_OF_TSNS
3943 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3944 stcb->asoc.cumack_log_at++;
3945 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3946 stcb->asoc.cumack_log_at = 0;
3947 }
3948#endif
3949 asoc = &stcb->asoc;
3950 old_rwnd = asoc->peers_rwnd;
3951 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3952 /* old ack */
3953 return;
3954 } else if (asoc->last_acked_seq == cumack) {
3955 /* Window update sack */
3956 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3957 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3958 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3959 /* SWS sender side engages */
3960 asoc->peers_rwnd = 0;
3961 }
3962 if (asoc->peers_rwnd > old_rwnd) {
3963 goto again;
3964 }
3965 return;
3966 }
3967
3968 /* First setup for CC stuff */
3969 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3970 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3971 /* Drag along the window_tsn for cwr's */
3972 net->cwr_window_tsn = cumack;
3973 }
3974 net->prev_cwnd = net->cwnd;
3975 net->net_ack = 0;
3976 net->net_ack2 = 0;
3977
3978 /*
3979 * CMT: Reset CUC and Fast recovery algo variables before
3980 * SACK processing
3981 */
3982 net->new_pseudo_cumack = 0;
3983 net->will_exit_fast_recovery = 0;
3984 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3985 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3986 }
3987 }
3988 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3989 tp1 = TAILQ_LAST(&asoc->sent_queue,
3990 sctpchunk_listhead);
3991 send_s = tp1->rec.data.tsn + 1;
3992 } else {
3993 send_s = asoc->sending_seq;
3994 }
3995 if (SCTP_TSN_GE(cumack, send_s)) {
3996 struct mbuf *op_err;
3997 char msg[SCTP_DIAG_INFO_LEN];
3998
3999 *abort_now = 1;
4000 /* XXX */
4001 SCTP_SNPRINTF(msg, sizeof(msg),
4002 "Cum ack %8.8x greater or equal than TSN %8.8x",
4003 cumack, send_s);
4004 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4006 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
4007 return;
4008 }
4009 asoc->this_sack_highest_gap = cumack;
4010 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4011 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4012 stcb->asoc.overall_error_count,
4013 0,
4014 SCTP_FROM_SCTP_INDATA,
4015 __LINE__);
4016 }
4017 stcb->asoc.overall_error_count = 0;
4018 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
4019 /* process the new consecutive TSN first */
4020 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4021 if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
4022 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4023 SCTP_PRINTF("Warning, an unsent is now acked?\n");
4024 }
4025 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4026 /*
4027 * If it is less than ACKED, it is
4028 * now no-longer in flight. Higher
4029 * values may occur during marking
4030 */
4031 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4032 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4033 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4034 tp1->whoTo->flight_size,
4035 tp1->book_size,
4036 (uint32_t)(uintptr_t)tp1->whoTo,
4037 tp1->rec.data.tsn);
4038 }
4039 sctp_flight_size_decrease(tp1);
4040 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4041 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4042 tp1);
4043 }
4044 /* sa_ignore NO_NULL_CHK */
4045 sctp_total_flight_decrease(stcb, tp1);
4046 }
4047 tp1->whoTo->net_ack += tp1->send_size;
4048 if (tp1->snd_count < 2) {
4049 /*
4050 * True non-retransmitted
4051 * chunk
4052 */
4053 tp1->whoTo->net_ack2 +=
4054 tp1->send_size;
4055
4056 /* update RTO too? */
4057 if (tp1->do_rtt) {
4058 if (rto_ok &&
4059 sctp_calculate_rto(stcb,
4060 &stcb->asoc,
4061 tp1->whoTo,
4062 &tp1->sent_rcv_time,
4063 SCTP_RTT_FROM_DATA)) {
4064 rto_ok = 0;
4065 }
4066 if (tp1->whoTo->rto_needed == 0) {
4067 tp1->whoTo->rto_needed = 1;
4068 }
4069 tp1->do_rtt = 0;
4070 }
4071 }
4072 /*
4073 * CMT: CUCv2 algorithm. From the
4074 * cumack'd TSNs, for each TSN being
4075 * acked for the first time, set the
4076 * following variables for the
4077 * corresp destination.
4078 * new_pseudo_cumack will trigger a
4079 * cwnd update.
4080 * find_(rtx_)pseudo_cumack will
4081 * trigger search for the next
4082 * expected (rtx-)pseudo-cumack.
4083 */
4084 tp1->whoTo->new_pseudo_cumack = 1;
4085 tp1->whoTo->find_pseudo_cumack = 1;
4086 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4087 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4088 /* sa_ignore NO_NULL_CHK */
4089 sctp_log_cwnd(stcb, tp1->whoTo, tp1->send_size, SCTP_CWND_LOG_FROM_SACK);
4090 }
4091 }
4092 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4093 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4094 }
4095 if (tp1->rec.data.chunk_was_revoked) {
4096 /* deflate the cwnd */
4097 tp1->whoTo->cwnd -= tp1->book_size;
4098 tp1->rec.data.chunk_was_revoked = 0;
4099 }
4100 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4101 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4102 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4103#ifdef INVARIANTS
4104 } else {
4105 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4106#endif
4107 }
4108 }
4109 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4110 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4111 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4112 asoc->trigger_reset = 1;
4113 }
4114 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4115 if (tp1->data) {
4116 /* sa_ignore NO_NULL_CHK */
4117 sctp_free_bufspace(stcb, asoc, tp1, 1);
4118 sctp_m_freem(tp1->data);
4119 tp1->data = NULL;
4120 }
4121 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4122 sctp_log_sack(asoc->last_acked_seq,
4123 cumack,
4124 tp1->rec.data.tsn,
4125 0,
4126 0,
4127 SCTP_LOG_FREE_SENT);
4128 }
4129 asoc->sent_queue_cnt--;
4130 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4131 } else {
4132 break;
4133 }
4134 }
4135 }
4136 /* sa_ignore NO_NULL_CHK */
4137 if (stcb->sctp_socket) {
4138 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4139 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4140 /* sa_ignore NO_NULL_CHK */
4141 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4142 }
4143 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4144 } else {
4145 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4146 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4147 }
4148 }
4149
4150 /* JRS - Use the congestion control given in the CC module */
4151 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4152 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4153 if (net->net_ack2 > 0) {
4154 /*
4155 * Karn's rule applies to clearing error
4156 * count, this is optional.
4157 */
4158 net->error_count = 0;
4159 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4160 /* addr came good */
4161 net->dest_state |= SCTP_ADDR_REACHABLE;
4162 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4163 0, (void *)net, SCTP_SO_NOT_LOCKED);
4164 }
4165 if (net == stcb->asoc.primary_destination) {
4166 if (stcb->asoc.alternate) {
4167 /*
4168 * release the alternate,
4169 * primary is good
4170 */
4171 sctp_free_remote_addr(stcb->asoc.alternate);
4172 stcb->asoc.alternate = NULL;
4173 }
4174 }
4175 if (net->dest_state & SCTP_ADDR_PF) {
4176 net->dest_state &= ~SCTP_ADDR_PF;
4177 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4178 stcb->sctp_ep, stcb, net,
4180 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4181 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4182 /* Done with this net */
4183 net->net_ack = 0;
4184 }
4185 /* restore any doubled timers */
4186 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4187 if (net->RTO < stcb->asoc.minrto) {
4188 net->RTO = stcb->asoc.minrto;
4189 }
4190 if (net->RTO > stcb->asoc.maxrto) {
4191 net->RTO = stcb->asoc.maxrto;
4192 }
4193 }
4194 }
4195 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4196 }
4197 asoc->last_acked_seq = cumack;
4198
4199 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4200 /* nothing left in-flight */
4201 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4202 net->flight_size = 0;
4203 net->partial_bytes_acked = 0;
4204 }
4205 asoc->total_flight = 0;
4206 asoc->total_flight_count = 0;
4207 }
4208
4209 /* RWND update */
4210 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4211 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4212 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4213 /* SWS sender side engages */
4214 asoc->peers_rwnd = 0;
4215 }
4216 if (asoc->peers_rwnd > old_rwnd) {
4217 win_probe_recovery = 1;
4218 }
4219 /* Now assure a timer where data is queued at */
4220again:
4221 j = 0;
4222 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4223 if (win_probe_recovery && (net->window_probe)) {
4224 win_probe_recovered = 1;
4225 /*
4226 * Find first chunk that was used with window probe
4227 * and clear the sent
4228 */
4229 /* sa_ignore FREED_MEMORY */
4230 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4231 if (tp1->window_probe) {
4232 /* move back to data send queue */
4233 sctp_window_probe_recovery(stcb, asoc, tp1);
4234 break;
4235 }
4236 }
4237 }
4238 if (net->flight_size) {
4239 j++;
4240 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4241 if (net->window_probe) {
4242 net->window_probe = 0;
4243 }
4244 } else {
4245 if (net->window_probe) {
4246 /*
4247 * In window probes we must assure a timer
4248 * is still running there
4249 */
4250 net->window_probe = 0;
4251 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4252 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4253 }
4254 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4255 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4256 stcb, net,
4258 }
4259 }
4260 }
4261 if ((j == 0) &&
4262 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4263 (asoc->sent_queue_retran_cnt == 0) &&
4264 (win_probe_recovered == 0) &&
4265 (done_once == 0)) {
4266 /*
4267 * huh, this should not happen unless all packets are
4268 * PR-SCTP and marked to skip of course.
4269 */
4270 if (sctp_fs_audit(asoc)) {
4271 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4272 net->flight_size = 0;
4273 }
4274 asoc->total_flight = 0;
4275 asoc->total_flight_count = 0;
4276 asoc->sent_queue_retran_cnt = 0;
4277 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4278 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4279 sctp_flight_size_increase(tp1);
4280 sctp_total_flight_increase(stcb, tp1);
4281 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4282 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4283 }
4284 }
4285 }
4286 done_once = 1;
4287 goto again;
4288 }
4289 /**********************************/
4290 /* Now what about shutdown issues */
4291 /**********************************/
4292 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4293 /* nothing left on sendqueue.. consider done */
4294 /* clean up */
4295 if ((asoc->stream_queue_cnt == 1) &&
4296 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4297 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4298 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4299 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
4300 }
4301 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4302 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4303 (asoc->stream_queue_cnt == 1) &&
4304 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4305 struct mbuf *op_err;
4306
4307 *abort_now = 1;
4308 /* XXX */
4309 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4311 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
4312 return;
4313 }
4314 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4315 (asoc->stream_queue_cnt == 0)) {
4316 struct sctp_nets *netp;
4317
4318 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4319 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4320 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4321 }
4322 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
4323 sctp_stop_timers_for_shutdown(stcb);
4324 if (asoc->alternate) {
4325 netp = asoc->alternate;
4326 } else {
4327 netp = asoc->primary_destination;
4328 }
4329 sctp_send_shutdown(stcb, netp);
4330 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4331 stcb->sctp_ep, stcb, netp);
4332 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4333 stcb->sctp_ep, stcb, NULL);
4334 } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4335 (asoc->stream_queue_cnt == 0)) {
4336 struct sctp_nets *netp;
4337
4338 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4339 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
4340 sctp_stop_timers_for_shutdown(stcb);
4341 if (asoc->alternate) {
4342 netp = asoc->alternate;
4343 } else {
4344 netp = asoc->primary_destination;
4345 }
4346 sctp_send_shutdown_ack(stcb, netp);
4347 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4348 stcb->sctp_ep, stcb, netp);
4349 }
4350 }
4351 /*********************************************/
4352 /* Here we perform PR-SCTP procedures */
4353 /* (section 4.2) */
4354 /*********************************************/
4355 /* C1. update advancedPeerAckPoint */
4356 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4357 asoc->advanced_peer_ack_point = cumack;
4358 }
4359 /* PR-Sctp issues need to be addressed too */
4360 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4361 struct sctp_tmit_chunk *lchk;
4362 uint32_t old_adv_peer_ack_point;
4363
4364 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4365 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4366 /* C3. See if we need to send a Fwd-TSN */
4367 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4368 /*
4369 * ISSUE with ECN, see FWD-TSN processing.
4370 */
4371 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4372 send_forward_tsn(stcb, asoc);
4373 } else if (lchk) {
4374 /* try to FR fwd-tsn's that get lost too */
4375 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4376 send_forward_tsn(stcb, asoc);
4377 }
4378 }
4379 }
4380 for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
4381 if (lchk->whoTo != NULL) {
4382 break;
4383 }
4384 }
4385 if (lchk != NULL) {
4386 /* Assure a timer is up */
4387 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4388 stcb->sctp_ep, stcb, lchk->whoTo);
4389 }
4390 }
4391 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4392 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4393 rwnd,
4394 stcb->asoc.peers_rwnd,
4395 stcb->asoc.total_flight,
4396 stcb->asoc.total_flight_count);
4397 }
4398}
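Both SACK paths recompute the usable peer window the same way: the advertised rwnd minus bytes in flight plus a per-chunk overhead estimate, clamped to zero below the silly-window threshold. A standalone sketch of that arithmetic (parameter names model the sysctl and endpoint fields, not kernel APIs):

/* Sketch: sender-side peer rwnd update with SWS avoidance. */
#include <stdint.h>

static uint32_t
sbspace_sub(uint32_t rwnd, uint32_t used)
{
	return ((rwnd > used) ? rwnd - used : 0);	/* never underflow */
}

static uint32_t
update_peers_rwnd(uint32_t a_rwnd, uint32_t total_flight,
    uint32_t flight_cnt, uint32_t peer_chunk_oh, uint32_t sws_sender)
{
	uint32_t rwnd;

	rwnd = sbspace_sub(a_rwnd, total_flight + flight_cnt * peer_chunk_oh);
	if (rwnd < sws_sender)
		rwnd = 0;	/* SWS avoidance: treat the window as closed */
	return (rwnd);
}

/*
 * Example: a_rwnd 65536 with 4380 bytes in 3 chunks outstanding and an
 * assumed 256-byte overhead each gives 65536 - (4380 + 768) = 60388.
 */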
4399
4400void
4401sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4402 struct sctp_tcb *stcb,
4403 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4404 int *abort_now, uint8_t flags,
4405 uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4406{
4407 struct sctp_association *asoc;
4408 struct sctp_tmit_chunk *tp1, *tp2;
4409 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4410 uint16_t wake_him = 0;
4411 uint32_t send_s = 0;
4412 long j;
4413 int accum_moved = 0;
4414 int will_exit_fast_recovery = 0;
4415 uint32_t a_rwnd, old_rwnd;
4416 int win_probe_recovery = 0;
4417 int win_probe_recovered = 0;
4418 struct sctp_nets *net = NULL;
4419 int done_once;
4420 int rto_ok = 1;
4421 uint8_t reneged_all = 0;
4422 uint8_t cmt_dac_flag;
4423
4424 /*
4425 * we take any chance we can to service our queues since we cannot
4426 * get awoken when the socket is read from :<
4427 */
4428 /*
4429 * Now perform the actual SACK handling: 1) Verify that it is not an
4430 * old sack, if so discard. 2) If there is nothing left in the send
4431 * queue (cum-ack is equal to last acked) then you have a duplicate
4432 * too, update any rwnd change and verify no timers are running.
4433 * then return. 3) Process any new consecutive data i.e. cum-ack
4434 * moved process these first and note that it moved. 4) Process any
4435 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4436 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4437 * sync up flightsizes and things, stop all timers and also check
4438 * for shutdown_pending state. If so then go ahead and send off the
4439 * shutdown. If in shutdown recv, send off the shutdown-ack and
4440 * start that timer, Ret. 9) Strike any non-acked things and do FR
4441 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4442 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4443 * if in shutdown_recv state.
4444 */
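 /*
  * Worked illustration of steps 3-5: with cum-ack 1000, every TSN up
  * to and including 1000 is acked. A gap block [3,5] is relative to
  * the cum-ack and acks TSNs 1003..1005; TSNs 1001..1002 remain
  * missing and become strike candidates (step 9), and anything
  * formerly gap-acked that no longer appears in any block may have
  * been revoked (step 6).
  */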
4446 /* CMT DAC algo */
4447 this_sack_lowest_newack = 0;
4448 SCTP_STAT_INCR(sctps_slowpath_sack);
4449 last_tsn = cum_ack;
4450 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4451#ifdef SCTP_ASOCLOG_OF_TSNS
4452 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4453 stcb->asoc.cumack_log_at++;
4454 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4455 stcb->asoc.cumack_log_at = 0;
4456 }
4457#endif
4458 a_rwnd = rwnd;
4459
4460 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4461 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4462 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4463 }
4464
4465 old_rwnd = stcb->asoc.peers_rwnd;
4466 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4467 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4468 stcb->asoc.overall_error_count,
4469 0,
4470 SCTP_FROM_SCTP_INDATA,
4471 __LINE__);
4472 }
4473 stcb->asoc.overall_error_count = 0;
4474 asoc = &stcb->asoc;
4475 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4476 sctp_log_sack(asoc->last_acked_seq,
4477 cum_ack,
4478 0,
4479 num_seg,
4480 num_dup,
4481 SCTP_LOG_NEW_SACK);
4482 }
4483 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4484 uint16_t i;
4485 uint32_t *dupdata, dblock;
4486
4487 for (i = 0; i < num_dup; i++) {
4488 dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4489 sizeof(uint32_t), (uint8_t *)&dblock);
4490 if (dupdata == NULL) {
4491 break;
4492 }
4493 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4494 }
4495 }
4496 /* reality check */
4497 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4498 tp1 = TAILQ_LAST(&asoc->sent_queue,
4499 sctpchunk_listhead);
4500 send_s = tp1->rec.data.tsn + 1;
4501 } else {
4502 tp1 = NULL;
4503 send_s = asoc->sending_seq;
4504 }
4505 if (SCTP_TSN_GE(cum_ack, send_s)) {
4506 struct mbuf *op_err;
4507 char msg[SCTP_DIAG_INFO_LEN];
4508
4509 /*
4510 * no way, we have not even sent this TSN out yet. Peer is
4511 * hopelessly messed up with us.
4512 */
4513 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4514 cum_ack, send_s);
4515 if (tp1) {
4516 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4517 tp1->rec.data.tsn, (void *)tp1);
4518 }
4519hopeless_peer:
4520 *abort_now = 1;
4521 /* XXX */
4522 SCTP_SNPRINTF(msg, sizeof(msg),
4523 "Cum ack %8.8x greater or equal than TSN %8.8x",
4524 cum_ack, send_s);
4525 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4527 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
4528 return;
4529 }
4530 /**********************/
4531 /* 1) check the range */
4532 /**********************/
4533 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4534 /* acking something behind */
4535 return;
4536 }
4537
4538 /* update the Rwnd of the peer */
4539 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4540 TAILQ_EMPTY(&asoc->send_queue) &&
4541 (asoc->stream_queue_cnt == 0)) {
4542 /* nothing left on send/sent and strmq */
4543 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4544 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4545 asoc->peers_rwnd, 0, 0, a_rwnd);
4546 }
4547 asoc->peers_rwnd = a_rwnd;
4548 if (asoc->sent_queue_retran_cnt) {
4549 asoc->sent_queue_retran_cnt = 0;
4550 }
4551 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4552 /* SWS sender side engages */
4553 asoc->peers_rwnd = 0;
4554 }
4555 /* stop any timers */
4556 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4557 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4558 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4559 net->partial_bytes_acked = 0;
4560 net->flight_size = 0;
4561 }
4562 asoc->total_flight = 0;
4563 asoc->total_flight_count = 0;
4564 return;
4565 }
4566 /*
4567 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4568 * things. The total byte count acked is tracked in netAckSz AND
4569 * netAck2 is used to track the total bytes acked that are un-
4570 * ambiguous and were never retransmitted. We track these on a per
4571 * destination address basis.
4572 */
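 /*
  * Example: a 1200-byte chunk acked after a single transmission adds
  * 1200 to both net_ack and net_ack2 on its destination, and its RTT
  * may feed the RTO estimator. The same chunk acked after a
  * retransmission adds to net_ack only: per Karn's rule the RTT
  * sample would be ambiguous, so net_ack2 and the RTO update skip it.
  */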
4573 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4574 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4575 /* Drag along the window_tsn for cwr's */
4576 net->cwr_window_tsn = cum_ack;
4577 }
4578 net->prev_cwnd = net->cwnd;
4579 net->net_ack = 0;
4580 net->net_ack2 = 0;
4581
4582 /*
4583 * CMT: Reset CUC and Fast recovery algo variables before
4584 * SACK processing
4585 */
4586 net->new_pseudo_cumack = 0;
4587 net->will_exit_fast_recovery = 0;
4588 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4589 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4590 }
4591
4592 /*
4593 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4594 * to be greater than the cumack. Also reset saw_newack to 0
4595 * for all dests.
4596 */
4597 net->saw_newack = 0;
4598 net->this_sack_highest_newack = last_tsn;
4599 }
4600 /* process the new consecutive TSN first */
4601 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4602 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
4603 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4604 accum_moved = 1;
4605 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4606 /*
4607 * If it is less than ACKED, it is
4608 * now no-longer in flight. Higher
4609 * values may occur during marking
4610 */
4611 if ((tp1->whoTo->dest_state &
4612 SCTP_ADDR_UNCONFIRMED) &&
4613 (tp1->snd_count < 2)) {
4614 /*
4615 * If there was no retran
4616 * and the address is
4617 * un-confirmed and we sent
4618 * there and are now
4619 * sacked.. its confirmed,
4620 * mark it so.
4621 */
4622 tp1->whoTo->dest_state &=
4623 ~SCTP_ADDR_UNCONFIRMED;
4624 }
4625 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4626 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4627 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4628 tp1->whoTo->flight_size,
4629 tp1->book_size,
4630 (uint32_t)(uintptr_t)tp1->whoTo,
4631 tp1->rec.data.tsn);
4632 }
4633 sctp_flight_size_decrease(tp1);
4634 sctp_total_flight_decrease(stcb, tp1);
4635 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4636 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4637 tp1);
4638 }
4639 }
4640 tp1->whoTo->net_ack += tp1->send_size;
4641
4642 /* CMT SFR and DAC algos */
4643 this_sack_lowest_newack = tp1->rec.data.tsn;
4644 tp1->whoTo->saw_newack = 1;
4645
4646 if (tp1->snd_count < 2) {
4647 /*
4648 * True non-retransmitted
4649 * chunk
4650 */
4651 tp1->whoTo->net_ack2 +=
4652 tp1->send_size;
4653
4654 /* update RTO too? */
4655 if (tp1->do_rtt) {
4656 if (rto_ok &&
4657 sctp_calculate_rto(stcb,
4658 &stcb->asoc,
4659 tp1->whoTo,
4660 &tp1->sent_rcv_time,
4661 SCTP_RTT_FROM_DATA)) {
4662 rto_ok = 0;
4663 }
4664 if (tp1->whoTo->rto_needed == 0) {
4665 tp1->whoTo->rto_needed = 1;
4666 }
4667 tp1->do_rtt = 0;
4668 }
4669 }
4670 /*
4671 * CMT: CUCv2 algorithm. From the
4672 * cumack'd TSNs, for each TSN being
4673 * acked for the first time, set the
4674 * following variables for the
4675 * corresp destination.
4676 * new_pseudo_cumack will trigger a
4677 * cwnd update.
4678 * find_(rtx_)pseudo_cumack will
4679 * trigger search for the next
4680 * expected (rtx-)pseudo-cumack.
4681 */
4682 tp1->whoTo->new_pseudo_cumack = 1;
4683 tp1->whoTo->find_pseudo_cumack = 1;
4684 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4685 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4686 sctp_log_sack(asoc->last_acked_seq,
4687 cum_ack,
4688 tp1->rec.data.tsn,
4689 0,
4690 0,
4691 SCTP_LOG_TSN_ACKED);
4692 }
4693 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4694 sctp_log_cwnd(stcb, tp1->whoTo, tp1->send_size, SCTP_CWND_LOG_FROM_SACK);
4695 }
4696 }
4697 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4698 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4699#ifdef SCTP_AUDITING_ENABLED
4700 sctp_audit_log(0xB3,
4701 (asoc->sent_queue_retran_cnt & 0x000000ff));
4702#endif
4703 }
4704 if (tp1->rec.data.chunk_was_revoked) {
4705 /* deflate the cwnd */
4706 tp1->whoTo->cwnd -= tp1->book_size;
4707 tp1->rec.data.chunk_was_revoked = 0;
4708 }
4709 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4710 tp1->sent = SCTP_DATAGRAM_ACKED;
4711 }
4712 }
4713 } else {
4714 break;
4715 }
4716 }
4717 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4718 /* always set this up to cum-ack */
4719 asoc->this_sack_highest_gap = last_tsn;
4720
4721 if ((num_seg > 0) || (num_nr_seg > 0)) {
4722 /*
4723 * thisSackHighestGap will increase while handling NEW
4724 * segments this_sack_highest_newack will increase while
4725 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4726 * used for CMT DAC algo. saw_newack will also change.
4727 */
4728 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4729 &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4730 num_seg, num_nr_seg, &rto_ok)) {
4731 wake_him++;
4732 }
4733 /*
4734 * validate the biggest_tsn_acked in the gap acks if strict
4735 * adherence is wanted.
4736 */
4737 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4738 /*
4739 * peer is either confused or we are under attack.
4740 * We must abort.
4741 */
4742 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4743 biggest_tsn_acked, send_s);
4744 goto hopeless_peer;
4745 }
4746 }
4747 /*******************************************/
4748 /* cancel ALL T3-send timer if accum moved */
4749 /*******************************************/
4750 if (asoc->sctp_cmt_on_off > 0) {
4751 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4752 if (net->new_pseudo_cumack)
4753 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4754 stcb, net,
4756 }
4757 } else {
4758 if (accum_moved) {
4759 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4760 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4761 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
4762 }
4763 }
4764 }
4765 /********************************************/
4766 /* drop the acked chunks from the sentqueue */
4767 /********************************************/
4768 asoc->last_acked_seq = cum_ack;
4769
4770 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4771 if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
4772 break;
4773 }
4774 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4775 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4776 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4777#ifdef INVARIANTS
4778 } else {
4779 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4780#endif
4781 }
4782 }
4783 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4784 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4785 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4786 asoc->trigger_reset = 1;
4787 }
4788 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4789 if (PR_SCTP_ENABLED(tp1->flags)) {
4790 if (asoc->pr_sctp_cnt != 0)
4791 asoc->pr_sctp_cnt--;
4792 }
4793 asoc->sent_queue_cnt--;
4794 if (tp1->data) {
4795 /* sa_ignore NO_NULL_CHK */
4796 sctp_free_bufspace(stcb, asoc, tp1, 1);
4797 sctp_m_freem(tp1->data);
4798 tp1->data = NULL;
4799 if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4800 asoc->sent_queue_cnt_removeable--;
4801 }
4802 }
4803 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4804 sctp_log_sack(asoc->last_acked_seq,
4805 cum_ack,
4806 tp1->rec.data.tsn,
4807 0,
4808 0,
4809 SCTP_LOG_FREE_SENT);
4810 }
4811 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4812 wake_him++;
4813 }
4814 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4815#ifdef INVARIANTS
4816 panic("Warning flight size is positive and should be 0");
4817#else
4818 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4819 asoc->total_flight);
4820#endif
4821 asoc->total_flight = 0;
4822 }
4823
4824 /* sa_ignore NO_NULL_CHK */
4825 if ((wake_him) && (stcb->sctp_socket)) {
4826 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4827 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4828 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4829 }
4830 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4831 } else {
4832 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4833 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4834 }
4835 }
4836
4837 if (asoc->fast_retran_loss_recovery && accum_moved) {
4838 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4839 /* Setup so we will exit RFC2582 fast recovery */
4840 will_exit_fast_recovery = 1;
4841 }
4842 }
4843 /*
4844 * Check for revoked fragments:
4845 *
4846 * If the previous SACK had no frags, nothing can have been revoked.
4847 * If it had frags and we have frags now (num_seg > 0), call
4848 * sctp_check_for_revoked() to tell whether the peer revoked some of
4849 * them. Otherwise the peer revoked all ACKED fragments, since we
4850 * had some before and now we have NONE.
4851 */
4852
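 /*
  * Example: the previous SACK gap-acked TSN 1004, so it was marked
  * ACKED and removed from the flight. If this SACK carries no gap
  * blocks at all, the peer has reneged on 1004: below it is moved
  * back to SENT, re-charged to the flight, and the cwnd is inflated
  * by the same book_size so the artificial flight growth does not
  * throttle the sender.
  */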
4853 if (num_seg) {
4854 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4855 asoc->saw_sack_with_frags = 1;
4856 } else if (asoc->saw_sack_with_frags) {
4857 int cnt_revoked = 0;
4858
4859 /* Peer revoked all dg's marked or acked */
4860 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4861 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4862 tp1->sent = SCTP_DATAGRAM_SENT;
4863 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4864 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4865 tp1->whoTo->flight_size,
4866 tp1->book_size,
4867 (uint32_t)(uintptr_t)tp1->whoTo,
4868 tp1->rec.data.tsn);
4869 }
4870 sctp_flight_size_increase(tp1);
4871 sctp_total_flight_increase(stcb, tp1);
4872 tp1->rec.data.chunk_was_revoked = 1;
4873 /*
4874 * To ensure that this increase in
4875 * flightsize, which is artificial, does not
4876 * throttle the sender, we also increase the
4877 * cwnd artificially.
4878 */
4879 tp1->whoTo->cwnd += tp1->book_size;
4880 cnt_revoked++;
4881 }
4882 }
4883 if (cnt_revoked) {
4884 reneged_all = 1;
4885 }
4886 asoc->saw_sack_with_frags = 0;
4887 }
4888 if (num_nr_seg > 0)
4889 asoc->saw_sack_with_nr_frags = 1;
4890 else
4891 asoc->saw_sack_with_nr_frags = 0;
4892
4893 /* JRS - Use the congestion control given in the CC module */
4894 if (ecne_seen == 0) {
4895 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4896 if (net->net_ack2 > 0) {
4897 /*
4898 * Karn's rule applies to clearing error
4899 * count, this is optional.
4900 */
4901 net->error_count = 0;
4902 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4903 /* addr came good */
4904 net->dest_state |= SCTP_ADDR_REACHABLE;
4905 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4906 0, (void *)net, SCTP_SO_NOT_LOCKED);
4907 }
4908
4909 if (net == stcb->asoc.primary_destination) {
4910 if (stcb->asoc.alternate) {
4911 /*
4912 * release the alternate,
4913 * primary is good
4914 */
4915 sctp_free_remote_addr(stcb->asoc.alternate);
4916 stcb->asoc.alternate = NULL;
4917 }
4918 }
4919
4920 if (net->dest_state & SCTP_ADDR_PF) {
4921 net->dest_state &= ~SCTP_ADDR_PF;
4922 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4923 stcb->sctp_ep, stcb, net,
4925 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4926 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4927 /* Done with this net */
4928 net->net_ack = 0;
4929 }
4930 /* restore any doubled timers */
4931 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4932 if (net->RTO < stcb->asoc.minrto) {
4933 net->RTO = stcb->asoc.minrto;
4934 }
4935 if (net->RTO > stcb->asoc.maxrto) {
4936 net->RTO = stcb->asoc.maxrto;
4937 }
4938 }
4939 }
4940 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4941 }
4942
4943 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4944 /* nothing left in-flight */
4945 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4946 /* stop all timers */
4947 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4948 stcb, net,
4950 net->flight_size = 0;
4951 net->partial_bytes_acked = 0;
4952 }
4953 asoc->total_flight = 0;
4954 asoc->total_flight_count = 0;
4955 }
4956
4957 /**********************************/
4958 /* Now what about shutdown issues */
4959 /**********************************/
4960 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4961 /* nothing left on sendqueue.. consider done */
4962 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4963 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4964 asoc->peers_rwnd, 0, 0, a_rwnd);
4965 }
4966 asoc->peers_rwnd = a_rwnd;
4967 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4968 /* SWS sender side engages */
4969 asoc->peers_rwnd = 0;
4970 }
4971 /* clean up */
4972 if ((asoc->stream_queue_cnt == 1) &&
4973 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4974 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4975 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4976 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
4977 }
4978 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4979 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4980 (asoc->stream_queue_cnt == 1) &&
4981 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4982 struct mbuf *op_err;
4983
4984 *abort_now = 1;
4985 /* XXX */
4986 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4988 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
4989 return;
4990 }
4991 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4992 (asoc->stream_queue_cnt == 0)) {
4993 struct sctp_nets *netp;
4994
4995 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4996 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4997 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4998 }
4999 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
5000 sctp_stop_timers_for_shutdown(stcb);
5001 if (asoc->alternate) {
5002 netp = asoc->alternate;
5003 } else {
5004 netp = asoc->primary_destination;
5005 }
5006 sctp_send_shutdown(stcb, netp);
5007 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5008 stcb->sctp_ep, stcb, netp);
5009 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5010 stcb->sctp_ep, stcb, NULL);
5011 return;
5012 } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5013 (asoc->stream_queue_cnt == 0)) {
5014 struct sctp_nets *netp;
5015
5016 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5017 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
5018 sctp_stop_timers_for_shutdown(stcb);
5019 if (asoc->alternate) {
5020 netp = asoc->alternate;
5021 } else {
5022 netp = asoc->primary_destination;
5023 }
5024 sctp_send_shutdown_ack(stcb, netp);
5025 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5026 stcb->sctp_ep, stcb, netp);
5027 return;
5028 }
5029 }
5030 /*
5031 * Now here we are going to recycle net_ack for a different use...
5032 * HEADS UP.
5033 */
5034 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5035 net->net_ack = 0;
5036 }
5037
5038 /*
5039 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5040 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5041 * automatically ensure that.
5042 */
5043 if ((asoc->sctp_cmt_on_off > 0) &&
5044 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5045 (cmt_dac_flag == 0)) {
5046 this_sack_lowest_newack = cum_ack;
5047 }
5048 if ((num_seg > 0) || (num_nr_seg > 0)) {
5049 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5050 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5051 }
5052 /* JRS - Use the congestion control given in the CC module */
5053 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5054
5055 /* Now are we exiting loss recovery ? */
5056 if (will_exit_fast_recovery) {
5057 /* Ok, we must exit fast recovery */
5058 asoc->fast_retran_loss_recovery = 0;
5059 }
5060 if ((asoc->sat_t3_loss_recovery) &&
5061 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5062 /* end satellite t3 loss recovery */
5063 asoc->sat_t3_loss_recovery = 0;
5064 }
5065 /*
5066 * CMT Fast recovery
5067 */
5068 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5069 if (net->will_exit_fast_recovery) {
5070 /* Ok, we must exit fast recovery */
5071 net->fast_retran_loss_recovery = 0;
5072 }
5073 }
5074
5075 /* Adjust and set the new rwnd value */
5076 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5077 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5078 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5079 }
5080 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5081 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5082 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5083 /* SWS sender side engages */
5084 asoc->peers_rwnd = 0;
5085 }
5086 if (asoc->peers_rwnd > old_rwnd) {
5087 win_probe_recovery = 1;
5088 }
5089
5090 /*
5091 * Now we must setup so we have a timer up for anyone with
5092 * outstanding data.
5093 */
5094 done_once = 0;
5095again:
5096 j = 0;
5097 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5098 if (win_probe_recovery && (net->window_probe)) {
5099 win_probe_recovered = 1;
5100 /*-
5101 * Find first chunk that was used with
5102 * window probe and clear the event. Put
5103 * it back into the send queue as if has
5104 * not been sent.
5105 */
5106 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5107 if (tp1->window_probe) {
5108 sctp_window_probe_recovery(stcb, asoc, tp1);
5109 break;
5110 }
5111 }
5112 }
5113 if (net->flight_size) {
5114 j++;
5115 if (!(SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
5116 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5117 stcb->sctp_ep, stcb, net);
5118 }
5119 if (net->window_probe) {
5120 net->window_probe = 0;
5121 }
5122 } else {
5123 if (net->window_probe) {
5124 /*
5125 * In window probes we must assure a timer
5126 * is still running there
5127 */
5128 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5129 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5130 stcb->sctp_ep, stcb, net);
5131 }
5132 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5133 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5134 stcb, net,
5136 }
5137 }
5138 }
5139 if ((j == 0) &&
5140 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5141 (asoc->sent_queue_retran_cnt == 0) &&
5142 (win_probe_recovered == 0) &&
5143 (done_once == 0)) {
5144 /*
5145 * huh, this should not happen unless all packets are
5146 * PR-SCTP and marked to skip of course.
5147 */
5148 if (sctp_fs_audit(asoc)) {
5149 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5150 net->flight_size = 0;
5151 }
5152 asoc->total_flight = 0;
5153 asoc->total_flight_count = 0;
5154 asoc->sent_queue_retran_cnt = 0;
5155 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5156 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5157 sctp_flight_size_increase(tp1);
5158 sctp_total_flight_increase(stcb, tp1);
5159 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5160 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5161 }
5162 }
5163 }
5164 done_once = 1;
5165 goto again;
5166 }
5167 /*********************************************/
5168 /* Here we perform PR-SCTP procedures */
5169 /* (section 4.2) */
5170 /*********************************************/
5171 /* C1. update advancedPeerAckPoint */
5172 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5173 asoc->advanced_peer_ack_point = cum_ack;
5174 }
5175 /* C2. try to further move advancedPeerAckPoint ahead */
5176 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5177 struct sctp_tmit_chunk *lchk;
5178 uint32_t old_adv_peer_ack_point;
5179
5180 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5181 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5182 /* C3. See if we need to send a Fwd-TSN */
5183 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5184 /*
5185 * ISSUE with ECN, see FWD-TSN processing.
5186 */
5187 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5188 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5189 0xee, cum_ack, asoc->advanced_peer_ack_point,
5190 old_adv_peer_ack_point);
5191 }
5192 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5193 send_forward_tsn(stcb, asoc);
5194 } else if (lchk) {
5195 /* try to FR fwd-tsn's that get lost too */
5196 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5197 send_forward_tsn(stcb, asoc);
5198 }
5199 }
5200 }
5201 for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
5202 if (lchk->whoTo != NULL) {
5203 break;
5204 }
5205 }
5206 if (lchk != NULL) {
5207 /* Assure a timer is up */
5208 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5209 stcb->sctp_ep, stcb, lchk->whoTo);
5210 }
5211 }
5212 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5213 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5214 a_rwnd,
5215 stcb->asoc.peers_rwnd,
5216 stcb->asoc.total_flight,
5217 stcb->asoc.total_flight_count);
5218 }
5219}
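The gap-ack blocks handed to sctp_handle_segments() are 16-bit start/end offsets relative to the cumulative TSN ack. A compact sketch of that decoding (flat buffer instead of the kernel's mbuf walk, hypothetical callback):

/* Sketch: enumerate the TSNs covered by SACK gap-ack blocks. */
#include <stdint.h>
#include <stddef.h>

struct gap_block {
	uint16_t start;	/* first acked offset past cum_ack */
	uint16_t end;	/* last acked offset, inclusive */
};

static void
walk_gap_acks(uint32_t cum_ack, const struct gap_block *gaps, size_t n,
    void (*cb)(uint32_t tsn))
{
	for (size_t i = 0; i < n; i++) {
		if (gaps[i].start == 0 || gaps[i].start > gaps[i].end)
			continue;	/* malformed block: skip it */
		for (uint32_t off = gaps[i].start; off <= gaps[i].end; off++)
			cb(cum_ack + off);	/* e.g. {3,5} acks +3..+5 */
	}
}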
5220
5221void
5222sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5223{
5224 /* Copy cum-ack */
5225 uint32_t cum_ack, a_rwnd;
5226
5227 cum_ack = ntohl(cp->cumulative_tsn_ack);
5228 /* Arrange so a_rwnd does NOT change */
5229 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5230
5231 /* Now call the express sack handling */
5232 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5233}
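Every TSN and message-ID comparison in these paths (SCTP_TSN_GT/GE, SCTP_MID_GT/EQ) is serial-number arithmetic, so ordering survives 32-bit wraparound. One common equivalent formulation (the kernel macros are written differently but agree everywhere except the ambiguous half-space boundary):

/* Sketch: serial-number comparison for 32-bit TSNs. */
#include <stdint.h>

static int
tsn_gt(uint32_t a, uint32_t b)
{
	return ((int32_t)(a - b) > 0);	/* 0x00000001 > 0xffffffff holds */
}

static int
tsn_ge(uint32_t a, uint32_t b)
{
	return (a == b || tsn_gt(a, b));
}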
5234
5235static void
5236sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5237 struct sctp_stream_in *strmin)
5238{
5239 struct sctp_queued_to_read *control, *ncontrol;
5240 struct sctp_association *asoc;
5241 uint32_t mid;
5242 int need_reasm_check = 0;
5243
5244 asoc = &stcb->asoc;
5245 mid = strmin->last_mid_delivered;
5246 /*
5247 * First deliver anything prior to and including the stream no that
5248 * came in.
5249 */
5250 TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5251 if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5252 /* this is deliverable now */
5253 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5254 if (control->on_strm_q) {
5255 if (control->on_strm_q == SCTP_ON_ORDERED) {
5256 TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5257 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5258 TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5259#ifdef INVARIANTS
5260 } else {
5261 panic("strmin: %p ctl: %p unknown %d",
5262 strmin, control, control->on_strm_q);
5263#endif
5264 }
5265 control->on_strm_q = 0;
5266 }
5267 /* subtract pending on streams */
5268 if (asoc->size_on_all_streams >= control->length) {
5269 asoc->size_on_all_streams -= control->length;
5270 } else {
5271#ifdef INVARIANTS
5272 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5273#else
5274 asoc->size_on_all_streams = 0;
5275#endif
5276 }
5277 sctp_ucount_decr(asoc->cnt_on_all_streams);
5278 /* deliver it to at least the delivery-q */
5279 if (stcb->sctp_socket) {
5280 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5281 sctp_add_to_readq(stcb->sctp_ep, stcb,
5282 control,
5283 &stcb->sctp_socket->so_rcv,
5284 1, SCTP_READ_LOCK_HELD,
5285 SCTP_SO_NOT_LOCKED);
5286 }
5287 } else {
5288 /* It's a fragmented message */
5289 if (control->first_frag_seen) {
5290 /*
5291 * Make it so this is next to
5292 * deliver, we restore later
5293 */
5294 strmin->last_mid_delivered = control->mid - 1;
5295 need_reasm_check = 1;
5296 break;
5297 }
5298 }
5299 } else {
5300 /* no more delivery now. */
5301 break;
5302 }
5303 }
5304 if (need_reasm_check) {
5305 int ret;
5306
5307 ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5308 if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
5309 /* Restore the next to deliver unless we are ahead */
5310 strmin->last_mid_delivered = mid;
5311 }
5312 if (ret == 0) {
5313 /* Left the front Partial one on */
5314 return;
5315 }
5316 need_reasm_check = 0;
5317 }
5318 /*
5319 * now we must deliver things in queue the normal way if any are
5320 * now ready.
5321 */
5322 mid = strmin->last_mid_delivered + 1;
5323 TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5324 if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
5325 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5326 /* this is deliverable now */
5327 if (control->on_strm_q) {
5328 if (control->on_strm_q == SCTP_ON_ORDERED) {
5329 TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5330 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5331 TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5332#ifdef INVARIANTS
5333 } else {
5334 panic("strmin: %p ctl: %p unknown %d",
5335 strmin, control, control->on_strm_q);
5336#endif
5337 }
5338 control->on_strm_q = 0;
5339 }
5340 /* subtract pending on streams */
5341 if (asoc->size_on_all_streams >= control->length) {
5342 asoc->size_on_all_streams -= control->length;
5343 } else {
5344#ifdef INVARIANTS
5345 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5346#else
5347 asoc->size_on_all_streams = 0;
5348#endif
5349 }
5350 sctp_ucount_decr(asoc->cnt_on_all_streams);
5351 /* deliver it to at least the delivery-q */
5352 strmin->last_mid_delivered = control->mid;
5353 if (stcb->sctp_socket) {
5354 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5355 sctp_add_to_readq(stcb->sctp_ep, stcb,
5356 control,
5357 &stcb->sctp_socket->so_rcv, 1,
5358 SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5359 }
5360 mid = strmin->last_mid_delivered + 1;
5361 } else {
5362 /* It's a fragmented message */
5363 if (control->first_frag_seen) {
5364 /*
5365 * Make it so this is next to
5366 * deliver
5367 */
5368 strmin->last_mid_delivered = control->mid - 1;
5369 need_reasm_check = 1;
5370 break;
5371 }
5372 }
5373 } else {
5374 break;
5375 }
5376 }
5377 if (need_reasm_check) {
5378 (void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5379 }
5380}
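The second loop above is plain in-order delivery: once the FORWARD-TSN has bumped last_mid_delivered, any complete queued message whose ID is exactly the next expected one is handed up and the expectation advances, stopping at the first hole. The same idea in isolation (array-based queue, hypothetical deliver() callback):

/* Sketch: ordered delivery by message id, stopping at the first gap. */
#include <stdint.h>
#include <stddef.h>

struct pending_msg {
	uint32_t mid;		/* stream-sequence / message id, ascending */
	int	 complete;	/* all fragments present */
};

static uint32_t
kick_ordered(uint32_t last_delivered, const struct pending_msg *q,
    size_t n, void (*deliver)(const struct pending_msg *))
{
	for (size_t i = 0; i < n; i++) {
		if (q[i].mid != last_delivered + 1 || !q[i].complete)
			break;	/* hole or partial message: stop */
		deliver(&q[i]);
		last_delivered = q[i].mid;
	}
	return (last_delivered);
}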
5381
5382static void
5383sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5384 struct sctp_association *asoc, struct sctp_stream_in *strm,
5385 struct sctp_queued_to_read *control, int ordered, uint32_t cumtsn)
5386{
5387 struct sctp_tmit_chunk *chk, *nchk;
5388
5389 /*
5390 * For now large messages held on the stream reasm that are complete
5391 * will be tossed too. We could in theory do more work to spin
5392 * through and stop after dumping one msg aka seeing the start of a
5393 * new msg at the head, and call the delivery function... to see if
5394 * it can be delivered... But for now we just dump everything on the
5395 * queue.
5396 */
5397 if (!asoc->idata_supported && !ordered &&
5398 control->first_frag_seen &&
5399 SCTP_TSN_GT(control->fsn_included, cumtsn)) {
5400 return;
5401 }
5402 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5403 /* Purge hanging chunks */
5404 if (!asoc->idata_supported && !ordered) {
5405 if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
5406 break;
5407 }
5408 }
5409 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5410 if (asoc->size_on_reasm_queue >= chk->send_size) {
5411 asoc->size_on_reasm_queue -= chk->send_size;
5412 } else {
5413#ifdef INVARIANTS
5414 panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
5415#else
5416 asoc->size_on_reasm_queue = 0;
5417#endif
5418 }
5419 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5420 if (chk->data) {
5421 sctp_m_freem(chk->data);
5422 chk->data = NULL;
5423 }
5424 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5425 }
5426 if (!TAILQ_EMPTY(&control->reasm)) {
5427 /* This has to be old data, unordered */
5428 if (control->data) {
5429 sctp_m_freem(control->data);
5430 control->data = NULL;
5431 }
5432 sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5433 chk = TAILQ_FIRST(&control->reasm);
5434 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5435 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5436 sctp_add_chk_to_control(control, strm, stcb, asoc,
5437 chk, SCTP_READ_LOCK_HELD);
5438 }
5440 return;
5441 }
5442 if (control->on_strm_q == SCTP_ON_ORDERED) {
5443 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5444 if (asoc->size_on_all_streams >= control->length) {
5445 asoc->size_on_all_streams -= control->length;
5446 } else {
5447#ifdef INVARIANTS
5448 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5449#else
5450 asoc->size_on_all_streams = 0;
5451#endif
5452 }
5453 sctp_ucount_decr(asoc->cnt_on_all_streams);
5454 control->on_strm_q = 0;
5455 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5456 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5457 control->on_strm_q = 0;
5458#ifdef INVARIANTS
5459 } else if (control->on_strm_q) {
5460 panic("strm: %p ctl: %p unknown %d",
5461 strm, control, control->on_strm_q);
5462#endif
5463 }
5464 control->on_strm_q = 0;
5465 if (control->on_read_q == 0) {
5466 sctp_free_remote_addr(control->whoFrom);
5467 if (control->data) {
5468 sctp_m_freem(control->data);
5469 control->data = NULL;
5470 }
5471 sctp_free_a_readq(stcb, control);
5472 }
5473}
5474
5475void
5476sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5477 struct sctp_forward_tsn_chunk *fwd,
5478 int *abort_flag, struct mbuf *m, int offset)
5479{
5480 /* The pr-sctp fwd tsn */
5481 /*
5482 * here we will perform all the data receiver side steps for
5483 * processing FwdTSN, as required in by pr-sctp draft:
5484 *
5485 * Assume we get FwdTSN(x):
5486 *
5487 * 1) update local cumTSN to x 2) try to further advance cumTSN to x
5488 * + others we have 3) examine and update re-ordering queue on
5489 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5490 * report where we are.
5491 */
5492 struct sctp_association *asoc;
5493 uint32_t new_cum_tsn, gap;
5494 unsigned int i, fwd_sz, m_size;
5495 uint32_t str_seq;
5496 struct sctp_stream_in *strm;
5497 struct sctp_queued_to_read *control, *ncontrol, *sv;
5498
5499 asoc = &stcb->asoc;
5500 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5501 SCTPDBG(SCTP_DEBUG_INDATA1,
5502 "Bad size too small/big fwd-tsn\n");
5503 return;
5504 }
5505 m_size = (stcb->asoc.mapping_array_size << 3);
5506 /*************************************************************/
5507 /* 1. Here we update local cumTSN and shift the bitmap array */
5508 /*************************************************************/
5509 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5510
5511 if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5512 /* Already got there ... */
5513 return;
5514 }
5515 /*
5516 * now we know the new TSN is more advanced, let's find the actual
5517 * gap
5518 */
5519 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5520 asoc->cumulative_tsn = new_cum_tsn;
5521 if (gap >= m_size) {
5522 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5523 struct mbuf *op_err;
5524 char msg[SCTP_DIAG_INFO_LEN];
5525
5526 /*
5527 * out of range (of single byte chunks in the rwnd I
5528 * give out). This must be an attacker.
5529 */
5530 *abort_flag = 1;
5531 SCTP_SNPRINTF(msg, sizeof(msg),
5532 "New cum ack %8.8x too high, highest TSN %8.8x",
5533 new_cum_tsn, asoc->highest_tsn_inside_map);
5534 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5536 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
5537 return;
5538 }
5539 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5540
5541 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5542 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5543 asoc->highest_tsn_inside_map = new_cum_tsn;
5544
5545 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5546 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5547
5548 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5549 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5550 }
5551 } else {
5552 SCTP_TCB_LOCK_ASSERT(stcb);
5553 for (i = 0; i <= gap; i++) {
5554 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5555 !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5556 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5557 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5558 asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5559 }
5560 }
5561 }
5562 }
5563 /*************************************************************/
5564 /* 2. Clear up re-assembly queue */
5565 /*************************************************************/
5566
5567 /* This is now done as part of clearing up the stream/seq */
5568 if (asoc->idata_supported == 0) {
5569 uint16_t sid;
5570
5571 /* Flush all the un-ordered data based on cum-tsn */
5572 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5573 for (sid = 0; sid < asoc->streamincnt; sid++) {
5574 strm = &asoc->strmin[sid];
5575 if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
5576 sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), 0, new_cum_tsn);
5577 }
5578 }
5579 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5580 }
5581 /*******************************************************/
5582 /* 3. Update the PR-stream re-ordering queues and fix */
5583 /* delivery issues as needed. */
5584 /*******************************************************/
5585 fwd_sz -= sizeof(*fwd);
5586 if (m && fwd_sz) {
5587 /* New method. */
5588 unsigned int num_str;
5589 uint32_t mid;
5590 uint16_t sid;
5591 uint16_t ordered, flags;
5592 struct sctp_strseq *stseq, strseqbuf;
5593 struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5594
5595 offset += sizeof(*fwd);
5596
5597 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5598 if (asoc->idata_supported) {
5599 num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5600 } else {
5601 num_str = fwd_sz / sizeof(struct sctp_strseq);
5602 }
5603 for (i = 0; i < num_str; i++) {
5604 if (asoc->idata_supported) {
5605 stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5606 sizeof(struct sctp_strseq_mid),
5607 (uint8_t *)&strseqbuf_m);
5608 offset += sizeof(struct sctp_strseq_mid);
5609 if (stseq_m == NULL) {
5610 break;
5611 }
5612 sid = ntohs(stseq_m->sid);
5613 mid = ntohl(stseq_m->mid);
5614 flags = ntohs(stseq_m->flags);
5615 if (flags & PR_SCTP_UNORDERED_FLAG) {
5616 ordered = 0;
5617 } else {
5618 ordered = 1;
5619 }
5620 } else {
5621 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5622 sizeof(struct sctp_strseq),
5623 (uint8_t *)&strseqbuf);
5624 offset += sizeof(struct sctp_strseq);
5625 if (stseq == NULL) {
5626 break;
5627 }
5628 sid = ntohs(stseq->sid);
5629 mid = (uint32_t)ntohs(stseq->ssn);
5630 ordered = 1;
5631 }
5632 /* Convert */
5633
5634 /* now process */
5635
5636 /*
5637 * Ok we now look for the stream/seq on the read
5638 * queue where its not all delivered. If we find it
5639 * we transmute the read entry into a PDI_ABORTED.
5640 */
5641 if (sid >= asoc->streamincnt) {
5642 /* screwed up streams, stop! */
5643 break;
5644 }
5645 if ((asoc->str_of_pdapi == sid) &&
5646 (asoc->ssn_of_pdapi == mid)) {
5647 /*
5648 * If this is the one we were partially
5649 * delivering now then we no longer are.
5650 * Note this will change with the reassembly
5651 * re-write.
5652 */
5653 asoc->fragmented_delivery_inprogress = 0;
5654 }
5655 strm = &asoc->strmin[sid];
5656 if (ordered) {
5657 TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, ncontrol) {
5658 if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5659 sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
5660 }
5661 }
5662 } else {
5663 if (asoc->idata_supported) {
5664 TAILQ_FOREACH_SAFE(control, &strm->uno_inqueue, next_instrm, ncontrol) {
5665 if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5666 sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
5667 }
5668 }
5669 } else {
5670 if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
5671 sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), ordered, new_cum_tsn);
5672 }
5673 }
5674 }
5675 TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
5676 if ((control->sinfo_stream == sid) &&
5677 (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
5678 str_seq = (sid << 16) | (0x0000ffff & mid);
5679 control->pdapi_aborted = 1;
5680 sv = stcb->asoc.control_pdapi;
5681 control->end_added = 1;
5682 if (control->on_strm_q == SCTP_ON_ORDERED) {
5683 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5684 if (asoc->size_on_all_streams >= control->length) {
5685 asoc->size_on_all_streams -= control->length;
5686 } else {
5687#ifdef INVARIANTS
5688 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5689#else
5690 asoc->size_on_all_streams = 0;
5691#endif
5692 }
5693 sctp_ucount_decr(asoc->cnt_on_all_streams);
5694 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5695 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5696#ifdef INVARIANTS
5697 } else if (control->on_strm_q) {
5698 panic("strm: %p ctl: %p unknown %d",
5699 strm, control, control->on_strm_q);
5700#endif
5701 }
5702 control->on_strm_q = 0;
5703 stcb->asoc.control_pdapi = control;
5704 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5705 stcb,
5707 (void *)&str_seq,
5708 SCTP_SO_NOT_LOCKED);
5709 stcb->asoc.control_pdapi = sv;
5710 break;
5711 } else if ((control->sinfo_stream == sid) &&
5712 SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
5713 /* We are past our victim SSN */
5714 break;
5715 }
5716 }
5717 if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
5718 /* Update the sequence number */
5719 strm->last_mid_delivered = mid;
5720 }
5721 /* now kick the stream the new way */
5722 /* sa_ignore NO_NULL_CHK */
5723 sctp_kick_prsctp_reorder_queue(stcb, strm);
5724 }
5725 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5726 }
5727 /*
5728 * Now slide things forward.
5729 */
5730 sctp_slide_mapping_arrays(stcb);
5731}
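Step 1 above relies on SCTP_CALC_TSN_TO_GAP: a TSN's bit index in the receive map is its distance from the map's base TSN, taken modulo 2^32. A small sketch of applying a FORWARD-TSN to such a bitmap (flat map, none of the kernel's sliding or reset logic):

/* Sketch: mark every TSN up to a new cumulative TSN as received. */
#include <stdint.h>

#define MAP_BITS 1024

struct recv_map {
	uint32_t base_tsn;		/* TSN represented by bit 0 */
	uint8_t	 bits[MAP_BITS / 8];
};

static void
set_present(struct recv_map *m, uint32_t gap)
{
	m->bits[gap >> 3] |= (uint8_t)(1U << (gap & 7));
}

static int
apply_fwd_tsn(struct recv_map *m, uint32_t new_cum)
{
	uint32_t gap = new_cum - m->base_tsn;	/* wraps correctly mod 2^32 */

	if (gap >= MAP_BITS)
		return (-1);	/* beyond the map: caller must reset it */
	for (uint32_t i = 0; i <= gap; i++)
		set_present(m, i);
	return (0);
}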
__uint32_t uint32_t
Definition: in.h:62
__uint16_t uint16_t
Definition: in.h:57
__uint8_t uint8_t
Definition: in.h:52
ipfw_dyn_rule * next
Definition: ip_fw.h:0
#define SCTP_COOKIE_ECHO
Definition: sctp.h:441
#define SCTP_DATA_FIRST_FRAG
Definition: sctp.h:488
#define SCTP_ASCONF_ACK
Definition: sctp.h:454
#define SCTP_INITIATION_ACK
Definition: sctp.h:433
#define SCTP_ECN_CWR
Definition: sctp.h:444
#define SCTP_FLIGHT_LOGGING_ENABLE
Definition: sctp.h:608
#define SCTP_HEARTBEAT_ACK
Definition: sctp.h:436
#define SCTP_ASCONF
Definition: sctp.h:466
#define SCTP_SACK_CMT_DAC
Definition: sctp.h:496
#define SCTP_CAUSE_UNRECOG_CHUNK
Definition: sctp.h:348
#define SCTP_PCB_FLAGS_SOCKET_ALLGONE
Definition: sctp.h:522
#define SCTP_FORWARD_CUM_TSN
Definition: sctp.h:464
#define SCTP_LOG_SACK_ARRIVALS_ENABLE
Definition: sctp.h:624
#define SCTP_OPERATION_ERROR
Definition: sctp.h:440
#define SCTP_DATA_UNORDERED
Definition: sctp.h:490
#define SCTP_COOKIE_ACK
Definition: sctp.h:442
#define SCTP_PCB_FLAGS_EXT_RCVINFO
Definition: sctp.h:534
#define SCTP_CAUSE_INVALID_STREAM
Definition: sctp.h:343
#define SCTP_PACKET_DROPPED
Definition: sctp.h:456
#define SCTP_LOG_TRY_ADVANCE
Definition: sctp.h:631
#define SCTP_PCB_FLAGS_RECVNXTINFO
Definition: sctp.h:560
#define SCTP_SELECTIVE_ACK
Definition: sctp.h:434
#define SCTP_THRESHOLD_LOGGING
Definition: sctp.h:628
#define SCTP_MAP_LOGGING_ENABLE
Definition: sctp.h:611
#define SCTP_PCB_FLAGS_SOCKET_GONE
Definition: sctp.h:521
#define SCTP_STR_LOGGING_ENABLE
Definition: sctp.h:620
#define SCTP_DATA_NOT_FRAG
Definition: sctp.h:489
#define SCTP_CAUSE_PROTOCOL_VIOLATION
Definition: sctp.h:355
#define SCTP_PCB_FLAGS_RECVDATAIOEVNT
Definition: sctp.h:543
#define SCTP_STREAM_RESET
Definition: sctp.h:458
#define SCTP_DATA
Definition: sctp.h:431
#define SCTP_DATA_LAST_FRAG
Definition: sctp.h:487
#define SCTP_LOG_RWND_ENABLE
Definition: sctp.h:623
#define SCTP_AUTHENTICATION
Definition: sctp.h:447
#define SCTP_IDATA
Definition: sctp.h:451
#define SCTP_DATA_SACK_IMMEDIATELY
Definition: sctp.h:491
#define SCTP_SHUTDOWN_ACK
Definition: sctp.h:439
#define SCTP_SHUTDOWN
Definition: sctp.h:438
#define SCTP_SACK_LOGGING_ENABLE
Definition: sctp.h:617
#define SCTP_CAUSE_OUT_OF_RESC
Definition: sctp.h:346
#define SCTP_SHUTDOWN_COMPLETE
Definition: sctp.h:445
#define SCTP_FR_LOGGING_ENABLE
Definition: sctp.h:609
#define SCTP_CAUSE_USER_INITIATED_ABT
Definition: sctp.h:354
#define SCTP_MBUF_LOGGING_ENABLE
Definition: sctp.h:613
#define SCTP_CWND_LOGGING_ENABLE
Definition: sctp.h:607
#define SCTP_SACK_RWND_LOGGING_ENABLE
Definition: sctp.h:618
#define SCTP_ECN_ECHO
Definition: sctp.h:443
#define SCTP_ABORT_ASSOCIATION
Definition: sctp.h:437
#define SCTP_PCB_FLAGS_RECVRCVINFO
Definition: sctp.h:559
#define SCTP_HEARTBEAT_REQUEST
Definition: sctp.h:435
#define SCTP_NR_SELECTIVE_ACK
Definition: sctp.h:449
#define SCTP_WAKE_LOGGING_ENABLE
Definition: sctp.h:621
#define SCTP_INITIATION
Definition: sctp.h:432
struct mbuf * sctp_get_mbuf_for_msg(unsigned int space_needed, int want_header, int how, int allonebuf, int type)
#define SCTP_DIAG_INFO_LEN
#define SCTP_LOC_37
#define SCTP_TIMER_TYPE_SHUTDOWNACK
#define SCTP_LOC_10
#define SCTP_LOC_1
#define SCTP_DATAGRAM_NR_ACKED
#define SCTP_LOC_17
#define SCTP_SIZE32(x)
#define SCTP_LOC_28
#define SCTP_STR_LOG_FROM_INTO_STRD
#define SCTP_FLIGHT_LOG_UP_REVOKE
#define SCTP_INCREASE_PEER_RWND
#define SCTP_ADD_SUBSTATE(_stcb, _substate)
#define SCTP_FLIGHT_LOG_DWN_WP_FWD
#define SCTP_LOC_19
#define SCTP_TSN_GT(a, b)
#define SCTP_MAP_TSN_ENTERS
#define SCTP_LOC_12
#define SCTP_LOC_8
#define SCTP_STR_LOG_FROM_INSERT_TL
#define SCTP_LOC_16
#define SCTP_DATAGRAM_RESEND
#define SCTP_FWD_TSN_CHECK
#define SCTP_SO_NOT_LOCKED
#define SCTP_MAP_PREPARE_SLIDE
#define SCTP_TIMER_TYPE_HEARTBEAT
#define SCTP_MID_GT(i, a, b)
#define SCTP_SACK_LOG_NORMAL
#define SCTP_DEBUG_XXX
#define SCTP_MAP_SLIDE_RESULT
#define SCTP_LOC_14
#define SCTP_SET_PEER_RWND_VIA_SACK
#define SCTP_NOTIFY_INTERFACE_UP
#define SCTP_LOC_31
#define SCTP_LOC_25
#define SCTP_CWND_LOG_FROM_SACK
#define SCTP_ADDR_PF
#define sctp_sorwakeup(inp, so)
#define SCTP_LOC_35
#define SCTP_CALC_TSN_TO_GAP(gap, tsn, mapping_tsn)
#define SCTP_WAKESND_FROM_SACK
#define SCTP_MAX_DUP_TSNS
#define SCTP_FR_LOG_CHECK_STRIKE
#define SCTP_LOC_26
#define SCTP_STATE_SHUTDOWN_PENDING
#define SCTP_LOC_18
#define SCTP_LOC_13
#define SCTP_ADDR_UNCONFIRMED
#define SCTP_LOC_15
#define SCTP_TIMER_TYPE_RECV
#define SCTP_LOC_6
#define SCTP_LOG_FREE_SENT
#define SCTP_LOG_TSN_ACKED
#define SCTP_TSN_GE(a, b)
#define SCTP_LOC_5
#define SCTP_MID_GE(i, a, b)
#define SCTP_LOC_20
#define SCTP_LOC_4
#define SCTP_STATE_SHUTDOWN_ACK_SENT
#define SCTP_FROM_SCTP_INDATA
#define PR_SCTP_UNORDERED_FLAG
#define SCTP_LOC_34
#define SCTP_TIMER_TYPE_SHUTDOWNGUARD
#define SCTP_LOC_23
#define SCTP_GET_STATE(_stcb)
#define SCTP_LOC_11
#define SCTP_LOC_27
#define SCTP_THRESHOLD_CLEAR
#define SCTP_GETTIME_TIMEVAL(x)
#define SCTP_LOC_2
#define SCTP_LOC_3
#define SCTP_STR_LOG_FROM_IMMED_DEL
#define SCTP_DATAGRAM_UNSENT
#define SCTP_STATE_CLOSED_SOCKET
#define SCTP_RTT_FROM_DATA
#define SCTP_LOC_33
#define SCTP_SACK_LOG_EXPRESS
#define SCTP_LOG_TSN_REVOKED
#define SCTP_FLIGHT_LOG_DOWN_RSND
#define SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION
#define SCTP_FLIGHT_LOG_DOWN_GAP
#define SCTP_MBUF_ICOPY
#define SCTP_DATAGRAM_MARKED
#define SCTP_LOC_21
#define SCTP_DATAGRAM_SENT
#define SCTP_FR_LOG_BIGGEST_TSNS
#define SCTP_LOC_7
#define SCTP_PARTIAL_DELIVERY_SHIFT
#define SCTP_TIMER_TYPE_SHUTDOWN
#define SCTP_LOC_30
#define SCTP_FR_MARKED
#define SCTP_STATE_SHUTDOWN_SENT
#define SCTP_LOC_32
#define SCTP_STATE_SHUTDOWN_RECEIVED
#define SCTP_LOC_22
#define SCTP_SET_TSN_PRESENT(arry, gap)
#define SCTP_LOC_9
#define SCTP_STATE_ABOUT_TO_BE_FREED
#define SCTP_LOC_24
#define SCTP_ADDR_REACHABLE
#define SCTP_UNSET_TSN_PRESENT(arry, gap)
#define SCTP_STATE_OPEN
#define SCTP_FLIGHT_LOG_DOWN_CA
#define SCTP_NOWAKE_FROM_SACK
#define SCTP_SET_STATE(_stcb, _state)
#define SCTP_FR_DUPED
#define SCTP_FR_LOG_STRIKE_CHUNK
#define SCTP_STR_LOG_FROM_EXPRS_DEL
#define SCTP_SO_LOCKED
#define SCTP_LOG_NEW_SACK
#define SCTP_MAP_SLIDE_NONE
#define SCTP_LOC_36
#define SCTP_FORWARD_TSN_SKIP
#define SCTP_LOC_29
#define SCTP_MAP_SLIDE_FROM
#define SCTP_MID_EQ(i, a, b)
#define SCTP_DATAGRAM_ACKED
#define SCTP_FLIGHT_LOG_DOWN_WP
#define SCTP_TIMER_TYPE_SEND
#define SCTP_SACK_RWND_UPDATE
#define sctp_sowwakeup_locked(inp, so)
#define SCTP_DEBUG_INDATA1
#define SCTP_STR_LOG_FROM_MARK_TSN
#define SCTP_MAPPING_ARRAY
#define SCTP_MINIMAL_RWND
#define SCTP_IS_TSN_PRESENT(arry, gap)
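SCTP_CALC_TSN_TO_GAP() and the SCTP_SET/UNSET/IS_TSN_PRESENT() macros maintain the received-TSN bitmaps (mapping_array and nr_mapping_array): a TSN is converted to a bit offset ("gap") relative to mapping_array_base_tsn and then set, cleared, or tested. A hedged sketch of the pattern, assuming tsn is at or above the base (the real macros live in the SCTP headers):

    #include <stdint.h>

    /* Sketch of SCTP_CALC_TSN_TO_GAP() + SCTP_SET_TSN_PRESENT(). */
    static void
    example_set_tsn_present(uint8_t *array, uint32_t tsn, uint32_t base_tsn)
    {
            uint32_t gap = tsn - base_tsn;  /* bit index into the map */

            array[gap >> 3] |= (uint8_t)(1 << (gap & 0x07));
    }

    /* Sketch of SCTP_IS_TSN_PRESENT(): nonzero if tsn was received. */
    static int
    example_is_tsn_present(const uint8_t *array, uint32_t tsn,
        uint32_t base_tsn)
    {
            uint32_t gap = tsn - base_tsn;

            return ((array[gap >> 3] & (1 << (gap & 0x07))) != 0);
    }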
#define SCTP_STATE_PARTIAL_MSG_LEFT
#define IPPROTO_SCTP
#define SCTP_STREAM_RESET_RESULT_PERFORMED
Definition: sctp_header.h:464
void sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
Definition: sctp_indata.c:5222
static struct sctp_queued_to_read * sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
Definition: sctp_indata.c:1665
static int sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn, uint16_t frag_strt, uint16_t frag_end, int nr_sacking, int *num_frs, uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack, int *rto_ok)
Definition: sctp_indata.c:2942
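The frag_strt/frag_end parameters of sctp_process_segment_range() are the gap-ack block offsets carried in a SACK: block i reports TSNs (cum_ack + frag_strt[i]) through (cum_ack + frag_end[i]) as received above the cumulative ack point (RFC 4960, sec. 3.3.4). A hypothetical sketch of expanding the blocks into TSN ranges, which is what sctp_handle_segments() feeds to this function:

    #include <stdint.h>

    /* Illustration only: walk a SACK's gap-ack blocks. */
    static void
    example_walk_gap_blocks(uint32_t cum_ack, const uint16_t *frag_strt,
        const uint16_t *frag_end, int num_seg)
    {
            int i;

            for (i = 0; i < num_seg; i++) {
                    uint32_t first = cum_ack + frag_strt[i];
                    uint32_t last = cum_ack + frag_end[i];

                    /* Chunks with first <= TSN <= last (serial-number
                     * arithmetic) are marked acked above the cumulative
                     * ack point; in-flight chunks below them become
                     * candidates for fast retransmit. */
                    (void)first;
                    (void)last;
            }
    }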
static void sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
Definition: sctp_indata.c:633
static int sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc, struct sctp_stream_in *strm, int inp_read_lock_held)
Definition: sctp_indata.c:1051
void sctp_handle_forward_tsn(struct sctp_tcb *stcb, struct sctp_forward_tsn_chunk *fwd, int *abort_flag, struct mbuf *m, int offset)
Definition: sctp_indata.c:5476
static int sctp_handle_old_unordered_data(struct sctp_tcb *stcb, struct sctp_association *asoc, struct sctp_stream_in *strm, struct sctp_queued_to_read *control, uint32_t pd_point, int inp_read_lock_held)
Definition: sctp_indata.c:768
struct sctp_tmit_chunk * sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb, struct sctp_association *asoc)
Definition: sctp_indata.c:3749
uint32_t sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
Definition: sctp_indata.c:79
static void sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc, struct sctp_queued_to_read *control, struct sctp_tmit_chunk *chk, int created_control, int *abort_flag, uint32_t tsn)
Definition: sctp_indata.c:1365
static void sctp_window_probe_recovery(struct sctp_tcb *stcb, struct sctp_association *asoc, struct sctp_tmit_chunk *tp1)
Definition: sctp_indata.c:3889
static void sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb, struct sctp_association *asoc, struct sctp_stream_in *strm, struct sctp_queued_to_read *control, int ordered, uint32_t cumtsn)
Definition: sctp_indata.c:5383
static void sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
Definition: sctp_indata.c:730
void sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
Definition: sctp_indata.c:72
static const int8_t sctp_map_lookup_tab[256]
Definition: sctp_indata.c:2359
static void sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc, struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
Definition: sctp_indata.c:481
static void sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb, struct sctp_stream_in *strmin)
Definition: sctp_indata.c:5236
int sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length, struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t *high_tsn)
Definition: sctp_indata.c:2649
static void sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
Definition: sctp_indata.c:452
static void sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
Definition: sctp_indata.c:286
static int sctp_fs_audit(struct sctp_association *asoc)
Definition: sctp_indata.c:3838
struct mbuf * sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
Definition: sctp_indata.c:172
static void sctp_inject_old_unordered_data(struct sctp_tcb *stcb, struct sctp_association *asoc, struct sctp_queued_to_read *control, struct sctp_tmit_chunk *chk, int *abort_flag)
Definition: sctp_indata.c:913
void sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
Definition: sctp_indata.c:2560
void sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup, struct sctp_tcb *stcb, uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup, int *abort_now, uint8_t flags, uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
Definition: sctp_indata.c:4401
void sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack, uint32_t rwnd, int *abort_now, int ecne_seen)
Definition: sctp_indata.c:3924
__FBSDID("$FreeBSD$")
static void sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc, uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
Definition: sctp_indata.c:3302
static void sctp_abort_in_reasm(struct sctp_tcb *stcb, struct sctp_queued_to_read *control, struct sctp_tmit_chunk *chk, int *abort_flag, int opspot)
Definition: sctp_indata.c:416
static uint32_t sctp_add_chk_to_control(struct sctp_queued_to_read *control, struct sctp_stream_in *strm, struct sctp_tcb *stcb, struct sctp_association *asoc, struct sctp_tmit_chunk *chk, int hold_rlock)
Definition: sctp_indata.c:1285
static int sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc, struct mbuf **m, int offset, int chk_length, struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag, int *break_flag, int last_chunk, uint8_t chk_type)
Definition: sctp_indata.c:1690
static int sctp_place_control_in_stream(struct sctp_stream_in *strm, struct sctp_association *asoc, struct sctp_queued_to_read *control)
Definition: sctp_indata.c:330
static int sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc, uint32_t last_tsn, uint32_t *biggest_tsn_acked, uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack, int num_seg, int num_nr_seg, int *rto_ok)
Definition: sctp_indata.c:3180
static void sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
Definition: sctp_indata.c:675
void sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
Definition: sctp_indata.c:2395
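sctp_slide_mapping_arrays() reclaims bitmap space once the cumulative TSN advances: fully-acked low bytes of the map are dropped, the remainder is shifted toward the front, and mapping_array_base_tsn moves forward by 8 TSNs per byte slid. A conceptual sketch, not the kernel's code (the real function also slides nr_mapping_array and logs SCTP_MAP_* events):

    #include <stdint.h>
    #include <string.h>

    /* Sketch: slide the received-TSN bitmap down by 'slide' bytes and
     * advance the base TSN accordingly. */
    static void
    example_slide_map(uint8_t *map, size_t map_size, size_t slide,
        uint32_t *base_tsn)
    {
            memmove(map, map + slide, map_size - slide);
            memset(map + (map_size - slide), 0, slide);
            *base_tsn += (uint32_t)(slide * 8);
    }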
struct sctp_queued_to_read * sctp_build_readq_entry(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t tsn, uint32_t ppid, uint32_t context, uint16_t sid, uint32_t mid, uint8_t flags, struct mbuf *dm)
Definition: sctp_indata.c:135
static void sctp_reset_a_control(struct sctp_queued_to_read *control, struct sctp_inpcb *inp, uint32_t tsn)
Definition: sctp_indata.c:753
static void sctp_check_for_revoked(struct sctp_tcb *stcb, struct sctp_association *asoc, uint32_t cumack, uint32_t biggest_tsn_acked)
Definition: sctp_indata.c:3244
#define sctp_build_readq_entry_mac(_ctl, in_it, context, net, tsn, ppid, sid, flags, dm, tfsn, mid)
Definition: sctp_indata.h:51
void sctp_reset_in_stream(struct sctp_tcb *stcb, uint32_t number_entries, uint16_t *list)
Definition: sctp_input.c:3290
#define SCTP_INP_READ_UNLOCK(_inp)
#define SCTP_TCB_LOCK_ASSERT(_tcb)
#define SCTP_INP_READ_LOCK(_inp)
#define SCTP_LTRACE_CHK(a, b, c, d)
Definition: sctp_os_bsd.h:177
#define SCTP_SNPRINTF(...)
Definition: sctp_os_bsd.h:303
#define SCTP_OS_TIMER_PENDING
Definition: sctp_os_bsd.h:278
#define SCTP_BUF_RESV_UF(m, size)
Definition: sctp_os_bsd.h:293
#define SCTP_PRINTF(params...)
Definition: sctp_os_bsd.h:151
#define SCTP_FREE(var, type)
Definition: sctp_os_bsd.h:224
#define SCTP_BUF_LEN(m)
Definition: sctp_os_bsd.h:290
#define SCTP_M_COPYM
Definition: sctp_os_bsd.h:261
#define SCTPDBG(level, params...)
Definition: sctp_os_bsd.h:170
#define SCTP_BASE_SYSCTL(__m)
Definition: sctp_os_bsd.h:148
#define SCTP_BUF_NEXT(m)
Definition: sctp_os_bsd.h:291
#define SCTP_SB_LIMIT_RCV(so)
Definition: sctp_os_bsd.h:390
void sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net)
Definition: sctp_output.c:9163
void send_forward_tsn(struct sctp_tcb *stcb, struct sctp_association *asoc)
void sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
Definition: sctp_output.c:8909
void sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net, struct mbuf *m, int len, int iphlen, int bad_crc)
void sctp_send_deferred_reset_response(struct sctp_tcb *stcb, struct sctp_stream_reset_list *ent, int response)
void sctp_send_sack(struct sctp_tcb *stcb, int so_locked)
void sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net)
Definition: sctp_output.c:9206
#define SCTP_ON_UNORDERED
Definition: sctp_structs.h:489
#define SCTP_STREAM_RESET_PENDING
Definition: sctp_structs.h:606
#define SCTP_ON_ORDERED
Definition: sctp_structs.h:488
#define SCTP_TSN_LOG_SIZE
Definition: sctp_structs.h:654
struct sctp_nets * sctp_find_alternate_net(struct sctp_tcb *stcb, struct sctp_nets *net, int mode)
Definition: sctp_timer.c:171
#define SCTP_RTT_SHIFT
Definition: sctp_timer.h:43
#define PR_SCTP_ENABLED(x)
Definition: sctp_uio.h:268
#define SCTP_UNORDERED
Definition: sctp_uio.h:246
#define SCTP_NEXT_MSG_IS_NOTIFICATION
Definition: sctp_uio.h:203
#define PR_SCTP_RTX_ENABLED(x)
Definition: sctp_uio.h:272
#define SCTP_NEXT_MSG_AVAIL
Definition: sctp_uio.h:200
#define SCTP_NOTIFICATION
Definition: sctp_uio.h:242
#define SCTP_NEXT_MSG_ISCOMPLETE
Definition: sctp_uio.h:201
#define SCTP_PARTIAL_DELIVERY_ABORTED
Definition: sctp_uio.h:437
#define PR_SCTP_BUF_ENABLED(x)
Definition: sctp_uio.h:271
#define SCTP_RCVINFO
Definition: sctp_uio.h:83
#define SCTP_STAT_INCR_COUNTER64(_x)
Definition: sctp_uio.h:1136
#define SCTP_NXTINFO
Definition: sctp_uio.h:84
#define SCTP_EXTRCV
Definition: sctp_uio.h:81
#define PR_SCTP_TTL_ENABLED(x)
Definition: sctp_uio.h:270
#define SCTP_COMPLETE
Definition: sctp_uio.h:243
#define SCTP_SNDRCV
Definition: sctp_uio.h:80
#define SCTP_STAT_INCR(_x)
Definition: sctp_uio.h:1125
#define SCTP_STAT_DECR_GAUGE32(_x)
Definition: sctp_uio.h:1140
#define SCTP_NEXT_MSG_IS_UNORDERED
Definition: sctp_uio.h:202
#define sctp_free_a_chunk(_stcb, _chk, _so_locked)
Definition: sctp_var.h:140
#define sctp_flight_size_decrease(tp1)
Definition: sctp_var.h:247
#define sctp_total_flight_decrease(stcb, tp1)
Definition: sctp_var.h:296
#define sctp_is_feature_on(inp, feature)
Definition: sctp_var.h:49
#define sctp_alloc_a_readq(_stcb, _readq)
Definition: sctp_var.h:115
#define sctp_is_feature_off(inp, feature)
Definition: sctp_var.h:50
#define sctp_total_flight_increase(stcb, tp1)
Definition: sctp_var.h:308
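The flight-size macros keep two levels of byte accounting in step when a chunk's in-flight status changes: sctp_flight_size_increase()/decrease() adjust the per-destination flight_size on struct sctp_nets, while sctp_total_flight_increase()/decrease() adjust the association-wide total_flight and total_flight_count. A hedged sketch of the invariant they maintain (the real macros also cover logging and underflow guards):

    /* Illustration only; field names as indexed on this page. */
    static void
    example_flight_increase(struct sctp_tmit_chunk *tp1,
        struct sctp_association *asoc)
    {
            tp1->whoTo->flight_size += tp1->book_size;  /* per-destination */
            asoc->total_flight += tp1->book_size;       /* per-association */
            asoc->total_flight_count++;
    }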
#define sctp_free_a_readq(_stcb, _readq)
Definition: sctp_var.h:109
#define sctp_free_remote_addr(__net)
Definition: sctp_var.h:184
#define sctp_sbspace(asoc, sb)
Definition: sctp_var.h:86
#define sctp_ucount_incr(val)
Definition: sctp_var.h:224
#define sctp_ucount_decr(val)
Definition: sctp_var.h:228
#define sctp_alloc_a_chunk(_stcb, _chk)
Definition: sctp_var.h:166
#define sctp_sballoc(stcb, sb, m)
Definition: sctp_var.h:212
#define sctp_sbspace_sub(a, b)
Definition: sctp_var.h:90
#define sctp_flight_size_increase(tp1)
Definition: sctp_var.h:254
void sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
Definition: sctputil.c:489
struct mbuf * sctp_generate_no_user_data_cause(uint32_t tsn)
Definition: sctputil.c:4978
void sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t from)
Definition: sctputil.c:2615
void sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net)
Definition: sctputil.c:2157
int sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
Definition: sctputil.c:1417
void sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
Definition: sctputil.c:427
struct mbuf * sctp_generate_cause(uint16_t code, char *info)
Definition: sctputil.c:4951
int sctp_calculate_rto(struct sctp_tcb *stcb, struct sctp_association *asoc, struct sctp_nets *net, struct timeval *old, int rtt_from_sack)
Definition: sctputil.c:2916
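sctp_calculate_rto() applies the standard RFC 4960 (sec. 6.3.1) smoothing to a new round-trip measurement; SCTP_RTT_SHIFT above hints at the fixed-point shifts the kernel uses. An illustration of the textbook update (first measurement instead sets srtt = rtt and rttvar = rtt / 2; not the kernel's exact arithmetic):

    #include <stdint.h>

    /* RFC 4960 RTO update for a new RTT measurement. Sketch only. */
    static uint32_t
    example_rto_update(uint32_t *srtt, uint32_t *rttvar, uint32_t rtt,
        uint32_t rto_min, uint32_t rto_max)
    {
            uint32_t diff = (*srtt > rtt) ? (*srtt - rtt) : (rtt - *srtt);
            uint32_t rto;

            *rttvar = (3 * *rttvar + diff) / 4;  /* beta  = 1/4 */
            *srtt = (7 * *srtt + rtt) / 8;       /* alpha = 1/8 */
            rto = *srtt + 4 * *rttvar;
            if (rto < rto_min)                   /* clamp to [RTO.Min, */
                    rto = rto_min;               /*          RTO.Max]  */
            if (rto > rto_max)
                    rto = rto_max;
            return (rto);
    }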
void sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
Definition: sctputil.c:500
void sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
Definition: sctputil.c:184
void sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb, uint32_t error, void *data, int so_locked)
Definition: sctputil.c:4042
void sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct mbuf *op_err, bool timedout, int so_locked)
Definition: sctputil.c:4465
void sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
Definition: sctputil.c:205
void sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
Definition: sctputil.c:284
void sctp_print_mapping_array(struct sctp_association *asoc)
Definition: sctputil.c:1382
void sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
Definition: sctputil.c:447
void sctp_add_to_readq(struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_queued_to_read *control, struct sockbuf *sb, int end, int inp_read_lock_held, int so_locked)
Definition: sctputil.c:4845
void sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
Definition: sctputil.c:315
void sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
Definition: sctputil.c:225
void sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
Definition: sctputil.c:141
void sctp_wakeup_the_read_socket(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked SCTP_UNUSED)
Definition: sctputil.c:4833
caddr_t sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr)
Definition: sctputil.c:3033
int sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1, uint8_t sent, int so_locked)
Definition: sctputil.c:5031
void sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
Definition: sctputil.c:853
#define sctp_get_associd(stcb)
Definition: sctputil.h:63
#define sctp_free_bufspace(stcb, asoc, tp1, chk_cnt)
Definition: sctputil.h:248
#define sctp_m_freem
Definition: sctputil.h:55
#define sctp_m_free
Definition: sctputil.h:54
#define SCTP_READ_LOCK_NOT_HELD
Definition: sctputil.h:44
#define SCTP_READ_LOCK_HELD
Definition: sctputil.h:43
uint32_t sending_seq
Definition: sctp_structs.h:930
unsigned int numnets
uint16_t mapping_array_size
uint32_t total_output_queue_size
unsigned int sent_queue_retran_cnt
unsigned int cnt_msg_on_sb
unsigned int sent_queue_cnt_removeable
uint8_t fragmented_delivery_inprogress
uint32_t highest_tsn_inside_map
Definition: sctp_structs.h:953
struct sctp_nets * alternate
Definition: sctp_structs.h:861
struct sctpchunk_listhead send_queue
Definition: sctp_structs.h:833
uint16_t streamincnt
uint16_t ssn_of_pdapi
struct sctp_stream_out * strmout
Definition: sctp_structs.h:857
unsigned int total_flight_count
uint32_t sat_t3_recovery_tsn
Definition: sctp_structs.h:960
struct sctpnetlisthead nets
Definition: sctp_structs.h:814
struct sctp_readhead pending_reply_queue
Definition: sctp_structs.h:876
uint8_t fast_retran_loss_recovery
uint32_t pr_sctp_cnt
struct timeval time_last_rcvd
Definition: sctp_structs.h:793
uint8_t idata_supported
struct sctp_timer dack_timer
Definition: sctp_structs.h:799
unsigned int cnt_on_all_streams
uint32_t cumulative_tsn
Definition: sctp_structs.h:943
uint32_t mapping_array_base_tsn
Definition: sctp_structs.h:948
uint8_t cmt_dac_pkts_rcvd
unsigned int total_flight
unsigned int stream_queue_cnt
uint8_t sctp_cmt_on_off
uint8_t sat_t3_loss_recovery
struct sctp_queued_to_read * control_pdapi
Definition: sctp_structs.h:969
uint8_t saw_sack_with_nr_frags
uint8_t saw_sack_with_frags
unsigned int delayed_ack
uint32_t fast_recovery_tsn
Definition: sctp_structs.h:959
struct sctp_resethead resetHead
Definition: sctp_structs.h:873
struct sctp_ss_functions ss_functions
Definition: sctp_structs.h:886
uint8_t * nr_mapping_array
Definition: sctp_structs.h:956
uint32_t my_rwnd_control_len
uint16_t str_of_pdapi
unsigned int size_on_all_streams
uint32_t highest_tsn_inside_nr_map
Definition: sctp_structs.h:957
uint32_t last_acked_seq
Definition: sctp_structs.h:927
unsigned int numduptsns
uint32_t this_sack_highest_gap
Definition: sctp_structs.h:921
unsigned int data_pkts_seen
uint8_t * mapping_array
Definition: sctp_structs.h:858
unsigned int cnt_on_reasm_queue
uint8_t prsctp_supported
int dup_tsns[SCTP_MAX_DUP_TSNS]
struct sctp_nets * primary_destination
Definition: sctp_structs.h:860
struct sctp_cc_functions cc_functions
Definition: sctp_structs.h:879
unsigned int overall_error_count
struct sctp_nets * last_data_chunk_from
Definition: sctp_structs.h:865
struct sctpchunk_listhead sent_queue
Definition: sctp_structs.h:832
unsigned int size_on_reasm_queue
unsigned int sack_freq
uint32_t advanced_peer_ack_point
Definition: sctp_structs.h:937
struct sctp_stream_in * strmin
Definition: sctp_structs.h:856
unsigned int sent_queue_cnt
void(* sctp_cwnd_update_tsn_acknowledged)(struct sctp_nets *net, struct sctp_tmit_chunk *)
Definition: sctp_structs.h:722
void(* sctp_cwnd_update_after_sack)(struct sctp_tcb *stcb, struct sctp_association *asoc, int accum_moved, int reneged_all, int will_exit)
Definition: sctp_structs.h:705
void(* sctp_cwnd_update_exit_pf)(struct sctp_tcb *stcb, struct sctp_nets *net)
Definition: sctp_structs.h:708
void(* sctp_cwnd_prepare_net_for_sack)(struct sctp_tcb *stcb, struct sctp_nets *net)
Definition: sctp_structs.h:726
void(* sctp_cwnd_update_after_fr)(struct sctp_tcb *stcb, struct sctp_association *asoc)
Definition: sctp_structs.h:709
uint8_t chunk_type
Definition: sctp.h:60
uint8_t chunk_flags
Definition: sctp.h:61
uint16_t chunk_length
Definition: sctp.h:62
struct sctp_data dp
Definition: sctp_header.h:148
struct sctp_chunkhdr ch
Definition: sctp_header.h:147
struct timeval timetodrop
Definition: sctp_structs.h:397
uint32_t fast_retran_tsn
Definition: sctp_structs.h:396
uint8_t doing_fast_retransmit
Definition: sctp_structs.h:399
uint8_t chunk_was_revoked
Definition: sctp_structs.h:403
uint16_t sid
Definition: sctp_header.h:140
uint16_t ssn
Definition: sctp_header.h:141
uint32_t tsn
Definition: sctp_header.h:139
uint32_t ppid
Definition: sctp_header.h:142
uint16_t length
Definition: sctp.h:382
uint16_t code
Definition: sctp.h:381
struct sctp_error_cause cause
Definition: sctp.h:387
uint16_t serinfo_next_stream
Definition: sctp_uio.h:142
uint16_t serinfo_next_flags
Definition: sctp_uio.h:141
uint32_t serinfo_next_length
Definition: sctp_uio.h:144
uint32_t serinfo_next_aid
Definition: sctp_uio.h:143
uint32_t serinfo_next_ppid
Definition: sctp_uio.h:145
struct sctp_chunkhdr ch
Definition: sctp_header.h:376
uint16_t length
Definition: sctp.h:376
uint16_t code
Definition: sctp.h:375
struct sctp_idata dp
Definition: sctp_header.h:165
struct sctp_chunkhdr ch
Definition: sctp_header.h:164
uint32_t ppid
Definition: sctp_header.h:157
uint32_t mid
Definition: sctp_header.h:155
uint16_t sid
Definition: sctp_header.h:153
union sctp_idata::@32 ppid_fsn
uint32_t fsn
Definition: sctp_header.h:158
uint32_t tsn
Definition: sctp_header.h:152
uint32_t partial_delivery_point
Definition: sctp_pcb.h:393
uint32_t sctp_flags
Definition: sctp_pcb.h:381
struct sctp_readhead read_queue
Definition: sctp_pcb.h:362
uint32_t last_abort_code
Definition: sctp_pcb.h:425
struct sctp_pcb sctp_ep
Definition: sctp_pcb.h:383
uint32_t pseudo_cumack
Definition: sctp_structs.h:312
uint32_t cwr_window_tsn
Definition: sctp_structs.h:264
uint32_t prev_cwnd
Definition: sctp_structs.h:291
uint8_t new_pseudo_cumack
Definition: sctp_structs.h:373
uint32_t flight_size
Definition: sctp_structs.h:289
uint8_t saw_newack
Definition: sctp_structs.h:349
uint32_t partial_bytes_acked
Definition: sctp_structs.h:294
uint32_t rtx_pseudo_cumack
Definition: sctp_structs.h:314
uint32_t this_sack_highest_newack
Definition: sctp_structs.h:308
uint8_t find_pseudo_cumack
Definition: sctp_structs.h:358
uint32_t RTO
Definition: sctp_structs.h:271
uint16_t error_count
Definition: sctp_structs.h:340
uint16_t dest_state
Definition: sctp_structs.h:334
uint8_t rto_needed
Definition: sctp_structs.h:380
unsigned int net_ack2
Definition: sctp_structs.h:297
uint8_t find_rtx_pseudo_cumack
Definition: sctp_structs.h:365
uint8_t fast_retran_loss_recovery
Definition: sctp_structs.h:344
uint8_t window_probe
Definition: sctp_structs.h:376
struct sctp_timer rxt_timer
Definition: sctp_structs.h:274
uint8_t will_exit_fast_recovery
Definition: sctp_structs.h:345
uint32_t cwnd
Definition: sctp_structs.h:290
unsigned int net_ack
Definition: sctp_structs.h:296
sctp_assoc_t nxt_assoc_id
Definition: sctp_uio.h:196
uint32_t nxt_ppid
Definition: sctp_uio.h:194
uint16_t nxt_flags
Definition: sctp_uio.h:193
uint32_t nxt_length
Definition: sctp_uio.h:195
uint16_t nxt_sid
Definition: sctp_uio.h:192
uint32_t sctp_sws_sender
Definition: sctp_pcb.h:275
struct sctp_tcb * stcb
Definition: sctp_structs.h:471
struct mbuf * data
Definition: sctp_structs.h:466
struct sctpchunk_listhead reasm
Definition: sctp_structs.h:474
struct mbuf * tail_mbuf
Definition: sctp_structs.h:468
sctp_assoc_t sinfo_assoc_id
Definition: sctp_structs.h:458
struct sctp_nets * whoFrom
Definition: sctp_structs.h:465
uint16_t rcv_sid
Definition: sctp_uio.h:181
uint32_t rcv_context
Definition: sctp_uio.h:187
uint16_t rcv_flags
Definition: sctp_uio.h:183
sctp_assoc_t rcv_assoc_id
Definition: sctp_uio.h:188
uint16_t rcv_ssn
Definition: sctp_uio.h:182
uint32_t rcv_tsn
Definition: sctp_uio.h:185
uint32_t rcv_cumtsn
Definition: sctp_uio.h:186
uint32_t rcv_ppid
Definition: sctp_uio.h:184
uint32_t cumulative_tsn_ack
Definition: sctp_header.h:302
uint32_t sinfo_tsn
Definition: sctp_uio.h:123
sctp_assoc_t sinfo_assoc_id
Definition: sctp_uio.h:125
uint16_t sinfo_flags
Definition: sctp_uio.h:119
uint16_t sinfo_stream
Definition: sctp_uio.h:117
uint16_t sinfo_ssn
Definition: sctp_uio.h:118
uint32_t sinfo_cumtsn
Definition: sctp_uio.h:124
uint32_t sinfo_context
Definition: sctp_uio.h:121
uint32_t sinfo_ppid
Definition: sctp_uio.h:120
bool(* sctp_ss_is_user_msgs_incomplete)(struct sctp_tcb *stcb, struct sctp_association *asoc)
Definition: sctp_structs.h:756
uint32_t last_mid_delivered
Definition: sctp_structs.h:544
struct sctp_readhead inqueue
Definition: sctp_structs.h:542
uint8_t pd_api_started
Definition: sctp_structs.h:547
struct sctp_readhead uno_inqueue
Definition: sctp_structs.h:543
uint32_t chunks_on_queues
Definition: sctp_structs.h:613
struct sctp_streamhead outqueue
Definition: sctp_structs.h:611
uint16_t list_of_streams[]
Definition: sctp_structs.h:82
uint16_t flags
Definition: sctp_header.h:388
uint16_t ssn
Definition: sctp_header.h:383
uint16_t sid
Definition: sctp_header.h:382
struct socket * sctp_socket
Definition: sctp_pcb.h:438
struct sctp_association asoc
Definition: sctp_pcb.h:449
uint16_t rport
Definition: sctp_pcb.h:459
struct sctp_inpcb * sctp_ep
Definition: sctp_pcb.h:439
sctp_os_timer_t timer
Definition: sctp_structs.h:46
union sctp_tmit_chunk::@34 rec
struct sctp_data_chunkrec data
Definition: sctp_structs.h:424
uint8_t window_probe
Definition: sctp_structs.h:446
struct timeval sent_rcv_time
Definition: sctp_structs.h:428
uint16_t book_size
Definition: sctp_structs.h:437
uint16_t snd_count
Definition: sctp_structs.h:434
struct sctp_nets * whoTo
Definition: sctp_structs.h:431
uint16_t send_size
Definition: sctp_structs.h:436
struct sctp_association * asoc
Definition: sctp_structs.h:427
uint8_t no_fr_allowed
Definition: sctp_structs.h:444