FreeBSD kernel IPv4 code
sctp_output.c
1/*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
5 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * a) Redistributions of source code must retain the above copyright notice,
12 * this list of conditions and the following disclaimer.
13 *
14 * b) Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the distribution.
17 *
18 * c) Neither the name of Cisco Systems, Inc. nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35#include <sys/cdefs.h>
36__FBSDID("$FreeBSD$");
37
38#include <netinet/sctp_os.h>
39#include <sys/proc.h>
40#include <netinet/sctp_var.h>
41#include <netinet/sctp_sysctl.h>
42#include <netinet/sctp_header.h>
43#include <netinet/sctp_pcb.h>
44#include <netinet/sctputil.h>
45#include <netinet/sctp_output.h>
46#include <netinet/sctp_uio.h>
47#include <netinet/sctputil.h>
48#include <netinet/sctp_auth.h>
49#include <netinet/sctp_timer.h>
50#include <netinet/sctp_asconf.h>
51#include <netinet/sctp_indata.h>
53#include <netinet/sctp_input.h>
54#include <netinet/sctp_crc32.h>
56#if defined(INET) || defined(INET6)
57#include <netinet/udp.h>
58#endif
59#include <netinet/udp_var.h>
60#include <machine/in_cksum.h>
61
62#define SCTP_MAX_GAPS_INARRAY 4
63struct sack_track {
64 uint8_t right_edge; /* mergable on the right edge */
65 uint8_t left_edge; /* mergable on the left edge */
 66 uint8_t num_entries;
 67 uint8_t spare;
 68 struct sctp_gap_ack_block gaps[SCTP_MAX_GAPS_INARRAY];
 69};
70
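/*
 * Lookup table used when building SACK gap ack blocks.  The table is
 * indexed by one byte of the association's mapping array; each entry
 * records whether the byte can merge with its neighbour on the right or
 * left edge, how many gap ack blocks the byte contains, and the bit
 * offsets ({start, end}) of up to SCTP_MAX_GAPS_INARRAY gap blocks.
 */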
71const struct sack_track sack_array[256] = {
72 {0, 0, 0, 0, /* 0x00 */
73 {{0, 0},
74 {0, 0},
75 {0, 0},
76 {0, 0}
77 }
78 },
79 {1, 0, 1, 0, /* 0x01 */
80 {{0, 0},
81 {0, 0},
82 {0, 0},
83 {0, 0}
84 }
85 },
86 {0, 0, 1, 0, /* 0x02 */
87 {{1, 1},
88 {0, 0},
89 {0, 0},
90 {0, 0}
91 }
92 },
93 {1, 0, 1, 0, /* 0x03 */
94 {{0, 1},
95 {0, 0},
96 {0, 0},
97 {0, 0}
98 }
99 },
100 {0, 0, 1, 0, /* 0x04 */
101 {{2, 2},
102 {0, 0},
103 {0, 0},
104 {0, 0}
105 }
106 },
107 {1, 0, 2, 0, /* 0x05 */
108 {{0, 0},
109 {2, 2},
110 {0, 0},
111 {0, 0}
112 }
113 },
114 {0, 0, 1, 0, /* 0x06 */
115 {{1, 2},
116 {0, 0},
117 {0, 0},
118 {0, 0}
119 }
120 },
121 {1, 0, 1, 0, /* 0x07 */
122 {{0, 2},
123 {0, 0},
124 {0, 0},
125 {0, 0}
126 }
127 },
128 {0, 0, 1, 0, /* 0x08 */
129 {{3, 3},
130 {0, 0},
131 {0, 0},
132 {0, 0}
133 }
134 },
135 {1, 0, 2, 0, /* 0x09 */
136 {{0, 0},
137 {3, 3},
138 {0, 0},
139 {0, 0}
140 }
141 },
142 {0, 0, 2, 0, /* 0x0a */
143 {{1, 1},
144 {3, 3},
145 {0, 0},
146 {0, 0}
147 }
148 },
149 {1, 0, 2, 0, /* 0x0b */
150 {{0, 1},
151 {3, 3},
152 {0, 0},
153 {0, 0}
154 }
155 },
156 {0, 0, 1, 0, /* 0x0c */
157 {{2, 3},
158 {0, 0},
159 {0, 0},
160 {0, 0}
161 }
162 },
163 {1, 0, 2, 0, /* 0x0d */
164 {{0, 0},
165 {2, 3},
166 {0, 0},
167 {0, 0}
168 }
169 },
170 {0, 0, 1, 0, /* 0x0e */
171 {{1, 3},
172 {0, 0},
173 {0, 0},
174 {0, 0}
175 }
176 },
177 {1, 0, 1, 0, /* 0x0f */
178 {{0, 3},
179 {0, 0},
180 {0, 0},
181 {0, 0}
182 }
183 },
184 {0, 0, 1, 0, /* 0x10 */
185 {{4, 4},
186 {0, 0},
187 {0, 0},
188 {0, 0}
189 }
190 },
191 {1, 0, 2, 0, /* 0x11 */
192 {{0, 0},
193 {4, 4},
194 {0, 0},
195 {0, 0}
196 }
197 },
198 {0, 0, 2, 0, /* 0x12 */
199 {{1, 1},
200 {4, 4},
201 {0, 0},
202 {0, 0}
203 }
204 },
205 {1, 0, 2, 0, /* 0x13 */
206 {{0, 1},
207 {4, 4},
208 {0, 0},
209 {0, 0}
210 }
211 },
212 {0, 0, 2, 0, /* 0x14 */
213 {{2, 2},
214 {4, 4},
215 {0, 0},
216 {0, 0}
217 }
218 },
219 {1, 0, 3, 0, /* 0x15 */
220 {{0, 0},
221 {2, 2},
222 {4, 4},
223 {0, 0}
224 }
225 },
226 {0, 0, 2, 0, /* 0x16 */
227 {{1, 2},
228 {4, 4},
229 {0, 0},
230 {0, 0}
231 }
232 },
233 {1, 0, 2, 0, /* 0x17 */
234 {{0, 2},
235 {4, 4},
236 {0, 0},
237 {0, 0}
238 }
239 },
240 {0, 0, 1, 0, /* 0x18 */
241 {{3, 4},
242 {0, 0},
243 {0, 0},
244 {0, 0}
245 }
246 },
247 {1, 0, 2, 0, /* 0x19 */
248 {{0, 0},
249 {3, 4},
250 {0, 0},
251 {0, 0}
252 }
253 },
254 {0, 0, 2, 0, /* 0x1a */
255 {{1, 1},
256 {3, 4},
257 {0, 0},
258 {0, 0}
259 }
260 },
261 {1, 0, 2, 0, /* 0x1b */
262 {{0, 1},
263 {3, 4},
264 {0, 0},
265 {0, 0}
266 }
267 },
268 {0, 0, 1, 0, /* 0x1c */
269 {{2, 4},
270 {0, 0},
271 {0, 0},
272 {0, 0}
273 }
274 },
275 {1, 0, 2, 0, /* 0x1d */
276 {{0, 0},
277 {2, 4},
278 {0, 0},
279 {0, 0}
280 }
281 },
282 {0, 0, 1, 0, /* 0x1e */
283 {{1, 4},
284 {0, 0},
285 {0, 0},
286 {0, 0}
287 }
288 },
289 {1, 0, 1, 0, /* 0x1f */
290 {{0, 4},
291 {0, 0},
292 {0, 0},
293 {0, 0}
294 }
295 },
296 {0, 0, 1, 0, /* 0x20 */
297 {{5, 5},
298 {0, 0},
299 {0, 0},
300 {0, 0}
301 }
302 },
303 {1, 0, 2, 0, /* 0x21 */
304 {{0, 0},
305 {5, 5},
306 {0, 0},
307 {0, 0}
308 }
309 },
310 {0, 0, 2, 0, /* 0x22 */
311 {{1, 1},
312 {5, 5},
313 {0, 0},
314 {0, 0}
315 }
316 },
317 {1, 0, 2, 0, /* 0x23 */
318 {{0, 1},
319 {5, 5},
320 {0, 0},
321 {0, 0}
322 }
323 },
324 {0, 0, 2, 0, /* 0x24 */
325 {{2, 2},
326 {5, 5},
327 {0, 0},
328 {0, 0}
329 }
330 },
331 {1, 0, 3, 0, /* 0x25 */
332 {{0, 0},
333 {2, 2},
334 {5, 5},
335 {0, 0}
336 }
337 },
338 {0, 0, 2, 0, /* 0x26 */
339 {{1, 2},
340 {5, 5},
341 {0, 0},
342 {0, 0}
343 }
344 },
345 {1, 0, 2, 0, /* 0x27 */
346 {{0, 2},
347 {5, 5},
348 {0, 0},
349 {0, 0}
350 }
351 },
352 {0, 0, 2, 0, /* 0x28 */
353 {{3, 3},
354 {5, 5},
355 {0, 0},
356 {0, 0}
357 }
358 },
359 {1, 0, 3, 0, /* 0x29 */
360 {{0, 0},
361 {3, 3},
362 {5, 5},
363 {0, 0}
364 }
365 },
366 {0, 0, 3, 0, /* 0x2a */
367 {{1, 1},
368 {3, 3},
369 {5, 5},
370 {0, 0}
371 }
372 },
373 {1, 0, 3, 0, /* 0x2b */
374 {{0, 1},
375 {3, 3},
376 {5, 5},
377 {0, 0}
378 }
379 },
380 {0, 0, 2, 0, /* 0x2c */
381 {{2, 3},
382 {5, 5},
383 {0, 0},
384 {0, 0}
385 }
386 },
387 {1, 0, 3, 0, /* 0x2d */
388 {{0, 0},
389 {2, 3},
390 {5, 5},
391 {0, 0}
392 }
393 },
394 {0, 0, 2, 0, /* 0x2e */
395 {{1, 3},
396 {5, 5},
397 {0, 0},
398 {0, 0}
399 }
400 },
401 {1, 0, 2, 0, /* 0x2f */
402 {{0, 3},
403 {5, 5},
404 {0, 0},
405 {0, 0}
406 }
407 },
408 {0, 0, 1, 0, /* 0x30 */
409 {{4, 5},
410 {0, 0},
411 {0, 0},
412 {0, 0}
413 }
414 },
415 {1, 0, 2, 0, /* 0x31 */
416 {{0, 0},
417 {4, 5},
418 {0, 0},
419 {0, 0}
420 }
421 },
422 {0, 0, 2, 0, /* 0x32 */
423 {{1, 1},
424 {4, 5},
425 {0, 0},
426 {0, 0}
427 }
428 },
429 {1, 0, 2, 0, /* 0x33 */
430 {{0, 1},
431 {4, 5},
432 {0, 0},
433 {0, 0}
434 }
435 },
436 {0, 0, 2, 0, /* 0x34 */
437 {{2, 2},
438 {4, 5},
439 {0, 0},
440 {0, 0}
441 }
442 },
443 {1, 0, 3, 0, /* 0x35 */
444 {{0, 0},
445 {2, 2},
446 {4, 5},
447 {0, 0}
448 }
449 },
450 {0, 0, 2, 0, /* 0x36 */
451 {{1, 2},
452 {4, 5},
453 {0, 0},
454 {0, 0}
455 }
456 },
457 {1, 0, 2, 0, /* 0x37 */
458 {{0, 2},
459 {4, 5},
460 {0, 0},
461 {0, 0}
462 }
463 },
464 {0, 0, 1, 0, /* 0x38 */
465 {{3, 5},
466 {0, 0},
467 {0, 0},
468 {0, 0}
469 }
470 },
471 {1, 0, 2, 0, /* 0x39 */
472 {{0, 0},
473 {3, 5},
474 {0, 0},
475 {0, 0}
476 }
477 },
478 {0, 0, 2, 0, /* 0x3a */
479 {{1, 1},
480 {3, 5},
481 {0, 0},
482 {0, 0}
483 }
484 },
485 {1, 0, 2, 0, /* 0x3b */
486 {{0, 1},
487 {3, 5},
488 {0, 0},
489 {0, 0}
490 }
491 },
492 {0, 0, 1, 0, /* 0x3c */
493 {{2, 5},
494 {0, 0},
495 {0, 0},
496 {0, 0}
497 }
498 },
499 {1, 0, 2, 0, /* 0x3d */
500 {{0, 0},
501 {2, 5},
502 {0, 0},
503 {0, 0}
504 }
505 },
506 {0, 0, 1, 0, /* 0x3e */
507 {{1, 5},
508 {0, 0},
509 {0, 0},
510 {0, 0}
511 }
512 },
513 {1, 0, 1, 0, /* 0x3f */
514 {{0, 5},
515 {0, 0},
516 {0, 0},
517 {0, 0}
518 }
519 },
520 {0, 0, 1, 0, /* 0x40 */
521 {{6, 6},
522 {0, 0},
523 {0, 0},
524 {0, 0}
525 }
526 },
527 {1, 0, 2, 0, /* 0x41 */
528 {{0, 0},
529 {6, 6},
530 {0, 0},
531 {0, 0}
532 }
533 },
534 {0, 0, 2, 0, /* 0x42 */
535 {{1, 1},
536 {6, 6},
537 {0, 0},
538 {0, 0}
539 }
540 },
541 {1, 0, 2, 0, /* 0x43 */
542 {{0, 1},
543 {6, 6},
544 {0, 0},
545 {0, 0}
546 }
547 },
548 {0, 0, 2, 0, /* 0x44 */
549 {{2, 2},
550 {6, 6},
551 {0, 0},
552 {0, 0}
553 }
554 },
555 {1, 0, 3, 0, /* 0x45 */
556 {{0, 0},
557 {2, 2},
558 {6, 6},
559 {0, 0}
560 }
561 },
562 {0, 0, 2, 0, /* 0x46 */
563 {{1, 2},
564 {6, 6},
565 {0, 0},
566 {0, 0}
567 }
568 },
569 {1, 0, 2, 0, /* 0x47 */
570 {{0, 2},
571 {6, 6},
572 {0, 0},
573 {0, 0}
574 }
575 },
576 {0, 0, 2, 0, /* 0x48 */
577 {{3, 3},
578 {6, 6},
579 {0, 0},
580 {0, 0}
581 }
582 },
583 {1, 0, 3, 0, /* 0x49 */
584 {{0, 0},
585 {3, 3},
586 {6, 6},
587 {0, 0}
588 }
589 },
590 {0, 0, 3, 0, /* 0x4a */
591 {{1, 1},
592 {3, 3},
593 {6, 6},
594 {0, 0}
595 }
596 },
597 {1, 0, 3, 0, /* 0x4b */
598 {{0, 1},
599 {3, 3},
600 {6, 6},
601 {0, 0}
602 }
603 },
604 {0, 0, 2, 0, /* 0x4c */
605 {{2, 3},
606 {6, 6},
607 {0, 0},
608 {0, 0}
609 }
610 },
611 {1, 0, 3, 0, /* 0x4d */
612 {{0, 0},
613 {2, 3},
614 {6, 6},
615 {0, 0}
616 }
617 },
618 {0, 0, 2, 0, /* 0x4e */
619 {{1, 3},
620 {6, 6},
621 {0, 0},
622 {0, 0}
623 }
624 },
625 {1, 0, 2, 0, /* 0x4f */
626 {{0, 3},
627 {6, 6},
628 {0, 0},
629 {0, 0}
630 }
631 },
632 {0, 0, 2, 0, /* 0x50 */
633 {{4, 4},
634 {6, 6},
635 {0, 0},
636 {0, 0}
637 }
638 },
639 {1, 0, 3, 0, /* 0x51 */
640 {{0, 0},
641 {4, 4},
642 {6, 6},
643 {0, 0}
644 }
645 },
646 {0, 0, 3, 0, /* 0x52 */
647 {{1, 1},
648 {4, 4},
649 {6, 6},
650 {0, 0}
651 }
652 },
653 {1, 0, 3, 0, /* 0x53 */
654 {{0, 1},
655 {4, 4},
656 {6, 6},
657 {0, 0}
658 }
659 },
660 {0, 0, 3, 0, /* 0x54 */
661 {{2, 2},
662 {4, 4},
663 {6, 6},
664 {0, 0}
665 }
666 },
667 {1, 0, 4, 0, /* 0x55 */
668 {{0, 0},
669 {2, 2},
670 {4, 4},
671 {6, 6}
672 }
673 },
674 {0, 0, 3, 0, /* 0x56 */
675 {{1, 2},
676 {4, 4},
677 {6, 6},
678 {0, 0}
679 }
680 },
681 {1, 0, 3, 0, /* 0x57 */
682 {{0, 2},
683 {4, 4},
684 {6, 6},
685 {0, 0}
686 }
687 },
688 {0, 0, 2, 0, /* 0x58 */
689 {{3, 4},
690 {6, 6},
691 {0, 0},
692 {0, 0}
693 }
694 },
695 {1, 0, 3, 0, /* 0x59 */
696 {{0, 0},
697 {3, 4},
698 {6, 6},
699 {0, 0}
700 }
701 },
702 {0, 0, 3, 0, /* 0x5a */
703 {{1, 1},
704 {3, 4},
705 {6, 6},
706 {0, 0}
707 }
708 },
709 {1, 0, 3, 0, /* 0x5b */
710 {{0, 1},
711 {3, 4},
712 {6, 6},
713 {0, 0}
714 }
715 },
716 {0, 0, 2, 0, /* 0x5c */
717 {{2, 4},
718 {6, 6},
719 {0, 0},
720 {0, 0}
721 }
722 },
723 {1, 0, 3, 0, /* 0x5d */
724 {{0, 0},
725 {2, 4},
726 {6, 6},
727 {0, 0}
728 }
729 },
730 {0, 0, 2, 0, /* 0x5e */
731 {{1, 4},
732 {6, 6},
733 {0, 0},
734 {0, 0}
735 }
736 },
737 {1, 0, 2, 0, /* 0x5f */
738 {{0, 4},
739 {6, 6},
740 {0, 0},
741 {0, 0}
742 }
743 },
744 {0, 0, 1, 0, /* 0x60 */
745 {{5, 6},
746 {0, 0},
747 {0, 0},
748 {0, 0}
749 }
750 },
751 {1, 0, 2, 0, /* 0x61 */
752 {{0, 0},
753 {5, 6},
754 {0, 0},
755 {0, 0}
756 }
757 },
758 {0, 0, 2, 0, /* 0x62 */
759 {{1, 1},
760 {5, 6},
761 {0, 0},
762 {0, 0}
763 }
764 },
765 {1, 0, 2, 0, /* 0x63 */
766 {{0, 1},
767 {5, 6},
768 {0, 0},
769 {0, 0}
770 }
771 },
772 {0, 0, 2, 0, /* 0x64 */
773 {{2, 2},
774 {5, 6},
775 {0, 0},
776 {0, 0}
777 }
778 },
779 {1, 0, 3, 0, /* 0x65 */
780 {{0, 0},
781 {2, 2},
782 {5, 6},
783 {0, 0}
784 }
785 },
786 {0, 0, 2, 0, /* 0x66 */
787 {{1, 2},
788 {5, 6},
789 {0, 0},
790 {0, 0}
791 }
792 },
793 {1, 0, 2, 0, /* 0x67 */
794 {{0, 2},
795 {5, 6},
796 {0, 0},
797 {0, 0}
798 }
799 },
800 {0, 0, 2, 0, /* 0x68 */
801 {{3, 3},
802 {5, 6},
803 {0, 0},
804 {0, 0}
805 }
806 },
807 {1, 0, 3, 0, /* 0x69 */
808 {{0, 0},
809 {3, 3},
810 {5, 6},
811 {0, 0}
812 }
813 },
814 {0, 0, 3, 0, /* 0x6a */
815 {{1, 1},
816 {3, 3},
817 {5, 6},
818 {0, 0}
819 }
820 },
821 {1, 0, 3, 0, /* 0x6b */
822 {{0, 1},
823 {3, 3},
824 {5, 6},
825 {0, 0}
826 }
827 },
828 {0, 0, 2, 0, /* 0x6c */
829 {{2, 3},
830 {5, 6},
831 {0, 0},
832 {0, 0}
833 }
834 },
835 {1, 0, 3, 0, /* 0x6d */
836 {{0, 0},
837 {2, 3},
838 {5, 6},
839 {0, 0}
840 }
841 },
842 {0, 0, 2, 0, /* 0x6e */
843 {{1, 3},
844 {5, 6},
845 {0, 0},
846 {0, 0}
847 }
848 },
849 {1, 0, 2, 0, /* 0x6f */
850 {{0, 3},
851 {5, 6},
852 {0, 0},
853 {0, 0}
854 }
855 },
856 {0, 0, 1, 0, /* 0x70 */
857 {{4, 6},
858 {0, 0},
859 {0, 0},
860 {0, 0}
861 }
862 },
863 {1, 0, 2, 0, /* 0x71 */
864 {{0, 0},
865 {4, 6},
866 {0, 0},
867 {0, 0}
868 }
869 },
870 {0, 0, 2, 0, /* 0x72 */
871 {{1, 1},
872 {4, 6},
873 {0, 0},
874 {0, 0}
875 }
876 },
877 {1, 0, 2, 0, /* 0x73 */
878 {{0, 1},
879 {4, 6},
880 {0, 0},
881 {0, 0}
882 }
883 },
884 {0, 0, 2, 0, /* 0x74 */
885 {{2, 2},
886 {4, 6},
887 {0, 0},
888 {0, 0}
889 }
890 },
891 {1, 0, 3, 0, /* 0x75 */
892 {{0, 0},
893 {2, 2},
894 {4, 6},
895 {0, 0}
896 }
897 },
898 {0, 0, 2, 0, /* 0x76 */
899 {{1, 2},
900 {4, 6},
901 {0, 0},
902 {0, 0}
903 }
904 },
905 {1, 0, 2, 0, /* 0x77 */
906 {{0, 2},
907 {4, 6},
908 {0, 0},
909 {0, 0}
910 }
911 },
912 {0, 0, 1, 0, /* 0x78 */
913 {{3, 6},
914 {0, 0},
915 {0, 0},
916 {0, 0}
917 }
918 },
919 {1, 0, 2, 0, /* 0x79 */
920 {{0, 0},
921 {3, 6},
922 {0, 0},
923 {0, 0}
924 }
925 },
926 {0, 0, 2, 0, /* 0x7a */
927 {{1, 1},
928 {3, 6},
929 {0, 0},
930 {0, 0}
931 }
932 },
933 {1, 0, 2, 0, /* 0x7b */
934 {{0, 1},
935 {3, 6},
936 {0, 0},
937 {0, 0}
938 }
939 },
940 {0, 0, 1, 0, /* 0x7c */
941 {{2, 6},
942 {0, 0},
943 {0, 0},
944 {0, 0}
945 }
946 },
947 {1, 0, 2, 0, /* 0x7d */
948 {{0, 0},
949 {2, 6},
950 {0, 0},
951 {0, 0}
952 }
953 },
954 {0, 0, 1, 0, /* 0x7e */
955 {{1, 6},
956 {0, 0},
957 {0, 0},
958 {0, 0}
959 }
960 },
961 {1, 0, 1, 0, /* 0x7f */
962 {{0, 6},
963 {0, 0},
964 {0, 0},
965 {0, 0}
966 }
967 },
968 {0, 1, 1, 0, /* 0x80 */
969 {{7, 7},
970 {0, 0},
971 {0, 0},
972 {0, 0}
973 }
974 },
975 {1, 1, 2, 0, /* 0x81 */
976 {{0, 0},
977 {7, 7},
978 {0, 0},
979 {0, 0}
980 }
981 },
982 {0, 1, 2, 0, /* 0x82 */
983 {{1, 1},
984 {7, 7},
985 {0, 0},
986 {0, 0}
987 }
988 },
989 {1, 1, 2, 0, /* 0x83 */
990 {{0, 1},
991 {7, 7},
992 {0, 0},
993 {0, 0}
994 }
995 },
996 {0, 1, 2, 0, /* 0x84 */
997 {{2, 2},
998 {7, 7},
999 {0, 0},
1000 {0, 0}
1001 }
1002 },
1003 {1, 1, 3, 0, /* 0x85 */
1004 {{0, 0},
1005 {2, 2},
1006 {7, 7},
1007 {0, 0}
1008 }
1009 },
1010 {0, 1, 2, 0, /* 0x86 */
1011 {{1, 2},
1012 {7, 7},
1013 {0, 0},
1014 {0, 0}
1015 }
1016 },
1017 {1, 1, 2, 0, /* 0x87 */
1018 {{0, 2},
1019 {7, 7},
1020 {0, 0},
1021 {0, 0}
1022 }
1023 },
1024 {0, 1, 2, 0, /* 0x88 */
1025 {{3, 3},
1026 {7, 7},
1027 {0, 0},
1028 {0, 0}
1029 }
1030 },
1031 {1, 1, 3, 0, /* 0x89 */
1032 {{0, 0},
1033 {3, 3},
1034 {7, 7},
1035 {0, 0}
1036 }
1037 },
1038 {0, 1, 3, 0, /* 0x8a */
1039 {{1, 1},
1040 {3, 3},
1041 {7, 7},
1042 {0, 0}
1043 }
1044 },
1045 {1, 1, 3, 0, /* 0x8b */
1046 {{0, 1},
1047 {3, 3},
1048 {7, 7},
1049 {0, 0}
1050 }
1051 },
1052 {0, 1, 2, 0, /* 0x8c */
1053 {{2, 3},
1054 {7, 7},
1055 {0, 0},
1056 {0, 0}
1057 }
1058 },
1059 {1, 1, 3, 0, /* 0x8d */
1060 {{0, 0},
1061 {2, 3},
1062 {7, 7},
1063 {0, 0}
1064 }
1065 },
1066 {0, 1, 2, 0, /* 0x8e */
1067 {{1, 3},
1068 {7, 7},
1069 {0, 0},
1070 {0, 0}
1071 }
1072 },
1073 {1, 1, 2, 0, /* 0x8f */
1074 {{0, 3},
1075 {7, 7},
1076 {0, 0},
1077 {0, 0}
1078 }
1079 },
1080 {0, 1, 2, 0, /* 0x90 */
1081 {{4, 4},
1082 {7, 7},
1083 {0, 0},
1084 {0, 0}
1085 }
1086 },
1087 {1, 1, 3, 0, /* 0x91 */
1088 {{0, 0},
1089 {4, 4},
1090 {7, 7},
1091 {0, 0}
1092 }
1093 },
1094 {0, 1, 3, 0, /* 0x92 */
1095 {{1, 1},
1096 {4, 4},
1097 {7, 7},
1098 {0, 0}
1099 }
1100 },
1101 {1, 1, 3, 0, /* 0x93 */
1102 {{0, 1},
1103 {4, 4},
1104 {7, 7},
1105 {0, 0}
1106 }
1107 },
1108 {0, 1, 3, 0, /* 0x94 */
1109 {{2, 2},
1110 {4, 4},
1111 {7, 7},
1112 {0, 0}
1113 }
1114 },
1115 {1, 1, 4, 0, /* 0x95 */
1116 {{0, 0},
1117 {2, 2},
1118 {4, 4},
1119 {7, 7}
1120 }
1121 },
1122 {0, 1, 3, 0, /* 0x96 */
1123 {{1, 2},
1124 {4, 4},
1125 {7, 7},
1126 {0, 0}
1127 }
1128 },
1129 {1, 1, 3, 0, /* 0x97 */
1130 {{0, 2},
1131 {4, 4},
1132 {7, 7},
1133 {0, 0}
1134 }
1135 },
1136 {0, 1, 2, 0, /* 0x98 */
1137 {{3, 4},
1138 {7, 7},
1139 {0, 0},
1140 {0, 0}
1141 }
1142 },
1143 {1, 1, 3, 0, /* 0x99 */
1144 {{0, 0},
1145 {3, 4},
1146 {7, 7},
1147 {0, 0}
1148 }
1149 },
1150 {0, 1, 3, 0, /* 0x9a */
1151 {{1, 1},
1152 {3, 4},
1153 {7, 7},
1154 {0, 0}
1155 }
1156 },
1157 {1, 1, 3, 0, /* 0x9b */
1158 {{0, 1},
1159 {3, 4},
1160 {7, 7},
1161 {0, 0}
1162 }
1163 },
1164 {0, 1, 2, 0, /* 0x9c */
1165 {{2, 4},
1166 {7, 7},
1167 {0, 0},
1168 {0, 0}
1169 }
1170 },
1171 {1, 1, 3, 0, /* 0x9d */
1172 {{0, 0},
1173 {2, 4},
1174 {7, 7},
1175 {0, 0}
1176 }
1177 },
1178 {0, 1, 2, 0, /* 0x9e */
1179 {{1, 4},
1180 {7, 7},
1181 {0, 0},
1182 {0, 0}
1183 }
1184 },
1185 {1, 1, 2, 0, /* 0x9f */
1186 {{0, 4},
1187 {7, 7},
1188 {0, 0},
1189 {0, 0}
1190 }
1191 },
1192 {0, 1, 2, 0, /* 0xa0 */
1193 {{5, 5},
1194 {7, 7},
1195 {0, 0},
1196 {0, 0}
1197 }
1198 },
1199 {1, 1, 3, 0, /* 0xa1 */
1200 {{0, 0},
1201 {5, 5},
1202 {7, 7},
1203 {0, 0}
1204 }
1205 },
1206 {0, 1, 3, 0, /* 0xa2 */
1207 {{1, 1},
1208 {5, 5},
1209 {7, 7},
1210 {0, 0}
1211 }
1212 },
1213 {1, 1, 3, 0, /* 0xa3 */
1214 {{0, 1},
1215 {5, 5},
1216 {7, 7},
1217 {0, 0}
1218 }
1219 },
1220 {0, 1, 3, 0, /* 0xa4 */
1221 {{2, 2},
1222 {5, 5},
1223 {7, 7},
1224 {0, 0}
1225 }
1226 },
1227 {1, 1, 4, 0, /* 0xa5 */
1228 {{0, 0},
1229 {2, 2},
1230 {5, 5},
1231 {7, 7}
1232 }
1233 },
1234 {0, 1, 3, 0, /* 0xa6 */
1235 {{1, 2},
1236 {5, 5},
1237 {7, 7},
1238 {0, 0}
1239 }
1240 },
1241 {1, 1, 3, 0, /* 0xa7 */
1242 {{0, 2},
1243 {5, 5},
1244 {7, 7},
1245 {0, 0}
1246 }
1247 },
1248 {0, 1, 3, 0, /* 0xa8 */
1249 {{3, 3},
1250 {5, 5},
1251 {7, 7},
1252 {0, 0}
1253 }
1254 },
1255 {1, 1, 4, 0, /* 0xa9 */
1256 {{0, 0},
1257 {3, 3},
1258 {5, 5},
1259 {7, 7}
1260 }
1261 },
1262 {0, 1, 4, 0, /* 0xaa */
1263 {{1, 1},
1264 {3, 3},
1265 {5, 5},
1266 {7, 7}
1267 }
1268 },
1269 {1, 1, 4, 0, /* 0xab */
1270 {{0, 1},
1271 {3, 3},
1272 {5, 5},
1273 {7, 7}
1274 }
1275 },
1276 {0, 1, 3, 0, /* 0xac */
1277 {{2, 3},
1278 {5, 5},
1279 {7, 7},
1280 {0, 0}
1281 }
1282 },
1283 {1, 1, 4, 0, /* 0xad */
1284 {{0, 0},
1285 {2, 3},
1286 {5, 5},
1287 {7, 7}
1288 }
1289 },
1290 {0, 1, 3, 0, /* 0xae */
1291 {{1, 3},
1292 {5, 5},
1293 {7, 7},
1294 {0, 0}
1295 }
1296 },
1297 {1, 1, 3, 0, /* 0xaf */
1298 {{0, 3},
1299 {5, 5},
1300 {7, 7},
1301 {0, 0}
1302 }
1303 },
1304 {0, 1, 2, 0, /* 0xb0 */
1305 {{4, 5},
1306 {7, 7},
1307 {0, 0},
1308 {0, 0}
1309 }
1310 },
1311 {1, 1, 3, 0, /* 0xb1 */
1312 {{0, 0},
1313 {4, 5},
1314 {7, 7},
1315 {0, 0}
1316 }
1317 },
1318 {0, 1, 3, 0, /* 0xb2 */
1319 {{1, 1},
1320 {4, 5},
1321 {7, 7},
1322 {0, 0}
1323 }
1324 },
1325 {1, 1, 3, 0, /* 0xb3 */
1326 {{0, 1},
1327 {4, 5},
1328 {7, 7},
1329 {0, 0}
1330 }
1331 },
1332 {0, 1, 3, 0, /* 0xb4 */
1333 {{2, 2},
1334 {4, 5},
1335 {7, 7},
1336 {0, 0}
1337 }
1338 },
1339 {1, 1, 4, 0, /* 0xb5 */
1340 {{0, 0},
1341 {2, 2},
1342 {4, 5},
1343 {7, 7}
1344 }
1345 },
1346 {0, 1, 3, 0, /* 0xb6 */
1347 {{1, 2},
1348 {4, 5},
1349 {7, 7},
1350 {0, 0}
1351 }
1352 },
1353 {1, 1, 3, 0, /* 0xb7 */
1354 {{0, 2},
1355 {4, 5},
1356 {7, 7},
1357 {0, 0}
1358 }
1359 },
1360 {0, 1, 2, 0, /* 0xb8 */
1361 {{3, 5},
1362 {7, 7},
1363 {0, 0},
1364 {0, 0}
1365 }
1366 },
1367 {1, 1, 3, 0, /* 0xb9 */
1368 {{0, 0},
1369 {3, 5},
1370 {7, 7},
1371 {0, 0}
1372 }
1373 },
1374 {0, 1, 3, 0, /* 0xba */
1375 {{1, 1},
1376 {3, 5},
1377 {7, 7},
1378 {0, 0}
1379 }
1380 },
1381 {1, 1, 3, 0, /* 0xbb */
1382 {{0, 1},
1383 {3, 5},
1384 {7, 7},
1385 {0, 0}
1386 }
1387 },
1388 {0, 1, 2, 0, /* 0xbc */
1389 {{2, 5},
1390 {7, 7},
1391 {0, 0},
1392 {0, 0}
1393 }
1394 },
1395 {1, 1, 3, 0, /* 0xbd */
1396 {{0, 0},
1397 {2, 5},
1398 {7, 7},
1399 {0, 0}
1400 }
1401 },
1402 {0, 1, 2, 0, /* 0xbe */
1403 {{1, 5},
1404 {7, 7},
1405 {0, 0},
1406 {0, 0}
1407 }
1408 },
1409 {1, 1, 2, 0, /* 0xbf */
1410 {{0, 5},
1411 {7, 7},
1412 {0, 0},
1413 {0, 0}
1414 }
1415 },
1416 {0, 1, 1, 0, /* 0xc0 */
1417 {{6, 7},
1418 {0, 0},
1419 {0, 0},
1420 {0, 0}
1421 }
1422 },
1423 {1, 1, 2, 0, /* 0xc1 */
1424 {{0, 0},
1425 {6, 7},
1426 {0, 0},
1427 {0, 0}
1428 }
1429 },
1430 {0, 1, 2, 0, /* 0xc2 */
1431 {{1, 1},
1432 {6, 7},
1433 {0, 0},
1434 {0, 0}
1435 }
1436 },
1437 {1, 1, 2, 0, /* 0xc3 */
1438 {{0, 1},
1439 {6, 7},
1440 {0, 0},
1441 {0, 0}
1442 }
1443 },
1444 {0, 1, 2, 0, /* 0xc4 */
1445 {{2, 2},
1446 {6, 7},
1447 {0, 0},
1448 {0, 0}
1449 }
1450 },
1451 {1, 1, 3, 0, /* 0xc5 */
1452 {{0, 0},
1453 {2, 2},
1454 {6, 7},
1455 {0, 0}
1456 }
1457 },
1458 {0, 1, 2, 0, /* 0xc6 */
1459 {{1, 2},
1460 {6, 7},
1461 {0, 0},
1462 {0, 0}
1463 }
1464 },
1465 {1, 1, 2, 0, /* 0xc7 */
1466 {{0, 2},
1467 {6, 7},
1468 {0, 0},
1469 {0, 0}
1470 }
1471 },
1472 {0, 1, 2, 0, /* 0xc8 */
1473 {{3, 3},
1474 {6, 7},
1475 {0, 0},
1476 {0, 0}
1477 }
1478 },
1479 {1, 1, 3, 0, /* 0xc9 */
1480 {{0, 0},
1481 {3, 3},
1482 {6, 7},
1483 {0, 0}
1484 }
1485 },
1486 {0, 1, 3, 0, /* 0xca */
1487 {{1, 1},
1488 {3, 3},
1489 {6, 7},
1490 {0, 0}
1491 }
1492 },
1493 {1, 1, 3, 0, /* 0xcb */
1494 {{0, 1},
1495 {3, 3},
1496 {6, 7},
1497 {0, 0}
1498 }
1499 },
1500 {0, 1, 2, 0, /* 0xcc */
1501 {{2, 3},
1502 {6, 7},
1503 {0, 0},
1504 {0, 0}
1505 }
1506 },
1507 {1, 1, 3, 0, /* 0xcd */
1508 {{0, 0},
1509 {2, 3},
1510 {6, 7},
1511 {0, 0}
1512 }
1513 },
1514 {0, 1, 2, 0, /* 0xce */
1515 {{1, 3},
1516 {6, 7},
1517 {0, 0},
1518 {0, 0}
1519 }
1520 },
1521 {1, 1, 2, 0, /* 0xcf */
1522 {{0, 3},
1523 {6, 7},
1524 {0, 0},
1525 {0, 0}
1526 }
1527 },
1528 {0, 1, 2, 0, /* 0xd0 */
1529 {{4, 4},
1530 {6, 7},
1531 {0, 0},
1532 {0, 0}
1533 }
1534 },
1535 {1, 1, 3, 0, /* 0xd1 */
1536 {{0, 0},
1537 {4, 4},
1538 {6, 7},
1539 {0, 0}
1540 }
1541 },
1542 {0, 1, 3, 0, /* 0xd2 */
1543 {{1, 1},
1544 {4, 4},
1545 {6, 7},
1546 {0, 0}
1547 }
1548 },
1549 {1, 1, 3, 0, /* 0xd3 */
1550 {{0, 1},
1551 {4, 4},
1552 {6, 7},
1553 {0, 0}
1554 }
1555 },
1556 {0, 1, 3, 0, /* 0xd4 */
1557 {{2, 2},
1558 {4, 4},
1559 {6, 7},
1560 {0, 0}
1561 }
1562 },
1563 {1, 1, 4, 0, /* 0xd5 */
1564 {{0, 0},
1565 {2, 2},
1566 {4, 4},
1567 {6, 7}
1568 }
1569 },
1570 {0, 1, 3, 0, /* 0xd6 */
1571 {{1, 2},
1572 {4, 4},
1573 {6, 7},
1574 {0, 0}
1575 }
1576 },
1577 {1, 1, 3, 0, /* 0xd7 */
1578 {{0, 2},
1579 {4, 4},
1580 {6, 7},
1581 {0, 0}
1582 }
1583 },
1584 {0, 1, 2, 0, /* 0xd8 */
1585 {{3, 4},
1586 {6, 7},
1587 {0, 0},
1588 {0, 0}
1589 }
1590 },
1591 {1, 1, 3, 0, /* 0xd9 */
1592 {{0, 0},
1593 {3, 4},
1594 {6, 7},
1595 {0, 0}
1596 }
1597 },
1598 {0, 1, 3, 0, /* 0xda */
1599 {{1, 1},
1600 {3, 4},
1601 {6, 7},
1602 {0, 0}
1603 }
1604 },
1605 {1, 1, 3, 0, /* 0xdb */
1606 {{0, 1},
1607 {3, 4},
1608 {6, 7},
1609 {0, 0}
1610 }
1611 },
1612 {0, 1, 2, 0, /* 0xdc */
1613 {{2, 4},
1614 {6, 7},
1615 {0, 0},
1616 {0, 0}
1617 }
1618 },
1619 {1, 1, 3, 0, /* 0xdd */
1620 {{0, 0},
1621 {2, 4},
1622 {6, 7},
1623 {0, 0}
1624 }
1625 },
1626 {0, 1, 2, 0, /* 0xde */
1627 {{1, 4},
1628 {6, 7},
1629 {0, 0},
1630 {0, 0}
1631 }
1632 },
1633 {1, 1, 2, 0, /* 0xdf */
1634 {{0, 4},
1635 {6, 7},
1636 {0, 0},
1637 {0, 0}
1638 }
1639 },
1640 {0, 1, 1, 0, /* 0xe0 */
1641 {{5, 7},
1642 {0, 0},
1643 {0, 0},
1644 {0, 0}
1645 }
1646 },
1647 {1, 1, 2, 0, /* 0xe1 */
1648 {{0, 0},
1649 {5, 7},
1650 {0, 0},
1651 {0, 0}
1652 }
1653 },
1654 {0, 1, 2, 0, /* 0xe2 */
1655 {{1, 1},
1656 {5, 7},
1657 {0, 0},
1658 {0, 0}
1659 }
1660 },
1661 {1, 1, 2, 0, /* 0xe3 */
1662 {{0, 1},
1663 {5, 7},
1664 {0, 0},
1665 {0, 0}
1666 }
1667 },
1668 {0, 1, 2, 0, /* 0xe4 */
1669 {{2, 2},
1670 {5, 7},
1671 {0, 0},
1672 {0, 0}
1673 }
1674 },
1675 {1, 1, 3, 0, /* 0xe5 */
1676 {{0, 0},
1677 {2, 2},
1678 {5, 7},
1679 {0, 0}
1680 }
1681 },
1682 {0, 1, 2, 0, /* 0xe6 */
1683 {{1, 2},
1684 {5, 7},
1685 {0, 0},
1686 {0, 0}
1687 }
1688 },
1689 {1, 1, 2, 0, /* 0xe7 */
1690 {{0, 2},
1691 {5, 7},
1692 {0, 0},
1693 {0, 0}
1694 }
1695 },
1696 {0, 1, 2, 0, /* 0xe8 */
1697 {{3, 3},
1698 {5, 7},
1699 {0, 0},
1700 {0, 0}
1701 }
1702 },
1703 {1, 1, 3, 0, /* 0xe9 */
1704 {{0, 0},
1705 {3, 3},
1706 {5, 7},
1707 {0, 0}
1708 }
1709 },
1710 {0, 1, 3, 0, /* 0xea */
1711 {{1, 1},
1712 {3, 3},
1713 {5, 7},
1714 {0, 0}
1715 }
1716 },
1717 {1, 1, 3, 0, /* 0xeb */
1718 {{0, 1},
1719 {3, 3},
1720 {5, 7},
1721 {0, 0}
1722 }
1723 },
1724 {0, 1, 2, 0, /* 0xec */
1725 {{2, 3},
1726 {5, 7},
1727 {0, 0},
1728 {0, 0}
1729 }
1730 },
1731 {1, 1, 3, 0, /* 0xed */
1732 {{0, 0},
1733 {2, 3},
1734 {5, 7},
1735 {0, 0}
1736 }
1737 },
1738 {0, 1, 2, 0, /* 0xee */
1739 {{1, 3},
1740 {5, 7},
1741 {0, 0},
1742 {0, 0}
1743 }
1744 },
1745 {1, 1, 2, 0, /* 0xef */
1746 {{0, 3},
1747 {5, 7},
1748 {0, 0},
1749 {0, 0}
1750 }
1751 },
1752 {0, 1, 1, 0, /* 0xf0 */
1753 {{4, 7},
1754 {0, 0},
1755 {0, 0},
1756 {0, 0}
1757 }
1758 },
1759 {1, 1, 2, 0, /* 0xf1 */
1760 {{0, 0},
1761 {4, 7},
1762 {0, 0},
1763 {0, 0}
1764 }
1765 },
1766 {0, 1, 2, 0, /* 0xf2 */
1767 {{1, 1},
1768 {4, 7},
1769 {0, 0},
1770 {0, 0}
1771 }
1772 },
1773 {1, 1, 2, 0, /* 0xf3 */
1774 {{0, 1},
1775 {4, 7},
1776 {0, 0},
1777 {0, 0}
1778 }
1779 },
1780 {0, 1, 2, 0, /* 0xf4 */
1781 {{2, 2},
1782 {4, 7},
1783 {0, 0},
1784 {0, 0}
1785 }
1786 },
1787 {1, 1, 3, 0, /* 0xf5 */
1788 {{0, 0},
1789 {2, 2},
1790 {4, 7},
1791 {0, 0}
1792 }
1793 },
1794 {0, 1, 2, 0, /* 0xf6 */
1795 {{1, 2},
1796 {4, 7},
1797 {0, 0},
1798 {0, 0}
1799 }
1800 },
1801 {1, 1, 2, 0, /* 0xf7 */
1802 {{0, 2},
1803 {4, 7},
1804 {0, 0},
1805 {0, 0}
1806 }
1807 },
1808 {0, 1, 1, 0, /* 0xf8 */
1809 {{3, 7},
1810 {0, 0},
1811 {0, 0},
1812 {0, 0}
1813 }
1814 },
1815 {1, 1, 2, 0, /* 0xf9 */
1816 {{0, 0},
1817 {3, 7},
1818 {0, 0},
1819 {0, 0}
1820 }
1821 },
1822 {0, 1, 2, 0, /* 0xfa */
1823 {{1, 1},
1824 {3, 7},
1825 {0, 0},
1826 {0, 0}
1827 }
1828 },
1829 {1, 1, 2, 0, /* 0xfb */
1830 {{0, 1},
1831 {3, 7},
1832 {0, 0},
1833 {0, 0}
1834 }
1835 },
1836 {0, 1, 1, 0, /* 0xfc */
1837 {{2, 7},
1838 {0, 0},
1839 {0, 0},
1840 {0, 0}
1841 }
1842 },
1843 {1, 1, 2, 0, /* 0xfd */
1844 {{0, 0},
1845 {2, 7},
1846 {0, 0},
1847 {0, 0}
1848 }
1849 },
1850 {0, 1, 1, 0, /* 0xfe */
1851 {{1, 7},
1852 {0, 0},
1853 {0, 0},
1854 {0, 0}
1855 }
1856 },
1857 {1, 1, 1, 0, /* 0xff */
1858 {{0, 7},
1859 {0, 0},
1860 {0, 0},
1861 {0, 0}
1862 }
1863 }
1864};
1865
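/*
 * Return 1 if the address in 'ifa' may be used under the scoping rules
 * given in 'scope' (loopback, private IPv4, link-local and site-local
 * IPv6), and 0 otherwise.  When do_update is set, the cached IPv6
 * interface-address flags are refreshed before being checked.
 */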
1866int
 1867sctp_is_address_in_scope(struct sctp_ifa *ifa,
 1868 struct sctp_scoping *scope,
1869 int do_update)
1870{
1871 if ((scope->loopback_scope == 0) &&
1872 (ifa->ifn_p) && SCTP_IFN_IS_IFT_LOOP(ifa->ifn_p)) {
1873 /*
1874 * skip loopback if not in scope *
1875 */
1876 return (0);
1877 }
1878 switch (ifa->address.sa.sa_family) {
1879#ifdef INET
1880 case AF_INET:
1881 if (scope->ipv4_addr_legal) {
1882 struct sockaddr_in *sin;
1883
1884 sin = &ifa->address.sin;
1885 if (sin->sin_addr.s_addr == 0) {
1886 /* not in scope , unspecified */
1887 return (0);
1888 }
1889 if ((scope->ipv4_local_scope == 0) &&
 1890 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
 1891 /* private address not in scope */
1892 return (0);
1893 }
1894 } else {
1895 return (0);
1896 }
1897 break;
1898#endif
1899#ifdef INET6
1900 case AF_INET6:
1901 if (scope->ipv6_addr_legal) {
1902 struct sockaddr_in6 *sin6;
1903
1904 /*
1905 * Must update the flags, bummer, which means any
1906 * IFA locks must now be applied HERE <->
1907 */
1908 if (do_update) {
1909 sctp_gather_internal_ifa_flags(ifa);
1910 }
 1911 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
 1912 return (0);
1913 }
1914 /* ok to use deprecated addresses? */
1915 sin6 = &ifa->address.sin6;
1916 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1917 /* skip unspecifed addresses */
1918 return (0);
1919 }
1920 if ( /* (local_scope == 0) && */
1921 (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) {
1922 return (0);
1923 }
1924 if ((scope->site_scope == 0) &&
1925 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
1926 return (0);
1927 }
1928 } else {
1929 return (0);
1930 }
1931 break;
1932#endif
1933 default:
1934 return (0);
1935 }
1936 return (1);
1937}
1938
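/*
 * Append an IPv4 or IPv6 address parameter describing 'ifa' to the mbuf
 * chain 'm', allocating a new mbuf when there is not enough trailing
 * space.  On success *len is increased by the parameter length and the
 * mbuf now holding the parameter is returned; on failure 'm' is returned
 * unchanged.
 */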
1939static struct mbuf *
1940sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa, uint16_t *len)
1941{
1942#if defined(INET) || defined(INET6)
1943 struct sctp_paramhdr *paramh;
1944 struct mbuf *mret;
1945 uint16_t plen;
1946#endif
1947
1948 switch (ifa->address.sa.sa_family) {
1949#ifdef INET
1950 case AF_INET:
1951 plen = (uint16_t)sizeof(struct sctp_ipv4addr_param);
1952 break;
1953#endif
1954#ifdef INET6
1955 case AF_INET6:
1956 plen = (uint16_t)sizeof(struct sctp_ipv6addr_param);
1957 break;
1958#endif
1959 default:
1960 return (m);
1961 }
1962#if defined(INET) || defined(INET6)
1963 if (M_TRAILINGSPACE(m) >= plen) {
1964 /* easy side we just drop it on the end */
1965 paramh = (struct sctp_paramhdr *)(SCTP_BUF_AT(m, SCTP_BUF_LEN(m)));
1966 mret = m;
1967 } else {
1968 /* Need more space */
1969 mret = m;
1970 while (SCTP_BUF_NEXT(mret) != NULL) {
1971 mret = SCTP_BUF_NEXT(mret);
1972 }
1973 SCTP_BUF_NEXT(mret) = sctp_get_mbuf_for_msg(plen, 0, M_NOWAIT, 1, MT_DATA);
1974 if (SCTP_BUF_NEXT(mret) == NULL) {
1975 /* We are hosed, can't add more addresses */
1976 return (m);
1977 }
1978 mret = SCTP_BUF_NEXT(mret);
1979 paramh = mtod(mret, struct sctp_paramhdr *);
1980 }
1981 /* now add the parameter */
1982 switch (ifa->address.sa.sa_family) {
1983#ifdef INET
1984 case AF_INET:
1985 {
1986 struct sctp_ipv4addr_param *ipv4p;
1987 struct sockaddr_in *sin;
1988
1989 sin = &ifa->address.sin;
1990 ipv4p = (struct sctp_ipv4addr_param *)paramh;
1991 paramh->param_type = htons(SCTP_IPV4_ADDRESS);
1992 paramh->param_length = htons(plen);
1993 ipv4p->addr = sin->sin_addr.s_addr;
1994 SCTP_BUF_LEN(mret) += plen;
1995 break;
1996 }
1997#endif
1998#ifdef INET6
1999 case AF_INET6:
2000 {
2001 struct sctp_ipv6addr_param *ipv6p;
2002 struct sockaddr_in6 *sin6;
2003
2004 sin6 = &ifa->address.sin6;
2005 ipv6p = (struct sctp_ipv6addr_param *)paramh;
2006 paramh->param_type = htons(SCTP_IPV6_ADDRESS);
2007 paramh->param_length = htons(plen);
2008 memcpy(ipv6p->addr, &sin6->sin6_addr,
2009 sizeof(ipv6p->addr));
2010 /* clear embedded scope in the address */
2011 in6_clearscope((struct in6_addr *)ipv6p->addr);
2012 SCTP_BUF_LEN(mret) += plen;
2013 break;
2014 }
2015#endif
2016 default:
2017 return (m);
2018 }
2019 if (len != NULL) {
2020 *len += plen;
2021 }
2022 return (mret);
2023#endif
2024}
2025
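/*
 * Add local addresses to the INIT or INIT-ACK chunk being built in
 * 'm_at'.  A bound-all endpoint draws candidates from the VRF's
 * interface list, a subset-bound endpoint from its own address list.
 * Addresses are only listed when more than one would be advertised, and
 * never more than SCTP_ADDRESS_LIMIT of them.
 */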
2026struct mbuf *
 2027sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
 2028 struct sctp_scoping *scope,
2029 struct mbuf *m_at, int cnt_inits_to,
2030 uint16_t *padding_len, uint16_t *chunk_len)
2031{
2032 struct sctp_vrf *vrf = NULL;
2033 int cnt, limit_out = 0, total_count;
 2034 uint32_t vrf_id;
 2035
 2036 vrf_id = inp->def_vrf_id;
 2037 SCTP_IPI_ADDR_RLOCK();
 2038 vrf = sctp_find_vrf(vrf_id);
 2039 if (vrf == NULL) {
 2040 SCTP_IPI_ADDR_RUNLOCK();
 2041 return (m_at);
 2042 }
 2043 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
 2044 struct sctp_ifa *sctp_ifap;
 2045 struct sctp_ifn *sctp_ifnp;
 2046
 2047 cnt = cnt_inits_to;
 2048 if (vrf->total_ifa_count > SCTP_COUNT_LIMIT) {
 2049 limit_out = 1;
2050 cnt = SCTP_ADDRESS_LIMIT;
2051 goto skip_count;
2052 }
2053 LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
2054 if ((scope->loopback_scope == 0) &&
2055 SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
2056 /*
2057 * Skip loopback devices if loopback_scope
2058 * not set
2059 */
2060 continue;
2061 }
2062 LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
2063#ifdef INET
2064 if ((sctp_ifap->address.sa.sa_family == AF_INET) &&
 2065 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
 2066 &sctp_ifap->address.sin.sin_addr) != 0)) {
2067 continue;
2068 }
2069#endif
2070#ifdef INET6
2071 if ((sctp_ifap->address.sa.sa_family == AF_INET6) &&
2072 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2073 &sctp_ifap->address.sin6.sin6_addr) != 0)) {
2074 continue;
2075 }
2076#endif
2077 if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
2078 continue;
2079 }
2080 if (sctp_is_address_in_scope(sctp_ifap, scope, 1) == 0) {
2081 continue;
2082 }
2083 cnt++;
2084 if (cnt > SCTP_ADDRESS_LIMIT) {
2085 break;
2086 }
2087 }
2088 if (cnt > SCTP_ADDRESS_LIMIT) {
2089 break;
2090 }
2091 }
2092skip_count:
2093 if (cnt > 1) {
2094 total_count = 0;
2095 LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
2096 cnt = 0;
2097 if ((scope->loopback_scope == 0) &&
2098 SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
2099 /*
2100 * Skip loopback devices if
2101 * loopback_scope not set
2102 */
2103 continue;
2104 }
2105 LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
2106#ifdef INET
2107 if ((sctp_ifap->address.sa.sa_family == AF_INET) &&
 2108 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
 2109 &sctp_ifap->address.sin.sin_addr) != 0)) {
2110 continue;
2111 }
2112#endif
2113#ifdef INET6
2114 if ((sctp_ifap->address.sa.sa_family == AF_INET6) &&
2115 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2116 &sctp_ifap->address.sin6.sin6_addr) != 0)) {
2117 continue;
2118 }
2119#endif
2120 if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
2121 continue;
2122 }
2123 if (sctp_is_address_in_scope(sctp_ifap,
2124 scope, 0) == 0) {
2125 continue;
2126 }
2127 if ((chunk_len != NULL) &&
2128 (padding_len != NULL) &&
2129 (*padding_len > 0)) {
2130 memset(mtod(m_at, caddr_t)+*chunk_len, 0, *padding_len);
2131 SCTP_BUF_LEN(m_at) += *padding_len;
2132 *chunk_len += *padding_len;
2133 *padding_len = 0;
2134 }
2135 m_at = sctp_add_addr_to_mbuf(m_at, sctp_ifap, chunk_len);
2136 if (limit_out) {
2137 cnt++;
2138 total_count++;
2139 if (cnt >= 2) {
2140 /*
2141 * two from each
2142 * address
2143 */
2144 break;
2145 }
2146 if (total_count > SCTP_ADDRESS_LIMIT) {
2147 /* No more addresses */
2148 break;
2149 }
2150 }
2151 }
2152 }
2153 }
2154 } else {
2155 struct sctp_laddr *laddr;
2156
2157 cnt = cnt_inits_to;
2158 /* First, how many ? */
2159 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2160 if (laddr->ifa == NULL) {
2161 continue;
2162 }
 2163 if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
 2164 /*
2165 * Address being deleted by the system, dont
2166 * list.
2167 */
2168 continue;
2169 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2170 /*
2171 * Address being deleted on this ep don't
2172 * list.
2173 */
2174 continue;
2175 }
2176 if (sctp_is_address_in_scope(laddr->ifa,
2177 scope, 1) == 0) {
2178 continue;
2179 }
2180 cnt++;
2181 }
2182 /*
2183 * To get through a NAT we only list addresses if we have
2184 * more than one. That way if you just bind a single address
2185 * we let the source of the init dictate our address.
2186 */
2187 if (cnt > 1) {
2188 cnt = cnt_inits_to;
2189 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2190 if (laddr->ifa == NULL) {
2191 continue;
2192 }
2193 if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) {
2194 continue;
2195 }
2196 if (sctp_is_address_in_scope(laddr->ifa,
2197 scope, 0) == 0) {
2198 continue;
2199 }
2200 if ((chunk_len != NULL) &&
2201 (padding_len != NULL) &&
2202 (*padding_len > 0)) {
2203 memset(mtod(m_at, caddr_t)+*chunk_len, 0, *padding_len);
2204 SCTP_BUF_LEN(m_at) += *padding_len;
2205 *chunk_len += *padding_len;
2206 *padding_len = 0;
2207 }
2208 m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa, chunk_len);
2209 cnt++;
2210 if (cnt >= SCTP_ADDRESS_LIMIT) {
2211 break;
2212 }
2213 }
2214 }
2215 }
 2216 SCTP_IPI_ADDR_RUNLOCK();
 2217 return (m_at);
2218}
2219
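/*
 * Decide whether 'ifa' is a preferred source address for a destination
 * with the given loopback/private properties, i.e. whether its scope is
 * at least as wide as the destination's.  Returns the ifa if preferred,
 * NULL otherwise.
 */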
2220static struct sctp_ifa *
 2221sctp_is_ifa_addr_preferred(struct sctp_ifa *ifa,
 2222 uint8_t dest_is_loop,
2223 uint8_t dest_is_priv,
2224 sa_family_t fam)
2225{
2226 uint8_t dest_is_global = 0;
2227
2228 /* dest_is_priv is true if destination is a private address */
2229 /* dest_is_loop is true if destination is a loopback addresses */
2230
2258 if (ifa->address.sa.sa_family != fam) {
2259 /* forget mis-matched family */
2260 return (NULL);
2261 }
2262 if ((dest_is_priv == 0) && (dest_is_loop == 0)) {
2263 dest_is_global = 1;
2264 }
2265 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Is destination preferred:");
2266 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ifa->address.sa);
2267 /* Ok the address may be ok */
2268#ifdef INET6
2269 if (fam == AF_INET6) {
2270 /* ok to use deprecated addresses? no lets not! */
2271 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
2272 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:1\n");
2273 return (NULL);
2274 }
2275 if (ifa->src_is_priv && !ifa->src_is_loop) {
2276 if (dest_is_loop) {
2277 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:2\n");
2278 return (NULL);
2279 }
2280 }
2281 if (ifa->src_is_glob) {
2282 if (dest_is_loop) {
2283 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:3\n");
2284 return (NULL);
2285 }
2286 }
2287 }
2288#endif
2289 /*
2290 * Now that we know what is what, implement or table this could in
2291 * theory be done slicker (it used to be), but this is
2292 * straightforward and easier to validate :-)
2293 */
2294 SCTPDBG(SCTP_DEBUG_OUTPUT3, "src_loop:%d src_priv:%d src_glob:%d\n",
2295 ifa->src_is_loop, ifa->src_is_priv, ifa->src_is_glob);
2296 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dest_loop:%d dest_priv:%d dest_glob:%d\n",
2297 dest_is_loop, dest_is_priv, dest_is_global);
2298
2299 if ((ifa->src_is_loop) && (dest_is_priv)) {
2300 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:4\n");
2301 return (NULL);
2302 }
2303 if ((ifa->src_is_glob) && (dest_is_priv)) {
2304 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:5\n");
2305 return (NULL);
2306 }
2307 if ((ifa->src_is_loop) && (dest_is_global)) {
2308 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:6\n");
2309 return (NULL);
2310 }
2311 if ((ifa->src_is_priv) && (dest_is_global)) {
2312 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:7\n");
2313 return (NULL);
2314 }
2315 SCTPDBG(SCTP_DEBUG_OUTPUT3, "YES\n");
2316 /* its a preferred address */
2317 return (ifa);
2318}
2319
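/*
 * Weaker test than the preferred check above: decide whether 'ifa' is at
 * least acceptable as a source address for the destination.  Returns the
 * ifa if acceptable, NULL otherwise.
 */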
2320static struct sctp_ifa *
 2321sctp_is_ifa_addr_acceptable(struct sctp_ifa *ifa,
 2322 uint8_t dest_is_loop,
2323 uint8_t dest_is_priv,
2324 sa_family_t fam)
2325{
2326 uint8_t dest_is_global = 0;
2327
2358 if (ifa->address.sa.sa_family != fam) {
2359 /* forget non matching family */
2360 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa_fam:%d fam:%d\n",
2361 ifa->address.sa.sa_family, fam);
2362 return (NULL);
2363 }
2364 /* Ok the address may be ok */
2365 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, &ifa->address.sa);
2366 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst_is_loop:%d dest_is_priv:%d\n",
2367 dest_is_loop, dest_is_priv);
2368 if ((dest_is_loop == 0) && (dest_is_priv == 0)) {
2369 dest_is_global = 1;
2370 }
2371#ifdef INET6
2372 if (fam == AF_INET6) {
2373 /* ok to use deprecated addresses? */
2374 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
2375 return (NULL);
2376 }
2377 if (ifa->src_is_priv) {
2378 /* Special case, linklocal to loop */
2379 if (dest_is_loop)
2380 return (NULL);
2381 }
2382 }
2383#endif
2384 /*
2385 * Now that we know what is what, implement our table. This could in
2386 * theory be done slicker (it used to be), but this is
2387 * straightforward and easier to validate :-)
2388 */
2389 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_priv:%d\n",
2390 ifa->src_is_loop,
2391 dest_is_priv);
2392 if ((ifa->src_is_loop == 1) && (dest_is_priv)) {
2393 return (NULL);
2394 }
2395 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_glob:%d\n",
2396 ifa->src_is_loop,
2397 dest_is_global);
2398 if ((ifa->src_is_loop == 1) && (dest_is_global)) {
2399 return (NULL);
2400 }
2401 SCTPDBG(SCTP_DEBUG_OUTPUT3, "address is acceptable\n");
2402 /* its an acceptable address */
2403 return (ifa);
2404}
2405
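/*
 * Return 1 if 'ifa' is on the association's restricted-address list,
 * 0 if it is not or if there is no TCB (and hence no restrictions).
 */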
2406int
 2407sctp_is_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
 2408{
2409 struct sctp_laddr *laddr;
2410
2411 if (stcb == NULL) {
2412 /* There are no restrictions, no TCB :-) */
2413 return (0);
2414 }
2415 LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) {
2416 if (laddr->ifa == NULL) {
2417 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
2418 __func__);
2419 continue;
2420 }
2421 if (laddr->ifa == ifa) {
2422 /* Yes it is on the list */
2423 return (1);
2424 }
2425 }
2426 return (0);
2427}
2428
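/*
 * Return 1 if 'ifa' appears in the endpoint's bound-address list with no
 * pending action, 0 otherwise.
 */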
2429int
 2430sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa)
 2431{
2432 struct sctp_laddr *laddr;
2433
2434 if (ifa == NULL)
2435 return (0);
2436 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2437 if (laddr->ifa == NULL) {
2438 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
2439 __func__);
2440 continue;
2441 }
2442 if ((laddr->ifa == ifa) && laddr->action == 0)
2443 /* same pointer */
2444 return (1);
2445 }
2446 return (0);
2447}
2448
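/*
 * Source address selection for a subset-bound endpoint with no
 * association: first try for a preferred address on the interface the
 * route leaves through, then rotate through the endpoint's bound
 * addresses (via inp->next_addr_touse) looking first for a preferred and
 * then for an acceptable address.  Returns NULL if nothing usable is
 * found.
 */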
2449static struct sctp_ifa *
 2450sctp_choose_boundspecific_inp(struct sctp_inpcb *inp,
 2451 sctp_route_t *ro,
 2452 uint32_t vrf_id,
 2453 int non_asoc_addr_ok,
2454 uint8_t dest_is_priv,
2455 uint8_t dest_is_loop,
2456 sa_family_t fam)
2457{
2458 struct sctp_laddr *laddr, *starting_point;
2459 void *ifn;
2460 int resettotop = 0;
2461 struct sctp_ifn *sctp_ifn;
2462 struct sctp_ifa *sctp_ifa, *sifa;
2463 struct sctp_vrf *vrf;
2464 uint32_t ifn_index;
2465
2466 vrf = sctp_find_vrf(vrf_id);
2467 if (vrf == NULL)
2468 return (NULL);
2469
 2470 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
 2471 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2472 sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2473 /*
2474 * first question, is the ifn we will emit on in our list, if so, we
2475 * want such an address. Note that we first looked for a preferred
2476 * address.
2477 */
2478 if (sctp_ifn) {
2479 /* is a preferred one on the interface we route out? */
2480 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2481#ifdef INET
2482 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
 2483 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
 2484 &sctp_ifa->address.sin.sin_addr) != 0)) {
2485 continue;
2486 }
2487#endif
2488#ifdef INET6
2489 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
2490 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2491 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
2492 continue;
2493 }
2494#endif
 2495 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
 2496 (non_asoc_addr_ok == 0))
 2497 continue;
 2498 sifa = sctp_is_ifa_addr_preferred(sctp_ifa,
 2499 dest_is_loop,
2500 dest_is_priv, fam);
2501 if (sifa == NULL)
2502 continue;
2503 if (sctp_is_addr_in_ep(inp, sifa)) {
2504 atomic_add_int(&sifa->refcount, 1);
2505 return (sifa);
2506 }
2507 }
2508 }
2509 /*
2510 * ok, now we now need to find one on the list of the addresses. We
2511 * can't get one on the emitting interface so let's find first a
2512 * preferred one. If not that an acceptable one otherwise... we
2513 * return NULL.
2514 */
2515 starting_point = inp->next_addr_touse;
2516once_again:
2517 if (inp->next_addr_touse == NULL) {
2518 inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
2519 resettotop = 1;
2520 }
2521 for (laddr = inp->next_addr_touse; laddr;
2522 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2523 if (laddr->ifa == NULL) {
2524 /* address has been removed */
2525 continue;
2526 }
2527 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2528 /* address is being deleted */
2529 continue;
2530 }
2531 sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop,
2532 dest_is_priv, fam);
2533 if (sifa == NULL)
2534 continue;
2535 atomic_add_int(&sifa->refcount, 1);
2536 return (sifa);
2537 }
2538 if (resettotop == 0) {
2539 inp->next_addr_touse = NULL;
2540 goto once_again;
2541 }
2542
2543 inp->next_addr_touse = starting_point;
2544 resettotop = 0;
2545once_again_too:
2546 if (inp->next_addr_touse == NULL) {
2547 inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
2548 resettotop = 1;
2549 }
2550
2551 /* ok, what about an acceptable address in the inp */
2552 for (laddr = inp->next_addr_touse; laddr;
2553 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2554 if (laddr->ifa == NULL) {
2555 /* address has been removed */
2556 continue;
2557 }
2558 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2559 /* address is being deleted */
2560 continue;
2561 }
2562 sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
2563 dest_is_priv, fam);
2564 if (sifa == NULL)
2565 continue;
2566 atomic_add_int(&sifa->refcount, 1);
2567 return (sifa);
2568 }
2569 if (resettotop == 0) {
2570 inp->next_addr_touse = NULL;
2571 goto once_again_too;
2572 }
2573
2574 /*
2575 * no address bound can be a source for the destination we are in
2576 * trouble
2577 */
2578 return (NULL);
2579}
2580
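/*
 * Source address selection for a subset-bound endpoint with an
 * association: prefer addresses on the emitting interface that are bound
 * to the endpoint and not restricted for this association, then scan the
 * endpoint's address list starting at asoc.last_used_address, first for
 * preferred and then for acceptable addresses.
 */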
2581static struct sctp_ifa *
 2582sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp,
 2583 struct sctp_tcb *stcb,
 2584 sctp_route_t *ro,
 2585 uint32_t vrf_id,
 2586 uint8_t dest_is_priv,
2587 uint8_t dest_is_loop,
2588 int non_asoc_addr_ok,
2589 sa_family_t fam)
2590{
2591 struct sctp_laddr *laddr, *starting_point;
2592 void *ifn;
2593 struct sctp_ifn *sctp_ifn;
2594 struct sctp_ifa *sctp_ifa, *sifa;
2595 uint8_t start_at_beginning = 0;
2596 struct sctp_vrf *vrf;
2597 uint32_t ifn_index;
2598
2599 /*
2600 * first question, is the ifn we will emit on in our list, if so, we
2601 * want that one.
2602 */
2603 vrf = sctp_find_vrf(vrf_id);
2604 if (vrf == NULL)
2605 return (NULL);
2606
 2607 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
 2608 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2609 sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2610
2611 /*
2612 * first question, is the ifn we will emit on in our list? If so,
2613 * we want that one. First we look for a preferred. Second, we go
2614 * for an acceptable.
2615 */
2616 if (sctp_ifn) {
2617 /* first try for a preferred address on the ep */
2618 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2619#ifdef INET
2620 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
 2621 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
 2622 &sctp_ifa->address.sin.sin_addr) != 0)) {
2623 continue;
2624 }
2625#endif
2626#ifdef INET6
2627 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
2628 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2629 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
2630 continue;
2631 }
2632#endif
2633 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
2634 continue;
2635 if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
2636 sifa = sctp_is_ifa_addr_preferred(sctp_ifa, dest_is_loop, dest_is_priv, fam);
2637 if (sifa == NULL)
2638 continue;
2639 if (((non_asoc_addr_ok == 0) &&
2640 (sctp_is_addr_restricted(stcb, sifa))) ||
2641 (non_asoc_addr_ok &&
2642 (sctp_is_addr_restricted(stcb, sifa)) &&
2643 (!sctp_is_addr_pending(stcb, sifa)))) {
2644 /* on the no-no list */
2645 continue;
2646 }
2647 atomic_add_int(&sifa->refcount, 1);
2648 return (sifa);
2649 }
2650 }
2651 /* next try for an acceptable address on the ep */
2652 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2653#ifdef INET
2654 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
 2655 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
 2656 &sctp_ifa->address.sin.sin_addr) != 0)) {
2657 continue;
2658 }
2659#endif
2660#ifdef INET6
2661 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
2662 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2663 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
2664 continue;
2665 }
2666#endif
2667 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
2668 continue;
2669 if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
2670 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv, fam);
2671 if (sifa == NULL)
2672 continue;
2673 if (((non_asoc_addr_ok == 0) &&
2674 (sctp_is_addr_restricted(stcb, sifa))) ||
2675 (non_asoc_addr_ok &&
2676 (sctp_is_addr_restricted(stcb, sifa)) &&
2677 (!sctp_is_addr_pending(stcb, sifa)))) {
2678 /* on the no-no list */
2679 continue;
2680 }
2681 atomic_add_int(&sifa->refcount, 1);
2682 return (sifa);
2683 }
2684 }
2685 }
2686 /*
2687 * if we can't find one like that then we must look at all addresses
2688 * bound to pick one at first preferable then secondly acceptable.
2689 */
2690 starting_point = stcb->asoc.last_used_address;
2691sctp_from_the_top:
2692 if (stcb->asoc.last_used_address == NULL) {
2693 start_at_beginning = 1;
2694 stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
2695 }
2696 /* search beginning with the last used address */
2697 for (laddr = stcb->asoc.last_used_address; laddr;
2698 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2699 if (laddr->ifa == NULL) {
2700 /* address has been removed */
2701 continue;
2702 }
2703 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2704 /* address is being deleted */
2705 continue;
2706 }
2707 sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop, dest_is_priv, fam);
2708 if (sifa == NULL)
2709 continue;
2710 if (((non_asoc_addr_ok == 0) &&
2711 (sctp_is_addr_restricted(stcb, sifa))) ||
2712 (non_asoc_addr_ok &&
2713 (sctp_is_addr_restricted(stcb, sifa)) &&
2714 (!sctp_is_addr_pending(stcb, sifa)))) {
2715 /* on the no-no list */
2716 continue;
2717 }
2718 stcb->asoc.last_used_address = laddr;
2719 atomic_add_int(&sifa->refcount, 1);
2720 return (sifa);
2721 }
2722 if (start_at_beginning == 0) {
2723 stcb->asoc.last_used_address = NULL;
2724 goto sctp_from_the_top;
2725 }
2726 /* now try for any higher scope than the destination */
2727 stcb->asoc.last_used_address = starting_point;
2728 start_at_beginning = 0;
2729sctp_from_the_top2:
2730 if (stcb->asoc.last_used_address == NULL) {
2731 start_at_beginning = 1;
2732 stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
2733 }
2734 /* search beginning with the last used address */
2735 for (laddr = stcb->asoc.last_used_address; laddr;
2736 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2737 if (laddr->ifa == NULL) {
2738 /* address has been removed */
2739 continue;
2740 }
2741 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2742 /* address is being deleted */
2743 continue;
2744 }
2745 sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
2746 dest_is_priv, fam);
2747 if (sifa == NULL)
2748 continue;
2749 if (((non_asoc_addr_ok == 0) &&
2750 (sctp_is_addr_restricted(stcb, sifa))) ||
2751 (non_asoc_addr_ok &&
2752 (sctp_is_addr_restricted(stcb, sifa)) &&
2753 (!sctp_is_addr_pending(stcb, sifa)))) {
2754 /* on the no-no list */
2755 continue;
2756 }
2757 stcb->asoc.last_used_address = laddr;
2758 atomic_add_int(&sifa->refcount, 1);
2759 return (sifa);
2760 }
2761 if (start_at_beginning == 0) {
2762 stcb->asoc.last_used_address = NULL;
2763 goto sctp_from_the_top2;
2764 }
2765 return (NULL);
2766}
2767
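/*
 * For a bound-all endpoint, walk the interface's address list and return
 * the addr_wanted'th preferred source address that also passes the jail,
 * scope, link-local scope and (when mobility is enabled) next-hop
 * checks.  Returns NULL if there are fewer eligible addresses than that.
 */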
2768static struct sctp_ifa *
 2769sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn,
 2770 struct sctp_inpcb *inp,
2771 struct sctp_tcb *stcb,
2772 int non_asoc_addr_ok,
2773 uint8_t dest_is_loop,
2774 uint8_t dest_is_priv,
2775 int addr_wanted,
2776 sa_family_t fam,
2777 sctp_route_t *ro)
2778{
2779 struct sctp_ifa *ifa, *sifa;
2780 int num_eligible_addr = 0;
2781#ifdef INET6
2782 struct sockaddr_in6 sin6, lsa6;
2783
2784 if (fam == AF_INET6) {
2785 memcpy(&sin6, &ro->ro_dst, sizeof(struct sockaddr_in6));
2786 (void)sa6_recoverscope(&sin6);
2787 }
2788#endif /* INET6 */
2789 LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
2790#ifdef INET
2791 if ((ifa->address.sa.sa_family == AF_INET) &&
 2792 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
 2793 &ifa->address.sin.sin_addr) != 0)) {
2794 continue;
2795 }
2796#endif
2797#ifdef INET6
2798 if ((ifa->address.sa.sa_family == AF_INET6) &&
2799 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2800 &ifa->address.sin6.sin6_addr) != 0)) {
2801 continue;
2802 }
2803#endif
2804 if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2805 (non_asoc_addr_ok == 0))
2806 continue;
2807 sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
2808 dest_is_priv, fam);
2809 if (sifa == NULL)
2810 continue;
2811#ifdef INET6
2812 if (fam == AF_INET6 &&
2813 dest_is_loop &&
2814 sifa->src_is_loop && sifa->src_is_priv) {
2815 /*
2816 * don't allow fe80::1 to be a src on loop ::1, we
2817 * don't list it to the peer so we will get an
2818 * abort.
2819 */
2820 continue;
2821 }
2822 if (fam == AF_INET6 &&
2823 IN6_IS_ADDR_LINKLOCAL(&sifa->address.sin6.sin6_addr) &&
2824 IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) {
2825 /*
2826 * link-local <-> link-local must belong to the same
2827 * scope.
2828 */
2829 memcpy(&lsa6, &sifa->address.sin6, sizeof(struct sockaddr_in6));
2830 (void)sa6_recoverscope(&lsa6);
2831 if (sin6.sin6_scope_id != lsa6.sin6_scope_id) {
2832 continue;
2833 }
2834 }
2835#endif /* INET6 */
2836
2837 /*
2838 * Check if the IPv6 address matches to next-hop. In the
2839 * mobile case, old IPv6 address may be not deleted from the
2840 * interface. Then, the interface has previous and new
2841 * addresses. We should use one corresponding to the
2842 * next-hop. (by micchie)
2843 */
2844#ifdef INET6
2845 if (stcb && fam == AF_INET6 &&
 2846 sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
 2847 if (sctp_v6src_match_nexthop(&sifa->address.sin6, ro)
2848 == 0) {
2849 continue;
2850 }
2851 }
2852#endif
2853#ifdef INET
2854 /* Avoid topologically incorrect IPv4 address */
2855 if (stcb && fam == AF_INET &&
 2856 sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
 2857 if (sctp_v4src_match_nexthop(sifa, ro) == 0) {
2858 continue;
2859 }
2860 }
2861#endif
2862 if (stcb) {
2863 if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
2864 continue;
2865 }
2866 if (((non_asoc_addr_ok == 0) &&
2867 (sctp_is_addr_restricted(stcb, sifa))) ||
2868 (non_asoc_addr_ok &&
2869 (sctp_is_addr_restricted(stcb, sifa)) &&
2870 (!sctp_is_addr_pending(stcb, sifa)))) {
2871 /*
2872 * It is restricted for some reason..
2873 * probably not yet added.
2874 */
2875 continue;
2876 }
2877 }
2878 if (num_eligible_addr >= addr_wanted) {
2879 return (sifa);
2880 }
2881 num_eligible_addr++;
2882 }
2883 return (NULL);
2884}
2885
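/*
 * Count how many addresses on the interface qualify as preferred source
 * addresses for a bound-all endpoint, applying the same jail, scope and
 * restriction checks as the selection routine above.
 */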
2886static int
 2887sctp_count_num_preferred_boundall(struct sctp_ifn *ifn,
 2888 struct sctp_inpcb *inp,
2889 struct sctp_tcb *stcb,
2890 int non_asoc_addr_ok,
2891 uint8_t dest_is_loop,
2892 uint8_t dest_is_priv,
2893 sa_family_t fam)
2894{
2895 struct sctp_ifa *ifa, *sifa;
2896 int num_eligible_addr = 0;
2897
2898 LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
2899#ifdef INET
2900 if ((ifa->address.sa.sa_family == AF_INET) &&
 2901 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
 2902 &ifa->address.sin.sin_addr) != 0)) {
2903 continue;
2904 }
2905#endif
2906#ifdef INET6
2907 if ((ifa->address.sa.sa_family == AF_INET6) &&
2908 (stcb != NULL) &&
2909 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2910 &ifa->address.sin6.sin6_addr) != 0)) {
2911 continue;
2912 }
2913#endif
2914 if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2915 (non_asoc_addr_ok == 0)) {
2916 continue;
2917 }
2918 sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
2919 dest_is_priv, fam);
2920 if (sifa == NULL) {
2921 continue;
2922 }
2923 if (stcb) {
2924 if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
2925 continue;
2926 }
2927 if (((non_asoc_addr_ok == 0) &&
2928 (sctp_is_addr_restricted(stcb, sifa))) ||
2929 (non_asoc_addr_ok &&
2930 (sctp_is_addr_restricted(stcb, sifa)) &&
2931 (!sctp_is_addr_pending(stcb, sifa)))) {
2932 /*
2933 * It is restricted for some reason..
2934 * probably not yet added.
2935 */
2936 continue;
2937 }
2938 }
2939 num_eligible_addr++;
2940 }
2941 return (num_eligible_addr);
2942}
2943
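/*-
 * Source address selection for a bound-all endpoint.  Plan A: take the
 * nth preferred address on the interface the route leaves through.
 * Plan B: look for a preferred address on any other interface.  Plan C:
 * settle for an acceptable address on the emitting interface.  Plan D
 * and beyond: fall back further, possibly retrying with private
 * addresses allowed.
 */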
2944static struct sctp_ifa *
 2945sctp_choose_boundall(struct sctp_inpcb *inp,
 2946 struct sctp_tcb *stcb,
 2947 struct sctp_nets *net,
 2948 sctp_route_t *ro,
 2949 uint32_t vrf_id,
 2950 uint8_t dest_is_priv,
2951 uint8_t dest_is_loop,
2952 int non_asoc_addr_ok,
2953 sa_family_t fam)
2954{
2955 int cur_addr_num = 0, num_preferred = 0;
2956 void *ifn;
2957 struct sctp_ifn *sctp_ifn, *looked_at = NULL, *emit_ifn;
2958 struct sctp_ifa *sctp_ifa, *sifa;
2959 uint32_t ifn_index;
2960 struct sctp_vrf *vrf;
2961#ifdef INET
2962 int retried = 0;
2963#endif
2964
2965 /*-
2966 * For boundall we can use any address in the association.
2967 * If non_asoc_addr_ok is set we can use any address (at least in
2968 * theory). So we look for preferred addresses first. If we find one,
2969 * we use it. Otherwise we next try to get an address on the
2970 * interface, which we should be able to do (unless non_asoc_addr_ok
2971 * is false and we are routed out that way). In these cases where we
2972 * can't use the address of the interface we go through all the
2973 * ifn's looking for an address we can use and fill that in. Punting
2974 * means we send back address 0, which will probably cause problems
2975 * actually since then IP will fill in the address of the route ifn,
2976 * which means we probably already rejected it.. i.e. here comes an
2977 * abort :-<.
2978 */
2979 vrf = sctp_find_vrf(vrf_id);
2980 if (vrf == NULL)
2981 return (NULL);
2982
 2983 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
 2984 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2985 SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifn from route:%p ifn_index:%d\n", ifn, ifn_index);
2986 emit_ifn = looked_at = sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2987 if (sctp_ifn == NULL) {
2988 /* ?? We don't have this guy ?? */
2989 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No ifn emit interface?\n");
2990 goto bound_all_plan_b;
2991 }
2992 SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifn_index:%d name:%s is emit interface\n",
2993 ifn_index, sctp_ifn->ifn_name);
2994
2995 if (net) {
2996 cur_addr_num = net->indx_of_eligible_next_to_use;
2997 }
 2998 num_preferred = sctp_count_num_preferred_boundall(sctp_ifn,
 2999 inp, stcb,
3000 non_asoc_addr_ok,
3001 dest_is_loop,
3002 dest_is_priv, fam);
3003 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Found %d preferred source addresses for intf:%s\n",
3004 num_preferred, sctp_ifn->ifn_name);
3005 if (num_preferred == 0) {
3006 /*
3007 * no eligible addresses, we must use some other interface
3008 * address if we can find one.
3009 */
3010 goto bound_all_plan_b;
3011 }
3012 /*
3013 * Ok we have num_eligible_addr set with how many we can use, this
3014 * may vary from call to call due to addresses being deprecated
3015 * etc..
3016 */
3017 if (cur_addr_num >= num_preferred) {
3018 cur_addr_num = 0;
3019 }
3020 /*
3021 * select the nth address from the list (where cur_addr_num is the
3022 * nth) and 0 is the first one, 1 is the second one etc...
3023 */
3024 SCTPDBG(SCTP_DEBUG_OUTPUT2, "cur_addr_num:%d\n", cur_addr_num);
3025
3026 sctp_ifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok, dest_is_loop,
3027 dest_is_priv, cur_addr_num, fam, ro);
3028
3029 /* if sctp_ifa is NULL something changed??, fall to plan b. */
3030 if (sctp_ifa) {
3031 atomic_add_int(&sctp_ifa->refcount, 1);
3032 if (net) {
3033 /* save off where the next one we will want */
3034 net->indx_of_eligible_next_to_use = cur_addr_num + 1;
3035 }
3036 return (sctp_ifa);
3037 }
3038 /*
3039 * plan_b: Look at all interfaces and find a preferred address. If
3040 * no preferred fall through to plan_c.
3041 */
3042bound_all_plan_b:
3043 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan B\n");
3044 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3045 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Examine interface %s\n",
3047 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3048 /* wrong base scope */
3049 SCTPDBG(SCTP_DEBUG_OUTPUT2, "skip\n");
3050 continue;
3051 }
3052 if ((sctp_ifn == looked_at) && looked_at) {
3053 /* already looked at this guy */
3054 SCTPDBG(SCTP_DEBUG_OUTPUT2, "already seen\n");
3055 continue;
3056 }
3057 num_preferred = sctp_count_num_preferred_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok,
3058 dest_is_loop, dest_is_priv, fam);
3060 "Found ifn:%p %d preferred source addresses\n",
3061 ifn, num_preferred);
3062 if (num_preferred == 0) {
3063 /* None on this interface. */
3064 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No preferred -- skipping to next\n");
3065 continue;
3066 }
3068 "num preferred:%d on interface:%p cur_addr_num:%d\n",
3069 num_preferred, (void *)sctp_ifn, cur_addr_num);
3070
3071 /*
3072 * Ok, num_preferred now holds how many addresses we can
3073 * use; this may vary from call to call due to addresses
3074 * being deprecated, etc.
3075 */
3076 if (cur_addr_num >= num_preferred) {
3077 cur_addr_num = 0;
3078 }
3079 sifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok, dest_is_loop,
3080 dest_is_priv, cur_addr_num, fam, ro);
3081 if (sifa == NULL)
3082 continue;
3083 if (net) {
3084 net->indx_of_eligible_next_to_use = cur_addr_num + 1;
3085 SCTPDBG(SCTP_DEBUG_OUTPUT2, "we selected %d\n",
3086 cur_addr_num);
3087 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Source:");
3089 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Dest:");
3091 }
3092 atomic_add_int(&sifa->refcount, 1);
3093 return (sifa);
3094 }
3095#ifdef INET
3096again_with_private_addresses_allowed:
3097#endif
3098 /* plan_c: do we have an acceptable address on the emit interface */
3099 sifa = NULL;
3100 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan C: find acceptable on interface\n");
3101 if (emit_ifn == NULL) {
3102 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Jump to Plan D - no emit_ifn\n");
3103 goto plan_d;
3104 }
3105 LIST_FOREACH(sctp_ifa, &emit_ifn->ifalist, next_ifa) {
3106 SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifa:%p\n", (void *)sctp_ifa);
3107#ifdef INET
3108 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3109 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3110 &sctp_ifa->address.sin.sin_addr) != 0)) {
3111 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Jailed\n");
3112 continue;
3113 }
3114#endif
3115#ifdef INET6
3116 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3117 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3118 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3119 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Jailed\n");
3120 continue;
3121 }
3122#endif
3123 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3124 (non_asoc_addr_ok == 0)) {
3125 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Defer\n");
3126 continue;
3127 }
3128 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop,
3129 dest_is_priv, fam);
3130 if (sifa == NULL) {
3131 SCTPDBG(SCTP_DEBUG_OUTPUT2, "IFA not acceptable\n");
3132 continue;
3133 }
3134 if (stcb) {
3135 if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) {
3136 SCTPDBG(SCTP_DEBUG_OUTPUT2, "NOT in scope\n");
3137 sifa = NULL;
3138 continue;
3139 }
3140 if (((non_asoc_addr_ok == 0) &&
3141 (sctp_is_addr_restricted(stcb, sifa))) ||
3142 (non_asoc_addr_ok &&
3143 (sctp_is_addr_restricted(stcb, sifa)) &&
3144 (!sctp_is_addr_pending(stcb, sifa)))) {
3145 /*
3146 * It is restricted for some reason..
3147 * probably not yet added.
3148 */
3149 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Its restricted\n");
3150 sifa = NULL;
3151 continue;
3152 }
3153 }
3154 atomic_add_int(&sifa->refcount, 1);
3155 goto out;
3156 }
3157plan_d:
3158 /*
3159 * plan_d: We are in trouble. No preferred address on the emit
3160 * interface, and no preferred address on any other interface either.
3161 * Go out and see if we can find an acceptable address somewhere
3162 * amongst all interfaces.
3163 */
3164 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan D looked_at is %p\n", (void *)looked_at);
3165 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3166 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3167 /* wrong base scope */
3168 continue;
3169 }
3170 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
3171#ifdef INET
3172 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3173 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3174 &sctp_ifa->address.sin.sin_addr) != 0)) {
3175 continue;
3176 }
3177#endif
3178#ifdef INET6
3179 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3180 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3181 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3182 continue;
3183 }
3184#endif
3185 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3186 (non_asoc_addr_ok == 0))
3187 continue;
3188 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
3189 dest_is_loop,
3190 dest_is_priv, fam);
3191 if (sifa == NULL)
3192 continue;
3193 if (stcb) {
3194 if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) {
3195 sifa = NULL;
3196 continue;
3197 }
3198 if (((non_asoc_addr_ok == 0) &&
3199 (sctp_is_addr_restricted(stcb, sifa))) ||
3200 (non_asoc_addr_ok &&
3201 (sctp_is_addr_restricted(stcb, sifa)) &&
3202 (!sctp_is_addr_pending(stcb, sifa)))) {
3203 /*
3204 * It is restricted for some
3205 * reason.. probably not yet added.
3206 */
3207 sifa = NULL;
3208 continue;
3209 }
3210 }
3211 goto out;
3212 }
3213 }
3214#ifdef INET
3215 if (stcb) {
3216 if ((retried == 0) && (stcb->asoc.scope.ipv4_local_scope == 0)) {
3217 stcb->asoc.scope.ipv4_local_scope = 1;
3218 retried = 1;
3219 goto again_with_private_addresses_allowed;
3220 } else if (retried == 1) {
3221 stcb->asoc.scope.ipv4_local_scope = 0;
3222 }
3223 }
3224#endif
3225out:
3226#ifdef INET
3227 if (sifa) {
3228 if (retried == 1) {
3229 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3230 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3231 /* wrong base scope */
3232 continue;
3233 }
3234 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
3235 struct sctp_ifa *tmp_sifa;
3236
3237#ifdef INET
3238 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3239 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3240 &sctp_ifa->address.sin.sin_addr) != 0)) {
3241 continue;
3242 }
3243#endif
3244#ifdef INET6
3245 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3246 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3247 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3248 continue;
3249 }
3250#endif
3251 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3252 (non_asoc_addr_ok == 0))
3253 continue;
3254 tmp_sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
3255 dest_is_loop,
3256 dest_is_priv, fam);
3257 if (tmp_sifa == NULL) {
3258 continue;
3259 }
3260 if (tmp_sifa == sifa) {
3261 continue;
3262 }
3263 if (stcb) {
3264 if (sctp_is_address_in_scope(tmp_sifa,
3265 &stcb->asoc.scope, 0) == 0) {
3266 continue;
3267 }
3268 if (((non_asoc_addr_ok == 0) &&
3269 (sctp_is_addr_restricted(stcb, tmp_sifa))) ||
3270 (non_asoc_addr_ok &&
3271 (sctp_is_addr_restricted(stcb, tmp_sifa)) &&
3272 (!sctp_is_addr_pending(stcb, tmp_sifa)))) {
3273 /*
3274 * It is restricted
3275 * for some reason..
3276 * probably not yet
3277 * added.
3278 */
3279 continue;
3280 }
3281 }
3282 if ((tmp_sifa->address.sin.sin_family == AF_INET) &&
3283 (IN4_ISPRIVATE_ADDRESS(&(tmp_sifa->address.sin.sin_addr)))) {
3284 sctp_add_local_addr_restricted(stcb, tmp_sifa);
3285 }
3286 }
3287 }
3288 }
3289 atomic_add_int(&sifa->refcount, 1);
3290 }
3291#endif
3292 return (sifa);
3293}
3294
3295/* tcb may be NULL */
3296 struct sctp_ifa *
3297sctp_source_address_selection(struct sctp_inpcb *inp,
3298 struct sctp_tcb *stcb,
3299 sctp_route_t *ro,
3300 struct sctp_nets *net,
3301 int non_asoc_addr_ok, uint32_t vrf_id)
3302{
3303 struct sctp_ifa *answer;
3304 uint8_t dest_is_priv, dest_is_loop;
3305 sa_family_t fam;
3306#ifdef INET
3307 struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst;
3308#endif
3309#ifdef INET6
3310 struct sockaddr_in6 *to6 = (struct sockaddr_in6 *)&ro->ro_dst;
3311#endif
3312
3376 if (ro->ro_nh == NULL) {
3377 /*
3378 * Need a route to cache.
3379 */
3380 SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
3381 }
3382 if (ro->ro_nh == NULL) {
3383 return (NULL);
3384 }
3385 fam = ro->ro_dst.sa_family;
3386 dest_is_priv = dest_is_loop = 0;
3387 /* Setup our scopes for the destination */
3388 switch (fam) {
3389#ifdef INET
3390 case AF_INET:
3391 /* Scope based on outbound address */
3392 if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
3393 dest_is_loop = 1;
3394 if (net != NULL) {
3395 /* mark it as local */
3396 net->addr_is_local = 1;
3397 }
3398 } else if ((IN4_ISPRIVATE_ADDRESS(&to->sin_addr))) {
3399 dest_is_priv = 1;
3400 }
3401 break;
3402#endif
3403#ifdef INET6
3404 case AF_INET6:
3405 /* Scope based on outbound address */
3406 if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr) ||
3407 SCTP_ROUTE_IS_REAL_LOOP(ro)) {
3408 /*
3409 * If the address is a loopback address, which
3410 * consists of "::1" OR "fe80::1%lo0", we are
3411 * loopback scope. But we don't use dest_is_priv
3412 * (link local addresses).
3413 */
3414 dest_is_loop = 1;
3415 if (net != NULL) {
3416 /* mark it as local */
3417 net->addr_is_local = 1;
3418 }
3419 } else if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
3420 dest_is_priv = 1;
3421 }
3422 break;
3423#endif
3424 }
3425 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Select source addr for:");
3426 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&ro->ro_dst);
3427 SCTP_IPI_ADDR_RLOCK();
3428 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3429 /*
3430 * Bound all case
3431 */
3432 answer = sctp_choose_boundall(inp, stcb, net, ro, vrf_id,
3433 dest_is_priv, dest_is_loop,
3434 non_asoc_addr_ok, fam);
3435 SCTP_IPI_ADDR_RUNLOCK();
3436 return (answer);
3437 }
3438 /*
3439 * Subset bound case
3440 */
3441 if (stcb) {
3442 answer = sctp_choose_boundspecific_stcb(inp, stcb, ro,
3443 vrf_id, dest_is_priv,
3444 dest_is_loop,
3445 non_asoc_addr_ok, fam);
3446 } else {
3447 answer = sctp_choose_boundspecific_inp(inp, ro, vrf_id,
3448 non_asoc_addr_ok,
3449 dest_is_priv,
3450 dest_is_loop, fam);
3451 }
3452 SCTP_IPI_ADDR_RUNLOCK();
3453 return (answer);
3454}
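 /*
  * Editor's note: sctp_source_address_selection() is the common entry
  * point used by the output path.  It first makes sure a nexthop is
  * cached for ro, classifies the destination as loopback, private or
  * link-local to set the scope hints, and then dispatches to the
  * bound-all chooser above or to the subset-bound choosers (with or
  * without an existing stcb).  The sctp_ifa it returns is referenced;
  * callers drop that reference with sctp_free_ifa() when done.
  */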
3455
3456static int
3457sctp_find_cmsg(int c_type, void *data, struct mbuf *control, size_t cpsize)
3458{
3459 struct cmsghdr cmh;
3460 struct sctp_sndinfo sndinfo;
3461 struct sctp_prinfo prinfo;
3462 struct sctp_authinfo authinfo;
3463 int tot_len, rem_len, cmsg_data_len, cmsg_data_off, off;
3464 int found;
3465
3466 /*
3467 * Independent of how many mbufs, find the c_type inside the control
3468 * structure and copy out the data.
3469 */
3470 found = 0;
3471 tot_len = SCTP_BUF_LEN(control);
3472 for (off = 0; off < tot_len; off += CMSG_ALIGN(cmh.cmsg_len)) {
3473 rem_len = tot_len - off;
3474 if (rem_len < (int)CMSG_ALIGN(sizeof(cmh))) {
3475 /* There is not enough room for one more. */
3476 return (found);
3477 }
3478 m_copydata(control, off, sizeof(cmh), (caddr_t)&cmh);
3479 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3480 /* We don't have a complete CMSG header. */
3481 return (found);
3482 }
3483 if ((cmh.cmsg_len > INT_MAX) || ((int)cmh.cmsg_len > rem_len)) {
3484 /* We don't have the complete CMSG. */
3485 return (found);
3486 }
3487 cmsg_data_len = (int)cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh));
3488 cmsg_data_off = off + CMSG_ALIGN(sizeof(cmh));
3489 if ((cmh.cmsg_level == IPPROTO_SCTP) &&
3490 ((c_type == cmh.cmsg_type) ||
3491 ((c_type == SCTP_SNDRCV) &&
3492 ((cmh.cmsg_type == SCTP_SNDINFO) ||
3493 (cmh.cmsg_type == SCTP_PRINFO) ||
3494 (cmh.cmsg_type == SCTP_AUTHINFO))))) {
3495 if (c_type == cmh.cmsg_type) {
3496 if (cpsize > INT_MAX) {
3497 return (found);
3498 }
3499 if (cmsg_data_len < (int)cpsize) {
3500 return (found);
3501 }
3502 /* It is exactly what we want. Copy it out. */
3503 m_copydata(control, cmsg_data_off, (int)cpsize, (caddr_t)data);
3504 return (1);
3505 } else {
3506 struct sctp_sndrcvinfo *sndrcvinfo;
3507
3508 sndrcvinfo = (struct sctp_sndrcvinfo *)data;
3509 if (found == 0) {
3510 if (cpsize < sizeof(struct sctp_sndrcvinfo)) {
3511 return (found);
3512 }
3513 memset(sndrcvinfo, 0, sizeof(struct sctp_sndrcvinfo));
3514 }
3515 switch (cmh.cmsg_type) {
3516 case SCTP_SNDINFO:
3517 if (cmsg_data_len < (int)sizeof(struct sctp_sndinfo)) {
3518 return (found);
3519 }
3520 m_copydata(control, cmsg_data_off, sizeof(struct sctp_sndinfo), (caddr_t)&sndinfo);
3521 sndrcvinfo->sinfo_stream = sndinfo.snd_sid;
3522 sndrcvinfo->sinfo_flags = sndinfo.snd_flags;
3523 sndrcvinfo->sinfo_ppid = sndinfo.snd_ppid;
3524 sndrcvinfo->sinfo_context = sndinfo.snd_context;
3525 sndrcvinfo->sinfo_assoc_id = sndinfo.snd_assoc_id;
3526 break;
3527 case SCTP_PRINFO:
3528 if (cmsg_data_len < (int)sizeof(struct sctp_prinfo)) {
3529 return (found);
3530 }
3531 m_copydata(control, cmsg_data_off, sizeof(struct sctp_prinfo), (caddr_t)&prinfo);
3532 if (prinfo.pr_policy != SCTP_PR_SCTP_NONE) {
3533 sndrcvinfo->sinfo_timetolive = prinfo.pr_value;
3534 } else {
3535 sndrcvinfo->sinfo_timetolive = 0;
3536 }
3537 sndrcvinfo->sinfo_flags |= prinfo.pr_policy;
3538 break;
3539 case SCTP_AUTHINFO:
3540 if (cmsg_data_len < (int)sizeof(struct sctp_authinfo)) {
3541 return (found);
3542 }
3543 m_copydata(control, cmsg_data_off, sizeof(struct sctp_authinfo), (caddr_t)&authinfo);
3544 sndrcvinfo->sinfo_keynumber_valid = 1;
3545 sndrcvinfo->sinfo_keynumber = authinfo.auth_keynumber;
3546 break;
3547 default:
3548 return (found);
3549 }
3550 found = 1;
3551 }
3552 }
3553 }
3554 return (found);
3555}
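 /*
  * Editor's note: sctp_find_cmsg() walks the control mbuf one cmsghdr
  * at a time, validating every cmsg_len against the remaining space
  * before copying anything out.  When asked for SCTP_SNDRCV it also
  * accepts the newer SCTP_SNDINFO/SCTP_PRINFO/SCTP_AUTHINFO cmsgs and
  * folds them into a single struct sctp_sndrcvinfo.  For illustration
  * only (a sketch, not part of this file), a userland sender would
  * typically hand such ancillary data to sendmsg() roughly like this:
  *
  *	struct sctp_sndinfo snd = { .snd_sid = 1, .snd_ppid = htonl(42) };
  *	char cbuf[CMSG_SPACE(sizeof(snd))];
  *	struct msghdr msg = { .msg_control = cbuf, .msg_controllen = sizeof(cbuf) };
  *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
  *	cmsg->cmsg_level = IPPROTO_SCTP;
  *	cmsg->cmsg_type = SCTP_SNDINFO;
  *	cmsg->cmsg_len = CMSG_LEN(sizeof(snd));
  *	memcpy(CMSG_DATA(cmsg), &snd, sizeof(snd));
  *
  * (msg_name and msg_iov still have to be filled in before the send.)
  */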
3556
3557static int
3558sctp_process_cmsgs_for_init(struct sctp_tcb *stcb, struct mbuf *control, int *error)
3559{
3560 struct cmsghdr cmh;
3561 struct sctp_initmsg initmsg;
3562#ifdef INET
3563 struct sockaddr_in sin;
3564#endif
3565#ifdef INET6
3566 struct sockaddr_in6 sin6;
3567#endif
3568 int tot_len, rem_len, cmsg_data_len, cmsg_data_off, off;
3569
3570 tot_len = SCTP_BUF_LEN(control);
3571 for (off = 0; off < tot_len; off += CMSG_ALIGN(cmh.cmsg_len)) {
3572 rem_len = tot_len - off;
3573 if (rem_len < (int)CMSG_ALIGN(sizeof(cmh))) {
3574 /* There is not enough room for one more. */
3575 *error = EINVAL;
3576 return (1);
3577 }
3578 m_copydata(control, off, sizeof(cmh), (caddr_t)&cmh);
3579 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3580 /* We don't have a complete CMSG header. */
3581 *error = EINVAL;
3582 return (1);
3583 }
3584 if ((cmh.cmsg_len > INT_MAX) || ((int)cmh.cmsg_len > rem_len)) {
3585 /* We don't have the complete CMSG. */
3586 *error = EINVAL;
3587 return (1);
3588 }
3589 cmsg_data_len = (int)cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh));
3590 cmsg_data_off = off + CMSG_ALIGN(sizeof(cmh));
3591 if (cmh.cmsg_level == IPPROTO_SCTP) {
3592 switch (cmh.cmsg_type) {
3593 case SCTP_INIT:
3594 if (cmsg_data_len < (int)sizeof(struct sctp_initmsg)) {
3595 *error = EINVAL;
3596 return (1);
3597 }
3598 m_copydata(control, cmsg_data_off, sizeof(struct sctp_initmsg), (caddr_t)&initmsg);
3599 if (initmsg.sinit_max_attempts)
3600 stcb->asoc.max_init_times = initmsg.sinit_max_attempts;
3601 if (initmsg.sinit_num_ostreams)
3602 stcb->asoc.pre_open_streams = initmsg.sinit_num_ostreams;
3603 if (initmsg.sinit_max_instreams)
3604 stcb->asoc.max_inbound_streams = initmsg.sinit_max_instreams;
3605 if (initmsg.sinit_max_init_timeo)
3606 stcb->asoc.initial_init_rto_max = initmsg.sinit_max_init_timeo;
3607 if (stcb->asoc.streamoutcnt < stcb->asoc.pre_open_streams) {
3608 struct sctp_stream_out *tmp_str;
3609 unsigned int i;
3610#if defined(SCTP_DETAILED_STR_STATS)
3611 int j;
3612#endif
3613
3614 /* Default is NOT correct */
3615 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, default:%d pre_open:%d\n",
3616 stcb->asoc.streamoutcnt, stcb->asoc.pre_open_streams);
3617 SCTP_TCB_UNLOCK(stcb);
3618 SCTP_MALLOC(tmp_str,
3619 struct sctp_stream_out *,
3620 (stcb->asoc.pre_open_streams * sizeof(struct sctp_stream_out)),
3621 SCTP_M_STRMO);
3622 SCTP_TCB_LOCK(stcb);
3623 if (tmp_str != NULL) {
3624 SCTP_FREE(stcb->asoc.strmout, SCTP_M_STRMO);
3625 stcb->asoc.strmout = tmp_str;
3627 } else {
3628 stcb->asoc.pre_open_streams = stcb->asoc.streamoutcnt;
3629 }
3630 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3631 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
3632 stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], NULL);
3633 stcb->asoc.strmout[i].chunks_on_queues = 0;
3634#if defined(SCTP_DETAILED_STR_STATS)
3635 for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
3636 stcb->asoc.strmout[i].abandoned_sent[j] = 0;
3637 stcb->asoc.strmout[i].abandoned_unsent[j] = 0;
3638 }
3639#else
3640 stcb->asoc.strmout[i].abandoned_sent[0] = 0;
3641 stcb->asoc.strmout[i].abandoned_unsent[0] = 0;
3642#endif
3643 stcb->asoc.strmout[i].next_mid_ordered = 0;
3644 stcb->asoc.strmout[i].next_mid_unordered = 0;
3645 stcb->asoc.strmout[i].sid = i;
3646 stcb->asoc.strmout[i].last_msg_incomplete = 0;
3648 }
3649 }
3650 break;
3651#ifdef INET
3652 case SCTP_DSTADDRV4:
3653 if (cmsg_data_len < (int)sizeof(struct in_addr)) {
3654 *error = EINVAL;
3655 return (1);
3656 }
3657 memset(&sin, 0, sizeof(struct sockaddr_in));
3658 sin.sin_family = AF_INET;
3659 sin.sin_len = sizeof(struct sockaddr_in);
3660 sin.sin_port = stcb->rport;
3661 m_copydata(control, cmsg_data_off, sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
3662 if ((sin.sin_addr.s_addr == INADDR_ANY) ||
3663 (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
3664 IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
3665 *error = EINVAL;
3666 return (1);
3667 }
3668 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL, stcb->asoc.port,
3670 *error = ENOBUFS;
3671 return (1);
3672 }
3673 break;
3674#endif
3675#ifdef INET6
3676 case SCTP_DSTADDRV6:
3677 if (cmsg_data_len < (int)sizeof(struct in6_addr)) {
3678 *error = EINVAL;
3679 return (1);
3680 }
3681 memset(&sin6, 0, sizeof(struct sockaddr_in6));
3682 sin6.sin6_family = AF_INET6;
3683 sin6.sin6_len = sizeof(struct sockaddr_in6);
3684 sin6.sin6_port = stcb->rport;
3685 m_copydata(control, cmsg_data_off, sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
3686 if (IN6_IS_ADDR_UNSPECIFIED(&sin6.sin6_addr) ||
3687 IN6_IS_ADDR_MULTICAST(&sin6.sin6_addr)) {
3688 *error = EINVAL;
3689 return (1);
3690 }
3691#ifdef INET
3692 if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
3693 in6_sin6_2_sin(&sin, &sin6);
3694 if ((sin.sin_addr.s_addr == INADDR_ANY) ||
3695 (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
3696 IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
3697 *error = EINVAL;
3698 return (1);
3699 }
3700 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL, stcb->asoc.port,
3702 *error = ENOBUFS;
3703 return (1);
3704 }
3705 } else
3706#endif
3707 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin6, NULL, stcb->asoc.port,
3709 *error = ENOBUFS;
3710 return (1);
3711 }
3712 break;
3713#endif
3714 default:
3715 break;
3716 }
3717 }
3718 }
3719 return (0);
3720}
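 /*
  * Editor's note: the routine above is used when an association is set
  * up implicitly by a send call.  SCTP_INIT overrides the stream
  * counts, the maximum number of INIT retransmissions and the maximum
  * INIT RTO, growing asoc.strmout when more outgoing streams are
  * requested (note the TCB is unlocked around the allocation).
  * SCTP_DSTADDRV4/SCTP_DSTADDRV6 add further remote addresses, with
  * IPv4-mapped IPv6 addresses converted to plain IPv4 first.  A
  * malformed cmsg fails with EINVAL, a failed address add with ENOBUFS.
  */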
3721
3722#if defined(INET) || defined(INET6)
3723static struct sctp_tcb *
3724sctp_findassociation_cmsgs(struct sctp_inpcb **inp_p,
3725 uint16_t port,
3726 struct mbuf *control,
3727 struct sctp_nets **net_p,
3728 int *error)
3729{
3730 struct cmsghdr cmh;
3731 struct sctp_tcb *stcb;
3732 struct sockaddr *addr;
3733#ifdef INET
3734 struct sockaddr_in sin;
3735#endif
3736#ifdef INET6
3737 struct sockaddr_in6 sin6;
3738#endif
3739 int tot_len, rem_len, cmsg_data_len, cmsg_data_off, off;
3740
3741 tot_len = SCTP_BUF_LEN(control);
3742 for (off = 0; off < tot_len; off += CMSG_ALIGN(cmh.cmsg_len)) {
3743 rem_len = tot_len - off;
3744 if (rem_len < (int)CMSG_ALIGN(sizeof(cmh))) {
3745 /* There is not enough room for one more. */
3746 *error = EINVAL;
3747 return (NULL);
3748 }
3749 m_copydata(control, off, sizeof(cmh), (caddr_t)&cmh);
3750 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3751 /* We don't have a complete CMSG header. */
3752 *error = EINVAL;
3753 return (NULL);
3754 }
3755 if ((cmh.cmsg_len > INT_MAX) || ((int)cmh.cmsg_len > rem_len)) {
3756 /* We don't have the complete CMSG. */
3757 *error = EINVAL;
3758 return (NULL);
3759 }
3760 cmsg_data_len = (int)cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh));
3761 cmsg_data_off = off + CMSG_ALIGN(sizeof(cmh));
3762 if (cmh.cmsg_level == IPPROTO_SCTP) {
3763 switch (cmh.cmsg_type) {
3764#ifdef INET
3765 case SCTP_DSTADDRV4:
3766 if (cmsg_data_len < (int)sizeof(struct in_addr)) {
3767 *error = EINVAL;
3768 return (NULL);
3769 }
3770 memset(&sin, 0, sizeof(struct sockaddr_in));
3771 sin.sin_family = AF_INET;
3772 sin.sin_len = sizeof(struct sockaddr_in);
3773 sin.sin_port = port;
3774 m_copydata(control, cmsg_data_off, sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
3775 addr = (struct sockaddr *)&sin;
3776 break;
3777#endif
3778#ifdef INET6
3779 case SCTP_DSTADDRV6:
3780 if (cmsg_data_len < (int)sizeof(struct in6_addr)) {
3781 *error = EINVAL;
3782 return (NULL);
3783 }
3784 memset(&sin6, 0, sizeof(struct sockaddr_in6));
3785 sin6.sin6_family = AF_INET6;
3786 sin6.sin6_len = sizeof(struct sockaddr_in6);
3787 sin6.sin6_port = port;
3788 m_copydata(control, cmsg_data_off, sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
3789#ifdef INET
3790 if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
3791 in6_sin6_2_sin(&sin, &sin6);
3792 addr = (struct sockaddr *)&sin;
3793 } else
3794#endif
3795 addr = (struct sockaddr *)&sin6;
3796 break;
3797#endif
3798 default:
3799 addr = NULL;
3800 break;
3801 }
3802 if (addr) {
3803 stcb = sctp_findassociation_ep_addr(inp_p, addr, net_p, NULL, NULL);
3804 if (stcb != NULL) {
3805 return (stcb);
3806 }
3807 }
3808 }
3809 }
3810 return (NULL);
3811}
3812#endif
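 /*
  * Editor's note: the lookup above mirrors the address handling in
  * sctp_process_cmsgs_for_init(): each SCTP_DSTADDRV4/SCTP_DSTADDRV6
  * cmsg is turned into a sockaddr (IPv4-mapped IPv6 becomes IPv4) and
  * used to search for an already existing association, so a send
  * naming several destinations is matched against any one of them.
  */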
3813
3814static struct mbuf *
3815sctp_add_cookie(struct mbuf *init, int init_offset,
3816 struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in, uint8_t **signature)
3817{
3818 struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret;
3819 struct sctp_state_cookie *stc;
3820 struct sctp_paramhdr *ph;
3821 uint16_t cookie_sz;
3822
3823 mret = sctp_get_mbuf_for_msg((sizeof(struct sctp_state_cookie) +
3824 sizeof(struct sctp_paramhdr)), 0,
3825 M_NOWAIT, 1, MT_DATA);
3826 if (mret == NULL) {
3827 return (NULL);
3828 }
3829 copy_init = SCTP_M_COPYM(init, init_offset, M_COPYALL, M_NOWAIT);
3830 if (copy_init == NULL) {
3831 sctp_m_freem(mret);
3832 return (NULL);
3833 }
3834#ifdef SCTP_MBUF_LOGGING
3835 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
3836 sctp_log_mbc(copy_init, SCTP_MBUF_ICOPY);
3837 }
3838#endif
3839 copy_initack = SCTP_M_COPYM(initack, initack_offset, M_COPYALL,
3840 M_NOWAIT);
3841 if (copy_initack == NULL) {
3842 sctp_m_freem(mret);
3843 sctp_m_freem(copy_init);
3844 return (NULL);
3845 }
3846#ifdef SCTP_MBUF_LOGGING
3847 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
3848 sctp_log_mbc(copy_initack, SCTP_MBUF_ICOPY);
3849 }
3850#endif
3851 /* easy side we just drop it on the end */
3852 ph = mtod(mret, struct sctp_paramhdr *);
3853 SCTP_BUF_LEN(mret) = sizeof(struct sctp_state_cookie) +
3854 sizeof(struct sctp_paramhdr);
3855 stc = (struct sctp_state_cookie *)((caddr_t)ph +
3856 sizeof(struct sctp_paramhdr));
3857 ph->param_type = htons(SCTP_STATE_COOKIE);
3858 ph->param_length = 0; /* fill in at the end */
3859 /* Fill in the stc cookie data */
3860 memcpy(stc, stc_in, sizeof(struct sctp_state_cookie));
3861
3862 /* tack the INIT and then the INIT-ACK onto the chain */
3863 cookie_sz = 0;
3864 for (m_at = mret; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3865 cookie_sz += SCTP_BUF_LEN(m_at);
3866 if (SCTP_BUF_NEXT(m_at) == NULL) {
3867 SCTP_BUF_NEXT(m_at) = copy_init;
3868 break;
3869 }
3870 }
3871 for (m_at = copy_init; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3872 cookie_sz += SCTP_BUF_LEN(m_at);
3873 if (SCTP_BUF_NEXT(m_at) == NULL) {
3874 SCTP_BUF_NEXT(m_at) = copy_initack;
3875 break;
3876 }
3877 }
3878 for (m_at = copy_initack; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3879 cookie_sz += SCTP_BUF_LEN(m_at);
3880 if (SCTP_BUF_NEXT(m_at) == NULL) {
3881 break;
3882 }
3883 }
3884 sig = sctp_get_mbuf_for_msg(SCTP_SIGNATURE_SIZE, 0, M_NOWAIT, 1, MT_DATA);
3885 if (sig == NULL) {
3886 /* no space, so free the entire chain */
3887 sctp_m_freem(mret);
3888 return (NULL);
3889 }
3890 SCTP_BUF_NEXT(m_at) = sig;
3892 cookie_sz += SCTP_SIGNATURE_SIZE;
3893 ph->param_length = htons(cookie_sz);
3894 *signature = (uint8_t *)mtod(sig, caddr_t);
3895 memset(*signature, 0, SCTP_SIGNATURE_SIZE);
3896 return (mret);
3897}
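 /*
  * Editor's note on the cookie layout built above: a STATE_COOKIE
  * parameter header, the sctp_state_cookie handed in, a copy of the
  * peer's INIT, a copy of our INIT-ACK and, at the very end,
  * SCTP_SIGNATURE_SIZE bytes that are zeroed here.  The parameter
  * length covers the whole chain, and *signature points at the
  * trailing bytes so the caller can fill in the HMAC over the cookie
  * afterwards.
  */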
3898
3899 static uint8_t
3900sctp_get_ect(struct sctp_tcb *stcb)
3901{
3902 if ((stcb != NULL) && (stcb->asoc.ecn_supported == 1)) {
3903 return (SCTP_ECT0_BIT);
3904 } else {
3905 return (0);
3906 }
3907}
3908
3909#if defined(INET) || defined(INET6)
3910static void
3911sctp_handle_no_route(struct sctp_tcb *stcb,
3912 struct sctp_nets *net,
3913 int so_locked)
3914{
3915 SCTPDBG(SCTP_DEBUG_OUTPUT1, "dropped packet - no valid source addr\n");
3916
3917 if (net) {
3918 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Destination was ");
3920 if (net->dest_state & SCTP_ADDR_CONFIRMED) {
3921 if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb) {
3922 SCTPDBG(SCTP_DEBUG_OUTPUT1, "no route takes interface %p down\n", (void *)net);
3923 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
3924 stcb, 0,
3925 (void *)net,
3926 so_locked);
3927 net->dest_state &= ~SCTP_ADDR_REACHABLE;
3928 net->dest_state &= ~SCTP_ADDR_PF;
3929 }
3930 }
3931 if (stcb) {
3932 if (net == stcb->asoc.primary_destination) {
3933 /* need a new primary */
3934 struct sctp_nets *alt;
3935
3936 alt = sctp_find_alternate_net(stcb, net, 0);
3937 if (alt != net) {
3938 if (stcb->asoc.alternate) {
3939 sctp_free_remote_addr(stcb->asoc.alternate);
3940 }
3941 stcb->asoc.alternate = alt;
3942 atomic_add_int(&stcb->asoc.alternate->ref_count, 1);
3943 if (net->ro._s_addr) {
3944 sctp_free_ifa(net->ro._s_addr);
3945 net->ro._s_addr = NULL;
3946 }
3947 net->src_addr_selected = 0;
3948 }
3949 }
3950 }
3951 }
3952}
3953#endif
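 /*
  * Editor's note: when no usable source address or route exists for a
  * destination, the helper above marks a previously confirmed and
  * reachable net as unreachable (notifying the ULP) and, if that net
  * was the primary destination, installs an alternate and drops the
  * stale cached source address so it is re-selected on the next send.
  */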
3954
3955 static int
3956sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
3957 struct sctp_tcb *stcb, /* may be NULL */
3958 struct sctp_nets *net,
3959 struct sockaddr *to,
3960 struct mbuf *m,
3961 uint32_t auth_offset,
3962 struct sctp_auth_chunk *auth,
3963 uint16_t auth_keyid,
3964 int nofragment_flag,
3965 int ecn_ok,
3966 int out_of_asoc_ok,
3967 uint16_t src_port,
3968 uint16_t dest_port,
3969 uint32_t v_tag,
3970 uint16_t port,
3971 union sctp_sockstore *over_addr,
3972 uint8_t mflowtype, uint32_t mflowid,
3973 int so_locked)
3974{
3975/* nofragment_flag to tell if IP_DF should be set (IPv4 only) */
3988 /* Will need ifdefs around this */
3989 struct mbuf *newm;
3990 struct sctphdr *sctphdr;
3991 int packet_length;
3992 int ret;
3993#if defined(INET) || defined(INET6)
3994 uint32_t vrf_id;
3995#endif
3996#if defined(INET) || defined(INET6)
3997 struct mbuf *o_pak;
3998 sctp_route_t *ro = NULL;
3999 struct udphdr *udp = NULL;
4000#endif
4001 uint8_t tos_value;
4002
4003 if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) {
4004 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
4005 sctp_m_freem(m);
4006 return (EFAULT);
4007 }
4008#if defined(INET) || defined(INET6)
4009 if (stcb) {
4010 vrf_id = stcb->asoc.vrf_id;
4011 } else {
4012 vrf_id = inp->def_vrf_id;
4013 }
4014#endif
4015 /* fill in the HMAC digest for any AUTH chunk in the packet */
4016 if ((auth != NULL) && (stcb != NULL)) {
4017 sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb, auth_keyid);
4018 }
4019
4020 if (net) {
4021 tos_value = net->dscp;
4022 } else if (stcb) {
4023 tos_value = stcb->asoc.default_dscp;
4024 } else {
4025 tos_value = inp->sctp_ep.default_dscp;
4026 }
4027
4028 switch (to->sa_family) {
4029#ifdef INET
4030 case AF_INET:
4031 {
4032 struct ip *ip = NULL;
4033 sctp_route_t iproute;
4034 int len;
4035
4036 len = SCTP_MIN_V4_OVERHEAD;
4037 if (port) {
4038 len += sizeof(struct udphdr);
4039 }
4040 newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
4041 if (newm == NULL) {
4042 sctp_m_freem(m);
4043 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4044 return (ENOMEM);
4045 }
4046 SCTP_ALIGN_TO_END(newm, len);
4047 SCTP_BUF_LEN(newm) = len;
4048 SCTP_BUF_NEXT(newm) = m;
4049 m = newm;
4050 if (net != NULL) {
4051 m->m_pkthdr.flowid = net->flowid;
4052 M_HASHTYPE_SET(m, net->flowtype);
4053 } else {
4054 m->m_pkthdr.flowid = mflowid;
4055 M_HASHTYPE_SET(m, mflowtype);
4056 }
4057 packet_length = sctp_calculate_len(m);
4058 ip = mtod(m, struct ip *);
4059 ip->ip_v = IPVERSION;
4060 ip->ip_hl = (sizeof(struct ip) >> 2);
4061 if (tos_value == 0) {
4062 /*
4063 * This means, in particular, that it is not set
4064 * at the SCTP layer. So use the value from
4065 * the IP layer.
4066 */
4067 tos_value = inp->ip_inp.inp.inp_ip_tos;
4068 }
4069 tos_value &= 0xfc;
4070 if (ecn_ok) {
4071 tos_value |= sctp_get_ect(stcb);
4072 }
4073 if ((nofragment_flag) && (port == 0)) {
4074 ip->ip_off = htons(IP_DF);
4075 } else {
4076 ip->ip_off = htons(0);
4077 }
4078 /* FreeBSD has a function for ip_id's */
4079 ip_fillid(ip);
4080
4081 ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl;
4082 ip->ip_len = htons(packet_length);
4083 ip->ip_tos = tos_value;
4084 if (port) {
4085 ip->ip_p = IPPROTO_UDP;
4086 } else {
4087 ip->ip_p = IPPROTO_SCTP;
4088 }
4089 ip->ip_sum = 0;
4090 if (net == NULL) {
4091 ro = &iproute;
4092 memset(&iproute, 0, sizeof(iproute));
4093 memcpy(&ro->ro_dst, to, to->sa_len);
4094 } else {
4095 ro = (sctp_route_t *)&net->ro;
4096 }
4097 /* Now the address selection part */
4098 ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr;
4099
4100 /* call the routine to select the src address */
4101 if (net && out_of_asoc_ok == 0) {
4103 sctp_free_ifa(net->ro._s_addr);
4104 net->ro._s_addr = NULL;
4105 net->src_addr_selected = 0;
4106 RO_NHFREE(ro);
4107 }
4108 if (net->src_addr_selected == 0) {
4109 /* Cache the source address */
4110 net->ro._s_addr = sctp_source_address_selection(inp, stcb,
4111 ro, net, 0,
4112 vrf_id);
4113 net->src_addr_selected = 1;
4114 }
4115 if (net->ro._s_addr == NULL) {
4116 /* No route to host */
4117 net->src_addr_selected = 0;
4118 sctp_handle_no_route(stcb, net, so_locked);
4119 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4120 sctp_m_freem(m);
4121 return (EHOSTUNREACH);
4122 }
4123 ip->ip_src = net->ro._s_addr->address.sin.sin_addr;
4124 } else {
4125 if (over_addr == NULL) {
4126 struct sctp_ifa *_lsrc;
4127
4128 _lsrc = sctp_source_address_selection(inp, stcb, ro,
4129 net,
4130 out_of_asoc_ok,
4131 vrf_id);
4132 if (_lsrc == NULL) {
4133 sctp_handle_no_route(stcb, net, so_locked);
4134 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4135 sctp_m_freem(m);
4136 return (EHOSTUNREACH);
4137 }
4138 ip->ip_src = _lsrc->address.sin.sin_addr;
4139 sctp_free_ifa(_lsrc);
4140 } else {
4141 ip->ip_src = over_addr->sin.sin_addr;
4142 SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
4143 }
4144 }
4145 if (port) {
4146 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
4147 sctp_handle_no_route(stcb, net, so_locked);
4148 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4149 sctp_m_freem(m);
4150 return (EHOSTUNREACH);
4151 }
4152 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
4153 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4154 udp->uh_dport = port;
4155 udp->uh_ulen = htons((uint16_t)(packet_length - sizeof(struct ip)));
4156 if (V_udp_cksum) {
4157 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
4158 } else {
4159 udp->uh_sum = 0;
4160 }
4161 sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4162 } else {
4163 sctphdr = (struct sctphdr *)((caddr_t)ip + sizeof(struct ip));
4164 }
4165
4166 sctphdr->src_port = src_port;
4167 sctphdr->dest_port = dest_port;
4168 sctphdr->v_tag = v_tag;
4169 sctphdr->checksum = 0;
4170
4171 /*
4172 * If source address selection fails and we find no
4173 * route then the ip_output should fail as well with
4174 * a NO_ROUTE_TO_HOST type error. We probably should
4175 * catch that somewhere and abort the association
4176 * right away (assuming this is an INIT being sent).
4177 */
4178 if (ro->ro_nh == NULL) {
4179 /*
4180 * src addr selection failed to find a route
4181 * (or valid source addr), so we can't get
4182 * there from here (yet)!
4183 */
4184 sctp_handle_no_route(stcb, net, so_locked);
4185 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4186 sctp_m_freem(m);
4187 return (EHOSTUNREACH);
4188 }
4189 if (ro != &iproute) {
4190 memcpy(&iproute, ro, sizeof(*ro));
4191 }
4192 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv4 output routine from low level src addr:%x\n",
4193 (uint32_t)(ntohl(ip->ip_src.s_addr)));
4194 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Destination is %x\n",
4195 (uint32_t)(ntohl(ip->ip_dst.s_addr)));
4196 SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n",
4197 (void *)ro->ro_nh);
4198
4199 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4200 /* failed to prepend data, give up */
4201 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4202 sctp_m_freem(m);
4203 return (ENOMEM);
4204 }
4205 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
4206 if (port) {
4207 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip) + sizeof(struct udphdr));
4208 SCTP_STAT_INCR(sctps_sendswcrc);
4209 if (V_udp_cksum) {
4210 SCTP_ENABLE_UDP_CSUM(o_pak);
4211 }
4212 } else {
4213 m->m_pkthdr.csum_flags = CSUM_SCTP;
4214 m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
4215 SCTP_STAT_INCR(sctps_sendhwcrc);
4216 }
4217#ifdef SCTP_PACKET_LOGGING
4218 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4219 sctp_packet_log(o_pak);
4220#endif
4221 /* send it out. table id is taken from stcb */
4222 SCTP_PROBE5(send, NULL, stcb, ip, stcb, sctphdr);
4223 SCTP_IP_OUTPUT(ret, o_pak, ro, inp, vrf_id);
4224 if (port) {
4225 UDPSTAT_INC(udps_opackets);
4226 }
4227 SCTP_STAT_INCR(sctps_sendpackets);
4228 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4229 if (ret)
4230 SCTP_STAT_INCR(sctps_senderrors);
4231
4232 SCTPDBG(SCTP_DEBUG_OUTPUT3, "IP output returns %d\n", ret);
4233 if (net == NULL) {
4234 /* free temporary routes */
4235 RO_NHFREE(ro);
4236 } else {
4237 if ((ro->ro_nh != NULL) && (net->ro._s_addr) &&
4238 ((net->dest_state & SCTP_ADDR_NO_PMTUD) == 0)) {
4239 uint32_t mtu;
4240
4241 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_nh);
4242 if (mtu > 0) {
4243 if (net->port) {
4244 mtu -= sizeof(struct udphdr);
4245 }
4246 if (mtu < net->mtu) {
4247 net->mtu = mtu;
4248 if ((stcb != NULL) && (stcb->asoc.smallest_mtu > mtu)) {
4249 sctp_pathmtu_adjustment(stcb, mtu, true);
4250 }
4251 }
4252 }
4253 } else if (ro->ro_nh == NULL) {
4254 /* route was freed */
4255 if (net->ro._s_addr &&
4256 net->src_addr_selected) {
4257 sctp_free_ifa(net->ro._s_addr);
4258 net->ro._s_addr = NULL;
4259 }
4260 net->src_addr_selected = 0;
4261 }
4262 }
4263 return (ret);
4264 }
4265#endif
4266#ifdef INET6
4267 case AF_INET6:
4268 {
4269 uint32_t flowlabel, flowinfo;
4270 struct ip6_hdr *ip6h;
4271 struct route_in6 ip6route;
4272 struct ifnet *ifp;
4273 struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp;
4274 int prev_scope = 0;
4275 struct sockaddr_in6 lsa6_storage;
4276 int error;
4277 u_short prev_port = 0;
4278 int len;
4279
4280 if (net) {
4281 flowlabel = net->flowlabel;
4282 } else if (stcb) {
4283 flowlabel = stcb->asoc.default_flowlabel;
4284 } else {
4285 flowlabel = inp->sctp_ep.default_flowlabel;
4286 }
4287 if (flowlabel == 0) {
4288 /*
4289 * This means, in particular, that it is not set
4290 * at the SCTP layer. So use the value from
4291 * the IP layer.
4292 */
4293 flowlabel = ntohl(((struct inpcb *)inp)->inp_flow);
4294 }
4295 flowlabel &= 0x000fffff;
4296 len = SCTP_MIN_OVERHEAD;
4297 if (port) {
4298 len += sizeof(struct udphdr);
4299 }
4300 newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
4301 if (newm == NULL) {
4302 sctp_m_freem(m);
4303 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4304 return (ENOMEM);
4305 }
4306 SCTP_ALIGN_TO_END(newm, len);
4307 SCTP_BUF_LEN(newm) = len;
4308 SCTP_BUF_NEXT(newm) = m;
4309 m = newm;
4310 if (net != NULL) {
4311 m->m_pkthdr.flowid = net->flowid;
4312 M_HASHTYPE_SET(m, net->flowtype);
4313 } else {
4314 m->m_pkthdr.flowid = mflowid;
4315 M_HASHTYPE_SET(m, mflowtype);
4316 }
4317 packet_length = sctp_calculate_len(m);
4318
4319 ip6h = mtod(m, struct ip6_hdr *);
4320 /* protect *sin6 from overwrite */
4321 sin6 = (struct sockaddr_in6 *)to;
4322 tmp = *sin6;
4323 sin6 = &tmp;
4324
4325 /* KAME hack: embed scopeid */
4326 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
4327 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4328 sctp_m_freem(m);
4329 return (EINVAL);
4330 }
4331 if (net == NULL) {
4332 memset(&ip6route, 0, sizeof(ip6route));
4333 ro = (sctp_route_t *)&ip6route;
4334 memcpy(&ro->ro_dst, sin6, sin6->sin6_len);
4335 } else {
4336 ro = (sctp_route_t *)&net->ro;
4337 }
4338 /*
4339 * We assume here that inp_flow is in host byte
4340 * order within the TCB!
4341 */
4342 if (tos_value == 0) {
4343 /*
4344 * This means, in particular, that it is not set
4345 * at the SCTP layer. So use the value from
4346 * the IP layer.
4347 */
4348 tos_value = (ntohl(((struct inpcb *)inp)->inp_flow) >> 20) & 0xff;
4349 }
4350 tos_value &= 0xfc;
4351 if (ecn_ok) {
4352 tos_value |= sctp_get_ect(stcb);
4353 }
4354 flowinfo = 0x06;
4355 flowinfo <<= 8;
4356 flowinfo |= tos_value;
4357 flowinfo <<= 20;
4358 flowinfo |= flowlabel;
4359 ip6h->ip6_flow = htonl(flowinfo);
4360 if (port) {
4361 ip6h->ip6_nxt = IPPROTO_UDP;
4362 } else {
4363 ip6h->ip6_nxt = IPPROTO_SCTP;
4364 }
4365 ip6h->ip6_plen = htons((uint16_t)(packet_length - sizeof(struct ip6_hdr)));
4366 ip6h->ip6_dst = sin6->sin6_addr;
4367
4368 /*
4369 * Add SRC address selection here: we can only reuse
4370 * the KAME src-addr-sel code to a limited degree, since
4371 * we can try its selection but the result may not be
4372 * a bound address.
4373 */
4374 memset(&lsa6_tmp, 0, sizeof(lsa6_tmp));
4375 lsa6_tmp.sin6_family = AF_INET6;
4376 lsa6_tmp.sin6_len = sizeof(lsa6_tmp);
4377 lsa6 = &lsa6_tmp;
4378 if (net && out_of_asoc_ok == 0) {
4380 sctp_free_ifa(net->ro._s_addr);
4381 net->ro._s_addr = NULL;
4382 net->src_addr_selected = 0;
4383 RO_NHFREE(ro);
4384 }
4385 if (net->src_addr_selected == 0) {
4386 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4387 /* KAME hack: embed scopeid */
4388 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
4389 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4390 sctp_m_freem(m);
4391 return (EINVAL);
4392 }
4393 /* Cache the source address */
4394 net->ro._s_addr = sctp_source_address_selection(inp,
4395 stcb,
4396 ro,
4397 net,
4398 0,
4399 vrf_id);
4400 (void)sa6_recoverscope(sin6);
4401 net->src_addr_selected = 1;
4402 }
4403 if (net->ro._s_addr == NULL) {
4404 SCTPDBG(SCTP_DEBUG_OUTPUT3, "V6:No route to host\n");
4405 net->src_addr_selected = 0;
4406 sctp_handle_no_route(stcb, net, so_locked);
4407 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4408 sctp_m_freem(m);
4409 return (EHOSTUNREACH);
4410 }
4411 lsa6->sin6_addr = net->ro._s_addr->address.sin6.sin6_addr;
4412 } else {
4413 sin6 = (struct sockaddr_in6 *)&ro->ro_dst;
4414 /* KAME hack: embed scopeid */
4415 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
4416 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4417 sctp_m_freem(m);
4418 return (EINVAL);
4419 }
4420 if (over_addr == NULL) {
4421 struct sctp_ifa *_lsrc;
4422
4423 _lsrc = sctp_source_address_selection(inp, stcb, ro,
4424 net,
4425 out_of_asoc_ok,
4426 vrf_id);
4427 if (_lsrc == NULL) {
4428 sctp_handle_no_route(stcb, net, so_locked);
4429 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4430 sctp_m_freem(m);
4431 return (EHOSTUNREACH);
4432 }
4433 lsa6->sin6_addr = _lsrc->address.sin6.sin6_addr;
4434 sctp_free_ifa(_lsrc);
4435 } else {
4436 lsa6->sin6_addr = over_addr->sin6.sin6_addr;
4437 SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
4438 }
4439 (void)sa6_recoverscope(sin6);
4440 }
4441 lsa6->sin6_port = inp->sctp_lport;
4442
4443 if (ro->ro_nh == NULL) {
4444 /*
4445 * src addr selection failed to find a route
4446 * (or valid source addr), so we can't get
4447 * there from here!
4448 */
4449 sctp_handle_no_route(stcb, net, so_locked);
4450 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4451 sctp_m_freem(m);
4452 return (EHOSTUNREACH);
4453 }
4454 /*
4455 * XXX: sa6 may not have a valid sin6_scope_id in
4456 * the non-SCOPEDROUTING case.
4457 */
4458 memset(&lsa6_storage, 0, sizeof(lsa6_storage));
4459 lsa6_storage.sin6_family = AF_INET6;
4460 lsa6_storage.sin6_len = sizeof(lsa6_storage);
4461 lsa6_storage.sin6_addr = lsa6->sin6_addr;
4462 if ((error = sa6_recoverscope(&lsa6_storage)) != 0) {
4463 SCTPDBG(SCTP_DEBUG_OUTPUT3, "recover scope fails error %d\n", error);
4464 sctp_m_freem(m);
4465 return (error);
4466 }
4467 /* XXX */
4468 lsa6_storage.sin6_addr = lsa6->sin6_addr;
4469 lsa6_storage.sin6_port = inp->sctp_lport;
4470 lsa6 = &lsa6_storage;
4471 ip6h->ip6_src = lsa6->sin6_addr;
4472
4473 if (port) {
4474 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
4475 sctp_handle_no_route(stcb, net, so_locked);
4476 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4477 sctp_m_freem(m);
4478 return (EHOSTUNREACH);
4479 }
4480 udp = (struct udphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4481 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4482 udp->uh_dport = port;
4483 udp->uh_ulen = htons((uint16_t)(packet_length - sizeof(struct ip6_hdr)));
4484 udp->uh_sum = 0;
4485 sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4486 } else {
4487 sctphdr = (struct sctphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4488 }
4489
4490 sctphdr->src_port = src_port;
4491 sctphdr->dest_port = dest_port;
4492 sctphdr->v_tag = v_tag;
4493 sctphdr->checksum = 0;
4494
4495 /*
4496 * We set the hop limit now since there is a good
4497 * chance that our ro pointer is now filled
4498 */
4499 ip6h->ip6_hlim = SCTP_GET_HLIM(inp, ro);
4501
4502#ifdef SCTP_DEBUG
4503 /* Copy to be sure something bad is not happening */
4504 sin6->sin6_addr = ip6h->ip6_dst;
4505 lsa6->sin6_addr = ip6h->ip6_src;
4506#endif
4507
4508 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv6 output routine from low level\n");
4509 SCTPDBG(SCTP_DEBUG_OUTPUT3, "src: ");
4510 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)lsa6);
4511 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst: ");
4512 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)sin6);
4513 if (net) {
4514 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4515 /*
4516 * preserve the port and scope for link
4517 * local send
4518 */
4519 prev_scope = sin6->sin6_scope_id;
4520 prev_port = sin6->sin6_port;
4521 }
4522
4523 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4524 /* failed to prepend data, give up */
4525 sctp_m_freem(m);
4526 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4527 return (ENOMEM);
4528 }
4529 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
4530 if (port) {
4531 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
4532 SCTP_STAT_INCR(sctps_sendswcrc);
4533 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), packet_length - sizeof(struct ip6_hdr))) == 0) {
4534 udp->uh_sum = 0xffff;
4535 }
4536 } else {
4537 m->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
4538 m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
4539 SCTP_STAT_INCR(sctps_sendhwcrc);
4540 }
4541 /* send it out. table id is taken from stcb */
4542#ifdef SCTP_PACKET_LOGGING
4543 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4544 sctp_packet_log(o_pak);
4545#endif
4546 SCTP_PROBE5(send, NULL, stcb, ip6h, stcb, sctphdr);
4547 SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, &ifp, inp, vrf_id);
4548 if (net) {
4549 /* for link local this must be done */
4550 sin6->sin6_scope_id = prev_scope;
4551 sin6->sin6_port = prev_port;
4552 }
4553 SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
4554 if (port) {
4555 UDPSTAT_INC(udps_opackets);
4556 }
4557 SCTP_STAT_INCR(sctps_sendpackets);
4558 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4559 if (ret) {
4560 SCTP_STAT_INCR(sctps_senderrors);
4561 }
4562 if (net == NULL) {
4563 /* Now if we had a temp route free it */
4564 RO_NHFREE(ro);
4565 } else {
4566 /*
4567 * PMTU check versus smallest asoc MTU goes
4568 * here
4569 */
4570 if (ro->ro_nh == NULL) {
4571 /* Route was freed */
4572 if (net->ro._s_addr &&
4573 net->src_addr_selected) {
4574 sctp_free_ifa(net->ro._s_addr);
4575 net->ro._s_addr = NULL;
4576 }
4577 net->src_addr_selected = 0;
4578 }
4579 if ((ro->ro_nh != NULL) && (net->ro._s_addr) &&
4580 ((net->dest_state & SCTP_ADDR_NO_PMTUD) == 0)) {
4581 uint32_t mtu;
4582
4583 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_nh);
4584 if (mtu > 0) {
4585 if (net->port) {
4586 mtu -= sizeof(struct udphdr);
4587 }
4588 if (mtu < net->mtu) {
4589 net->mtu = mtu;
4590 if ((stcb != NULL) && (stcb->asoc.smallest_mtu > mtu)) {
4591 sctp_pathmtu_adjustment(stcb, mtu, false);
4592 }
4593 }
4594 }
4595 } else if (ifp != NULL) {
4596 if ((ND_IFINFO(ifp)->linkmtu > 0) &&
4597 (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) {
4598 sctp_pathmtu_adjustment(stcb, ND_IFINFO(ifp)->linkmtu, false);
4599 }
4600 }
4601 }
4602 return (ret);
4603 }
4604#endif
4605 default:
4606 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
4607 ((struct sockaddr *)to)->sa_family);
4608 sctp_m_freem(m);
4609 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
4610 return (EFAULT);
4611 }
4612}
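 /*
  * Editor's note: the transmit path above is symmetric for IPv4 and
  * IPv6: prepend a fresh mbuf holding the IP header (plus a UDP header
  * when the packet is UDP-encapsulated), select and cache a source
  * address if the net does not already have one, fill in the SCTP
  * common header and hand the chain to SCTP_IP_OUTPUT()/
  * SCTP_IP6_OUTPUT().  For plain SCTP the CRC32c is left to the NIC
  * via the CSUM_SCTP/CSUM_SCTP_IPV6 offload flags; for UDP-encapsulated
  * packets it is computed in software before the UDP checksum is set
  * up.  On return, the MTU learned from the route (minus the UDP
  * header when tunneled) is folded back into net->mtu and, if smaller,
  * into the association's smallest known path MTU.
  */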
4613
4614void
4615sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked)
4616{
4617 struct mbuf *m, *m_last;
4618 struct sctp_nets *net;
4619 struct sctp_init_chunk *init;
4620 struct sctp_supported_addr_param *sup_addr;
4621 struct sctp_adaptation_layer_indication *ali;
4622 struct sctp_supported_chunk_types_param *pr_supported;
4623 struct sctp_paramhdr *ph;
4624 int cnt_inits_to = 0;
4625 int error;
4626 uint16_t num_ext, chunk_len, padding_len, parameter_len;
4627
4628 /* INIT's always go to the primary (and usually ONLY address) */
4629 net = stcb->asoc.primary_destination;
4630 if (net == NULL) {
4631 net = TAILQ_FIRST(&stcb->asoc.nets);
4632 if (net == NULL) {
4633 /* TSNH */
4634 return;
4635 }
4636 /* we confirm any address we send an INIT to */
4637 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
4638 (void)sctp_set_primary_addr(stcb, NULL, net);
4639 } else {
4640 /* we confirm any address we send an INIT to */
4641 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
4642 }
4643 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT\n");
4644#ifdef INET6
4645 if (net->ro._l_addr.sa.sa_family == AF_INET6) {
4646 /*
4647 * special hook, if we are sending to link local it will not
4648 * show up in our private address count.
4649 */
4650 if (IN6_IS_ADDR_LINKLOCAL(&net->ro._l_addr.sin6.sin6_addr))
4651 cnt_inits_to = 1;
4652 }
4653#endif
4654 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4655 /* This case should not happen */
4656 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - failed timer?\n");
4657 return;
4658 }
4659 /* start the INIT timer */
4660 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net);
4661
4662 m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_NOWAIT, 1, MT_DATA);
4663 if (m == NULL) {
4664 /* No memory, INIT timer will re-attempt. */
4665 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - mbuf?\n");
4666 return;
4667 }
4668 chunk_len = (uint16_t)sizeof(struct sctp_init_chunk);
4669 padding_len = 0;
4670 /* Now lets put the chunk header in place */
4671 init = mtod(m, struct sctp_init_chunk *);
4672 /* now the chunk header */
4673 init->ch.chunk_type = SCTP_INITIATION;
4674 init->ch.chunk_flags = 0;
4675 /* fill in later from mbuf we build */
4676 init->ch.chunk_length = 0;
4677 /* place in my tag */
4678 init->init.initiate_tag = htonl(stcb->asoc.my_vtag);
4679 /* set up some of the credits. */
4680 init->init.a_rwnd = htonl(max(inp->sctp_socket ? SCTP_SB_LIMIT_RCV(inp->sctp_socket) : 0,
4681 SCTP_MINIMAL_RWND));
4682 init->init.num_outbound_streams = htons(stcb->asoc.pre_open_streams);
4683 init->init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams);
4684 init->init.initial_tsn = htonl(stcb->asoc.init_seq_number);
4685
4686 /* Adaptation layer indication parameter */
4687 if (inp->sctp_ep.adaptation_layer_indicator_provided) {
4688 parameter_len = (uint16_t)sizeof(struct sctp_adaptation_layer_indication);
4689 ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t)+chunk_len);
4690 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
4691 ali->ph.param_length = htons(parameter_len);
4692 ali->indication = htonl(inp->sctp_ep.adaptation_layer_indicator);
4693 chunk_len += parameter_len;
4694 }
4695
4696 /* ECN parameter */
4697 if (stcb->asoc.ecn_supported == 1) {
4698 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
4699 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
4700 ph->param_type = htons(SCTP_ECN_CAPABLE);
4701 ph->param_length = htons(parameter_len);
4702 chunk_len += parameter_len;
4703 }
4704
4705 /* PR-SCTP supported parameter */
4706 if (stcb->asoc.prsctp_supported == 1) {
4707 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
4708 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
4709 ph->param_type = htons(SCTP_PRSCTP_SUPPORTED);
4710 ph->param_length = htons(parameter_len);
4711 chunk_len += parameter_len;
4712 }
4713
4714 /* Add NAT friendly parameter. */
4715 if (SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly)) {
4716 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
4717 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
4718 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
4719 ph->param_length = htons(parameter_len);
4720 chunk_len += parameter_len;
4721 }
4722
4723 /* And now tell the peer which extensions we support */
4724 num_ext = 0;
4725 pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t)+chunk_len);
4726 if (stcb->asoc.prsctp_supported == 1) {
4727 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
4728 if (stcb->asoc.idata_supported) {
4729 pr_supported->chunk_types[num_ext++] = SCTP_IFORWARD_CUM_TSN;
4730 }
4731 }
4732 if (stcb->asoc.auth_supported == 1) {
4733 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
4734 }
4735 if (stcb->asoc.asconf_supported == 1) {
4736 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
4737 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
4738 }
4739 if (stcb->asoc.reconfig_supported == 1) {
4740 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
4741 }
4742 if (stcb->asoc.idata_supported) {
4743 pr_supported->chunk_types[num_ext++] = SCTP_IDATA;
4744 }
4745 if (stcb->asoc.nrsack_supported == 1) {
4746 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
4747 }
4748 if (stcb->asoc.pktdrop_supported == 1) {
4749 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
4750 }
4751 if (num_ext > 0) {
4752 parameter_len = (uint16_t)sizeof(struct sctp_supported_chunk_types_param) + num_ext;
4753 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
4754 pr_supported->ph.param_length = htons(parameter_len);
4755 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
4756 chunk_len += parameter_len;
4757 }
4758 /* add authentication parameters */
4759 if (stcb->asoc.auth_supported) {
4760 /* attach RANDOM parameter, if available */
4761 if (stcb->asoc.authinfo.random != NULL) {
4762 struct sctp_auth_random *randp;
4763
4764 if (padding_len > 0) {
4765 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4766 chunk_len += padding_len;
4767 padding_len = 0;
4768 }
4769 randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+chunk_len);
4770 parameter_len = (uint16_t)sizeof(struct sctp_auth_random) + stcb->asoc.authinfo.random_len;
4771 /* random key already contains the header */
4772 memcpy(randp, stcb->asoc.authinfo.random->key, parameter_len);
4773 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
4774 chunk_len += parameter_len;
4775 }
4776 /* add HMAC_ALGO parameter */
4777 if (stcb->asoc.local_hmacs != NULL) {
4778 struct sctp_auth_hmac_algo *hmacs;
4779
4780 if (padding_len > 0) {
4781 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4782 chunk_len += padding_len;
4783 padding_len = 0;
4784 }
4785 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+chunk_len);
4786 parameter_len = (uint16_t)(sizeof(struct sctp_auth_hmac_algo) +
4787 stcb->asoc.local_hmacs->num_algo * sizeof(uint16_t));
4788 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
4789 hmacs->ph.param_length = htons(parameter_len);
4791 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
4792 chunk_len += parameter_len;
4793 }
4794 /* add CHUNKS parameter */
4795 if (stcb->asoc.local_auth_chunks != NULL) {
4796 struct sctp_auth_chunk_list *chunks;
4797
4798 if (padding_len > 0) {
4799 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4800 chunk_len += padding_len;
4801 padding_len = 0;
4802 }
4803 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+chunk_len);
4804 parameter_len = (uint16_t)(sizeof(struct sctp_auth_chunk_list) +
4806 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
4807 chunks->ph.param_length = htons(parameter_len);
4809 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
4810 chunk_len += parameter_len;
4811 }
4812 }
4813
4814 /* now any cookie time extensions */
4815 if (stcb->asoc.cookie_preserve_req > 0) {
4816 struct sctp_cookie_perserve_param *cookie_preserve;
4817
4818 if (padding_len > 0) {
4819 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4820 chunk_len += padding_len;
4821 padding_len = 0;
4822 }
4823 parameter_len = (uint16_t)sizeof(struct sctp_cookie_perserve_param);
4824 cookie_preserve = (struct sctp_cookie_perserve_param *)(mtod(m, caddr_t)+chunk_len);
4825 cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE);
4826 cookie_preserve->ph.param_length = htons(parameter_len);
4827 cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req);
4828 stcb->asoc.cookie_preserve_req = 0;
4829 chunk_len += parameter_len;
4830 }
4831
4832 if (stcb->asoc.scope.ipv4_addr_legal || stcb->asoc.scope.ipv6_addr_legal) {
4833 uint8_t i;
4834
4835 if (padding_len > 0) {
4836 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4837 chunk_len += padding_len;
4838 padding_len = 0;
4839 }
4840 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
4841 if (stcb->asoc.scope.ipv4_addr_legal) {
4842 parameter_len += (uint16_t)sizeof(uint16_t);
4843 }
4844 if (stcb->asoc.scope.ipv6_addr_legal) {
4845 parameter_len += (uint16_t)sizeof(uint16_t);
4846 }
4847 sup_addr = (struct sctp_supported_addr_param *)(mtod(m, caddr_t)+chunk_len);
4848 sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE);
4849 sup_addr->ph.param_length = htons(parameter_len);
4850 i = 0;
4851 if (stcb->asoc.scope.ipv4_addr_legal) {
4852 sup_addr->addr_type[i++] = htons(SCTP_IPV4_ADDRESS);
4853 }
4854 if (stcb->asoc.scope.ipv6_addr_legal) {
4855 sup_addr->addr_type[i++] = htons(SCTP_IPV6_ADDRESS);
4856 }
4857 padding_len = 4 - 2 * i;
4858 chunk_len += parameter_len;
4859 }
4860
4861 SCTP_BUF_LEN(m) = chunk_len;
4862 /* now the addresses */
4863 /*
4864 * To optimize this we could put the scoping stuff into a structure
4865 * and remove the individual uint8's from the assoc structure. Then
4866 * we could just sifa in the address within the stcb. But for now
4867 * this is a quick hack to get the address stuff teased apart.
4868 */
4869 m_last = sctp_add_addresses_to_i_ia(inp, stcb, &stcb->asoc.scope,
4870 m, cnt_inits_to,
4871 &padding_len, &chunk_len);
4872
4873 init->ch.chunk_length = htons(chunk_len);
4874 if (padding_len > 0) {
4875 if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
4876 sctp_m_freem(m);
4877 return;
4878 }
4879 }
4880 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - calls lowlevel_output\n");
4881 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
4882 (struct sockaddr *)&net->ro._l_addr,
4883 m, 0, NULL, 0, 0, 0, 0,
4884 inp->sctp_lport, stcb->rport, htonl(0),
4885 net->port, NULL,
4886 0, 0,
4887 so_locked))) {
4888 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak send error %d\n", error);
4889 if (error == ENOBUFS) {
4890 stcb->asoc.ifp_had_enobuf = 1;
4891 SCTP_STAT_INCR(sctps_lowlevelerr);
4892 }
4893 } else {
4894 stcb->asoc.ifp_had_enobuf = 0;
4895 }
4896 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
4898}
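 /*
  * Editor's note on the INIT built above: after the fixed INIT header
  * the optional parameters are appended in order -- adaptation layer
  * indication, ECN, PR-SCTP, NAT friendliness, the supported-extensions
  * list, the AUTH parameters (RANDOM, HMAC algorithms, chunk list), a
  * cookie preservative if one was requested, the supported address
  * types and finally the local addresses via
  * sctp_add_addresses_to_i_ia().  Every parameter is padded to a
  * 4-byte boundary before the next one is written, and the INIT timer
  * started above takes care of retransmitting the packet if it is lost.
  */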
4899
4900struct mbuf *
4902 int param_offset, int *abort_processing,
4903 struct sctp_chunkhdr *cp,
4904 int *nat_friendly,
4905 int *cookie_found)
4906{
4907 /*
4908 * Given an mbuf containing an INIT or INIT-ACK, with param_offset
4909 * pointing at the beginning of the parameters (i.e. iphlen +
4910 * sizeof(struct sctp_init_msg)), parse through the parameters to the
4911 * end of the mbuf, verifying that all parameters are known.
4912 *
4913 * For unknown parameters build and return an mbuf with
4914 * UNRECOGNIZED_PARAMETER errors. If the parameter flags indicate that
4915 * processing of this chunk must stop, set *abort_processing to 1.
4916 *
4917 * By having param_offset pre-set to where the parameters begin, it is
4918 * hoped that this routine may be reused in the future by new
4919 * features.
4920 */
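 /*
  * Editor's note: the "stop actions" referred to below come from the
  * two high-order bits of the parameter type (RFC 4960, section
  * 3.2.1): 00 = stop processing, 01 = stop and report, 10 = skip the
  * parameter, 11 = skip and report it in an Unrecognized Parameter
  * cause.  That is what the op_err mbuf assembled by this routine
  * carries back to the caller.
  */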
4921 struct sctp_paramhdr *phdr, params;
4922
4923 struct mbuf *mat, *m_tmp, *op_err, *op_err_last;
4924 int at, limit, pad_needed;
4925 uint16_t ptype, plen, padded_size;
4926
4927 *abort_processing = 0;
4928 if (cookie_found != NULL) {
4929 *cookie_found = 0;
4930 }
4931 mat = in_initpkt;
4932 limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk);
4933 at = param_offset;
4934 op_err = NULL;
4935 op_err_last = NULL;
4936 pad_needed = 0;
4937 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Check for unrecognized param's\n");
4938 phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
4939 while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) {
4940 ptype = ntohs(phdr->param_type);
4941 plen = ntohs(phdr->param_length);
4942 if ((plen > limit) || (plen < sizeof(struct sctp_paramhdr))) {
4943 /* wacked parameter */
4944 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error %d\n", plen);
4945 goto invalid_size;
4946 }
4947 limit -= SCTP_SIZE32(plen);
4948 /*-
4949 * All parameters for all chunks that we know/understand are
4950 * listed here. We process them elsewhere and take the
4951 * appropriate stop actions per the upper bits. However, this
4952 * is the generic routine that processors can call to get back
4953 * an operational error to either incorporate (init-ack) or send.
4954 */
4955 padded_size = SCTP_SIZE32(plen);
4956 switch (ptype) {
4957 /* Param's with variable size */
4958 case SCTP_HEARTBEAT_INFO:
4959 case SCTP_UNRECOG_PARAM:
4960 case SCTP_ERROR_CAUSE_IND:
4961 /* ok skip fwd */
4962 at += padded_size;
4963 break;
4964 case SCTP_STATE_COOKIE:
4965 if (cookie_found != NULL) {
4966 *cookie_found = 1;
4967 }
4968 at += padded_size;
4969 break;
4970 /* Param's with variable size within a range */
4971 case SCTP_CHUNK_LIST:
4972 case SCTP_SUPPORTED_CHUNK_EXT:
4973 if (padded_size > (sizeof(struct sctp_supported_chunk_types_param) + (sizeof(uint8_t) * SCTP_MAX_SUPPORTED_EXT))) {
4974 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error chklist %d\n", plen);
4975 goto invalid_size;
4976 }
4977 at += padded_size;
4978 break;
4979 case SCTP_SUPPORTED_ADDRTYPE:
4980 if (padded_size > SCTP_MAX_ADDR_PARAMS_SIZE) {
4981 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error supaddrtype %d\n", plen);
4982 goto invalid_size;
4983 }
4984 at += padded_size;
4985 break;
4986 case SCTP_RANDOM:
4987 if (padded_size > (sizeof(struct sctp_auth_random) + SCTP_RANDOM_MAX_SIZE)) {
4988 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error random %d\n", plen);
4989 goto invalid_size;
4990 }
4991 at += padded_size;
4992 break;
4993 case SCTP_SET_PRIM_ADDR:
4994 case SCTP_DEL_IP_ADDRESS:
4995 case SCTP_ADD_IP_ADDRESS:
4996 if ((padded_size != sizeof(struct sctp_asconf_addrv4_param)) &&
4997 (padded_size != sizeof(struct sctp_asconf_addr_param))) {
4998 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error setprim %d\n", plen);
4999 goto invalid_size;
5000 }
5001 at += padded_size;
5002 break;
5003 /* Param's with a fixed size */
5004 case SCTP_IPV4_ADDRESS:
5005 if (padded_size != sizeof(struct sctp_ipv4addr_param)) {
5006 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv4 addr %d\n", plen);
5007 goto invalid_size;
5008 }
5009 at += padded_size;
5010 break;
5011 case SCTP_IPV6_ADDRESS:
5012 if (padded_size != sizeof(struct sctp_ipv6addr_param)) {
5013 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv6 addr %d\n", plen);
5014 goto invalid_size;
5015 }
5016 at += padded_size;
5017 break;
5018 case SCTP_COOKIE_PRESERVE:
5019 if (padded_size != sizeof(struct sctp_cookie_perserve_param)) {
5020 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error cookie-preserve %d\n", plen);
5021 goto invalid_size;
5022 }
5023 at += padded_size;
5024 break;
5025 case SCTP_HAS_NAT_SUPPORT:
5026 *nat_friendly = 1;
5027 /* fall through */
5028 case SCTP_PRSCTP_SUPPORTED:
5029 if (padded_size != sizeof(struct sctp_paramhdr)) {
5030 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error prsctp/nat support %d\n", plen);
5031 goto invalid_size;
5032 }
5033 at += padded_size;
5034 break;
5035 case SCTP_ECN_CAPABLE:
5036 if (padded_size != sizeof(struct sctp_paramhdr)) {
5037 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ecn %d\n", plen);
5038 goto invalid_size;
5039 }
5040 at += padded_size;
5041 break;
5042 case SCTP_ULP_ADAPTATION:
5043 if (padded_size != sizeof(struct sctp_adaptation_layer_indication)) {
5044 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error adapatation %d\n", plen);
5045 goto invalid_size;
5046 }
5047 at += padded_size;
5048 break;
5049 case SCTP_SUCCESS_REPORT:
5050 if (padded_size != sizeof(struct sctp_asconf_paramhdr)) {
5051 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error success %d\n", plen);
5052 goto invalid_size;
5053 }
5054 at += padded_size;
5055 break;
5056 case SCTP_HOSTNAME_ADDRESS:
5057 {
5058 /* Hostname parameters are deprecated. */
5059 struct sctp_gen_error_cause *cause;
5060 int l_len;
5061
5062 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Can't handle hostname addresses.. abort processing\n");
5063 *abort_processing = 1;
5064 sctp_m_freem(op_err);
5065 op_err = NULL;
5066 op_err_last = NULL;
5067#ifdef INET6
5068 l_len = SCTP_MIN_OVERHEAD;
5069#else
5070 l_len = SCTP_MIN_V4_OVERHEAD;
5071#endif
5072 l_len += sizeof(struct sctp_chunkhdr);
5073 l_len += sizeof(struct sctp_gen_error_cause);
5074 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5075 if (op_err != NULL) {
5076 /*
5077 * Pre-reserve space for IP, SCTP,
5078 * and chunk header.
5079 */
5080#ifdef INET6
5081 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5082#else
5083 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5084#endif
5085 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5086 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5087 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
5088 cause = mtod(op_err, struct sctp_gen_error_cause *);
5089 cause->code = htons(SCTP_CAUSE_UNRESOLVABLE_ADDR);
5090 cause->length = htons((uint16_t)(sizeof(struct sctp_gen_error_cause) + plen));
5091 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(mat, at, plen, M_NOWAIT);
5092 if (SCTP_BUF_NEXT(op_err) == NULL) {
5093 sctp_m_freem(op_err);
5094 op_err = NULL;
5095 op_err_last = NULL;
5096 }
5097 }
5098 return (op_err);
5099 }
5100 default:
5101 /*
5102 * We do not recognize the parameter; figure out
5103 * what to do with it.
5104 */
5105 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Hit default param %x\n", ptype);
5106 if ((ptype & 0x4000) == 0x4000) {
5107 /* Report bit is set?? */
5108 SCTPDBG(SCTP_DEBUG_OUTPUT1, "report op err\n");
5109 if (op_err == NULL) {
5110 int l_len;
5111
5112 /* Ok need to try to get an mbuf */
5113#ifdef INET6
5114 l_len = SCTP_MIN_OVERHEAD;
5115#else
5116 l_len = SCTP_MIN_V4_OVERHEAD;
5117#endif
5118 l_len += sizeof(struct sctp_chunkhdr);
5119 l_len += sizeof(struct sctp_paramhdr);
5120 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5121 if (op_err) {
5122 SCTP_BUF_LEN(op_err) = 0;
5123#ifdef INET6
5124 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5125#else
5126 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5127#endif
5128 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5129 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5130 op_err_last = op_err;
5131 }
5132 }
5133 if (op_err != NULL) {
5134 /* If we have space */
5135 struct sctp_paramhdr *param;
5136
5137 if (pad_needed > 0) {
5138 op_err_last = sctp_add_pad_tombuf(op_err_last, pad_needed);
5139 }
5140 if (op_err_last == NULL) {
5141 sctp_m_freem(op_err);
5142 op_err = NULL;
5143 op_err_last = NULL;
5144 goto more_processing;
5145 }
5146 if (M_TRAILINGSPACE(op_err_last) < (int)sizeof(struct sctp_paramhdr)) {
5147 m_tmp = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_NOWAIT, 1, MT_DATA);
5148 if (m_tmp == NULL) {
5149 sctp_m_freem(op_err);
5150 op_err = NULL;
5151 op_err_last = NULL;
5152 goto more_processing;
5153 }
5154 SCTP_BUF_LEN(m_tmp) = 0;
5155 SCTP_BUF_NEXT(m_tmp) = NULL;
5156 SCTP_BUF_NEXT(op_err_last) = m_tmp;
5157 op_err_last = m_tmp;
5158 }
5159 param = (struct sctp_paramhdr *)(mtod(op_err_last, caddr_t)+SCTP_BUF_LEN(op_err_last));
5160 param->param_type = htons(SCTP_UNRECOG_PARAM);
5161 param->param_length = htons((uint16_t)sizeof(struct sctp_paramhdr) + plen);
5162 SCTP_BUF_LEN(op_err_last) += sizeof(struct sctp_paramhdr);
5163 SCTP_BUF_NEXT(op_err_last) = SCTP_M_COPYM(mat, at, plen, M_NOWAIT);
5164 if (SCTP_BUF_NEXT(op_err_last) == NULL) {
5165 sctp_m_freem(op_err);
5166 op_err = NULL;
5167 op_err_last = NULL;
5168 goto more_processing;
5169 } else {
5170 while (SCTP_BUF_NEXT(op_err_last) != NULL) {
5171 op_err_last = SCTP_BUF_NEXT(op_err_last);
5172 }
5173 }
5174 if (plen % 4 != 0) {
5175 pad_needed = 4 - (plen % 4);
5176 } else {
5177 pad_needed = 0;
5178 }
5179 }
5180 }
5181 more_processing:
5182 if ((ptype & 0x8000) == 0x0000) {
5183 SCTPDBG(SCTP_DEBUG_OUTPUT1, "stop proc\n");
5184 return (op_err);
5185 } else {
5186 /* skip this chunk and continue processing */
5187 SCTPDBG(SCTP_DEBUG_OUTPUT1, "move on\n");
5188 at += SCTP_SIZE32(plen);
5189 }
5190 break;
5191 }
5192 phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
5193 }
5194 return (op_err);
5195invalid_size:
5196 SCTPDBG(SCTP_DEBUG_OUTPUT1, "abort flag set\n");
5197 *abort_processing = 1;
5198 sctp_m_freem(op_err);
5199 op_err = NULL;
5200 op_err_last = NULL;
5201 if (phdr != NULL) {
5202 struct sctp_paramhdr *param;
5203 int l_len;
5204#ifdef INET6
5205 l_len = SCTP_MIN_OVERHEAD;
5206#else
5207 l_len = SCTP_MIN_V4_OVERHEAD;
5208#endif
5209 l_len += sizeof(struct sctp_chunkhdr);
5210 l_len += (2 * sizeof(struct sctp_paramhdr));
5211 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5212 if (op_err) {
5213 SCTP_BUF_LEN(op_err) = 0;
5214#ifdef INET6
5215 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5216#else
5217 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5218#endif
5219 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5220 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5221 SCTP_BUF_LEN(op_err) = 2 * sizeof(struct sctp_paramhdr);
5222 param = mtod(op_err, struct sctp_paramhdr *);
5223 param->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5224 param->param_length = htons(2 * sizeof(struct sctp_paramhdr));
5225 param++;
5226 param->param_type = htons(ptype);
5227 param->param_length = htons(plen);
5228 }
5229 }
5230 return (op_err);
5231}
5232
5233/*
5234 * Given an INIT chunk, look through the parameters to verify that there
5235 * are no new addresses.
5236 * Return true if there is a new address or there is a problem parsing
5237 * the parameters. Provide an optional error cause used when sending an ABORT.
5238 * Return false if there are no new addresses and there is no problem in
5239 * parameter processing.
5240 */
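/*
 * In practice this means two checks, both against the addresses already
 * known in asoc->nets: the source address of the received packet itself
 * (when its address family is legal for this association) and every
 * IPv4/IPv6 address parameter carried in the INIT. Anything not already
 * present counts as a new address.
 */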
5241static bool
5242sctp_are_there_new_addresses(struct sctp_association *asoc,
5243 struct mbuf *in_initpkt, int offset, int limit, struct sockaddr *src,
5244 struct mbuf **op_err)
5245{
5246 struct sockaddr *sa_touse;
5247 struct sockaddr *sa;
5248 struct sctp_paramhdr *phdr, params;
5249 struct sctp_nets *net;
5250#ifdef INET
5251 struct sockaddr_in sin4, *sa4;
5252#endif
5253#ifdef INET6
5254 struct sockaddr_in6 sin6, *sa6;
5255#endif
5256 uint16_t ptype, plen;
5257 bool fnd, check_src;
5258
5259 *op_err = NULL;
5260#ifdef INET
5261 memset(&sin4, 0, sizeof(sin4));
5262 sin4.sin_family = AF_INET;
5263 sin4.sin_len = sizeof(sin4);
5264#endif
5265#ifdef INET6
5266 memset(&sin6, 0, sizeof(sin6));
5267 sin6.sin6_family = AF_INET6;
5268 sin6.sin6_len = sizeof(sin6);
5269#endif
5270 /* First what about the src address of the pkt ? */
5271 check_src = false;
5272 switch (src->sa_family) {
5273#ifdef INET
5274 case AF_INET:
5275 if (asoc->scope.ipv4_addr_legal) {
5276 check_src = true;
5277 }
5278 break;
5279#endif
5280#ifdef INET6
5281 case AF_INET6:
5282 if (asoc->scope.ipv6_addr_legal) {
5283 check_src = true;
5284 }
5285 break;
5286#endif
5287 default:
5288 /* TSNH */
5289 break;
5290 }
5291 if (check_src) {
5292 fnd = false;
5293 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5294 sa = (struct sockaddr *)&net->ro._l_addr;
5295 if (sa->sa_family == src->sa_family) {
5296#ifdef INET
5297 if (sa->sa_family == AF_INET) {
5298 struct sockaddr_in *src4;
5299
5300 sa4 = (struct sockaddr_in *)sa;
5301 src4 = (struct sockaddr_in *)src;
5302 if (sa4->sin_addr.s_addr == src4->sin_addr.s_addr) {
5303 fnd = true;
5304 break;
5305 }
5306 }
5307#endif
5308#ifdef INET6
5309 if (sa->sa_family == AF_INET6) {
5310 struct sockaddr_in6 *src6;
5311
5312 sa6 = (struct sockaddr_in6 *)sa;
5313 src6 = (struct sockaddr_in6 *)src;
5314 if (SCTP6_ARE_ADDR_EQUAL(sa6, src6)) {
5315 fnd = true;
5316 break;
5317 }
5318 }
5319#endif
5320 }
5321 }
5322 if (!fnd) {
5323 /*
5324 * If sending an ABORT in case of an additional
5325 * address, don't use the new address error cause.
5326 * This looks no different than if no listener was
5327 * present.
5328 */
5329 *op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), "Address added");
5330 return (true);
5331 }
5332 }
5333 /* OK so far, let's munge through the rest of the packet */
5334 offset += sizeof(struct sctp_init_chunk);
5335 phdr = sctp_get_next_param(in_initpkt, offset, &params, sizeof(params));
5336 while (phdr) {
5337 sa_touse = NULL;
5338 ptype = ntohs(phdr->param_type);
5339 plen = ntohs(phdr->param_length);
5340 if (offset + plen > limit) {
5341 *op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, "Partial parameter");
5342 return (true);
5343 }
5344 if (plen < sizeof(struct sctp_paramhdr)) {
5345 *op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, "Parameter length too small");
5346 return (true);
5347 }
5348 switch (ptype) {
5349#ifdef INET
5350 case SCTP_IPV4_ADDRESS:
5351 {
5352 struct sctp_ipv4addr_param *p4, p4_buf;
5353
5354 if (plen != sizeof(struct sctp_ipv4addr_param)) {
5355 *op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, "Parameter length illegal");
5356 return (true);
5357 }
5358 phdr = sctp_get_next_param(in_initpkt, offset,
5359 (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
5360 if (phdr == NULL) {
5362 return (true);
5363 }
5364 if (asoc->scope.ipv4_addr_legal) {
5365 p4 = (struct sctp_ipv4addr_param *)phdr;
5366 sin4.sin_addr.s_addr = p4->addr;
5367 sa_touse = (struct sockaddr *)&sin4;
5368 }
5369 break;
5370 }
5371#endif
5372#ifdef INET6
5373 case SCTP_IPV6_ADDRESS:
5374 {
5375 struct sctp_ipv6addr_param *p6, p6_buf;
5376
5377 if (plen != sizeof(struct sctp_ipv6addr_param)) {
5378 *op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, "Parameter length illegal");
5379 return (true);
5380 }
5381 phdr = sctp_get_next_param(in_initpkt, offset,
5382 (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
5383 if (phdr == NULL) {
5385 return (true);
5386 }
5387 if (asoc->scope.ipv6_addr_legal) {
5388 p6 = (struct sctp_ipv6addr_param *)phdr;
5389 memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
5390 sizeof(p6->addr));
5391 sa_touse = (struct sockaddr *)&sin6;
5392 }
5393 break;
5394 }
5395#endif
5396 default:
5397 sa_touse = NULL;
5398 break;
5399 }
5400 if (sa_touse) {
5401 /* ok, sa_touse points to one to check */
5402 fnd = false;
5403 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5404 sa = (struct sockaddr *)&net->ro._l_addr;
5405 if (sa->sa_family != sa_touse->sa_family) {
5406 continue;
5407 }
5408#ifdef INET
5409 if (sa->sa_family == AF_INET) {
5410 sa4 = (struct sockaddr_in *)sa;
5411 if (sa4->sin_addr.s_addr ==
5412 sin4.sin_addr.s_addr) {
5413 fnd = true;
5414 break;
5415 }
5416 }
5417#endif
5418#ifdef INET6
5419 if (sa->sa_family == AF_INET6) {
5420 sa6 = (struct sockaddr_in6 *)sa;
5421 if (SCTP6_ARE_ADDR_EQUAL(
5422 sa6, &sin6)) {
5423 fnd = true;
5424 break;
5425 }
5426 }
5427#endif
5428 }
5429 if (!fnd) {
5430 /*
5431 * If sending an ABORT in case of an
5432 * additional address, don't use the new
5433 * address error cause. This looks no
5434 * different than if no listener was
5435 * present.
5436 */
5437 *op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), "Address added");
5438 return (true);
5439 }
5440 }
5441 offset += SCTP_SIZE32(plen);
5442 if (offset >= limit) {
5443 break;
5444 }
5445 phdr = sctp_get_next_param(in_initpkt, offset, &params, sizeof(params));
5446 }
5447 return (false);
5448}
5449
5450/*
5451 * Given a MBUF chain that was sent into us containing an INIT. Build a
5452 * INIT-ACK with COOKIE and send back. We assume that the in_initpkt has done
5453 * a pullup to include IPv6/4header, SCTP header and initial part of INIT
5454 * message (i.e. the struct sctp_init_msg).
5455 */
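/*
 * Roughly, the INIT-ACK is assembled below in this order: the chunk header
 * and basic INIT-ACK fields, optional parameters (adaptation layer
 * indication, ECN, PR-SCTP, NAT friendliness, supported chunk extensions,
 * AUTH material), the address parameters per the computed scoping, any
 * unrecognized-parameter errors echoed back, and finally the state cookie,
 * which is HMAC-signed before the packet is handed to
 * sctp_lowlevel_chunk_output().
 */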
5456void
5457sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
5458 struct sctp_nets *src_net, struct mbuf *init_pkt,
5459 int iphlen, int offset,
5460 struct sockaddr *src, struct sockaddr *dst,
5461 struct sctphdr *sh, struct sctp_init_chunk *init_chk,
5462 uint8_t mflowtype, uint32_t mflowid,
5463 uint32_t vrf_id, uint16_t port)
5464{
5465 struct sctp_association *asoc;
5466 struct mbuf *m, *m_tmp, *m_last, *m_cookie, *op_err;
5467 struct sctp_init_ack_chunk *initack;
5468 struct sctp_adaptation_layer_indication *ali;
5469 struct sctp_supported_chunk_types_param *pr_supported;
5470 struct sctp_paramhdr *ph;
5471 union sctp_sockstore *over_addr;
5472 struct sctp_scoping scp;
5473 struct timeval now;
5474#ifdef INET
5475 struct sockaddr_in *dst4 = (struct sockaddr_in *)dst;
5476 struct sockaddr_in *src4 = (struct sockaddr_in *)src;
5477 struct sockaddr_in *sin;
5478#endif
5479#ifdef INET6
5480 struct sockaddr_in6 *dst6 = (struct sockaddr_in6 *)dst;
5481 struct sockaddr_in6 *src6 = (struct sockaddr_in6 *)src;
5482 struct sockaddr_in6 *sin6;
5483#endif
5484 struct sockaddr *to;
5485 struct sctp_state_cookie stc;
5486 struct sctp_nets *net = NULL;
5487 uint8_t *signature = NULL;
5488 int cnt_inits_to = 0;
5489 uint16_t his_limit, i_want;
5490 int abort_flag;
5491 int nat_friendly = 0;
5492 int error;
5493 struct socket *so;
5494 uint16_t num_ext, chunk_len, padding_len, parameter_len;
5495
5496 if (stcb) {
5497 asoc = &stcb->asoc;
5498 } else {
5499 asoc = NULL;
5500 }
5501 if ((asoc != NULL) &&
5502 (SCTP_GET_STATE(stcb) != SCTP_STATE_COOKIE_WAIT)) {
5503 if (sctp_are_there_new_addresses(asoc, init_pkt, offset, offset + ntohs(init_chk->ch.chunk_length), src, &op_err)) {
5504 /*
5505 * new addresses, out of here in non-cookie-wait
5506 * states
5507 */
5508 sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, op_err,
5509 mflowtype, mflowid, inp->fibnum,
5510 vrf_id, port);
5511 return;
5512 }
5513 if (src_net != NULL && (src_net->port != port)) {
5514 /*
5515 * change of remote encapsulation port, out of here
5516 * in non-cookie-wait states
5517 *
5518 * Send an ABORT, without a specific error cause.
5519 * This looks no different than if no listener was
5520 * present.
5521 */
5522 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5523 "Remote encapsulation port changed");
5524 sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, op_err,
5525 mflowtype, mflowid, inp->fibnum,
5526 vrf_id, port);
5527 return;
5528 }
5529 }
5530 abort_flag = 0;
5531 op_err = sctp_arethere_unrecognized_parameters(init_pkt,
5532 (offset + sizeof(struct sctp_init_chunk)),
5533 &abort_flag,
5534 (struct sctp_chunkhdr *)init_chk,
5535 &nat_friendly, NULL);
5536 if (abort_flag) {
5537do_a_abort:
5538 if (op_err == NULL) {
5539 char msg[SCTP_DIAG_INFO_LEN];
5540
5541 SCTP_SNPRINTF(msg, sizeof(msg), "%s:%d at %s", __FILE__, __LINE__, __func__);
5542 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5543 msg);
5544 }
5545 sctp_send_abort(init_pkt, iphlen, src, dst, sh,
5546 init_chk->init.initiate_tag, op_err,
5547 mflowtype, mflowid, inp->fibnum,
5548 vrf_id, port);
5549 return;
5550 }
5551 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
5552 if (m == NULL) {
5553 /* No memory, INIT timer will re-attempt. */
5554 sctp_m_freem(op_err);
5555 return;
5556 }
5557 chunk_len = (uint16_t)sizeof(struct sctp_init_ack_chunk);
5558 padding_len = 0;
5559
5560 /*
5561 * We might not overwrite the identification[] completely and on
5562 * some platforms time_entered will contain some padding. Therefore
5563 * zero out the cookie to avoid putting uninitialized memory on the
5564 * wire.
5565 */
5566 memset(&stc, 0, sizeof(struct sctp_state_cookie));
5567
5568 /* the time I built cookie */
5569 (void)SCTP_GETTIME_TIMEVAL(&now);
5570 stc.time_entered.tv_sec = now.tv_sec;
5571 stc.time_entered.tv_usec = now.tv_usec;
5572
5573 /* populate any tie tags */
5574 if (asoc != NULL) {
5575 /* unlock before tag selections */
5576 stc.tie_tag_my_vtag = asoc->my_vtag_nonce;
5578 stc.cookie_life = asoc->cookie_life;
5579 net = asoc->primary_destination;
5580 } else {
5581 stc.tie_tag_my_vtag = 0;
5582 stc.tie_tag_peer_vtag = 0;
5583 /* life I will award this cookie */
5585 }
5586
5587 /* copy in the ports for later check */
5588 stc.myport = sh->dest_port;
5589 stc.peerport = sh->src_port;
5590
5591 /*
5592 * If we wanted to honor cookie life extensions, we would add to
5593 * stc.cookie_life. For now we should NOT honor any extension
5594 */
5595 stc.site_scope = stc.local_scope = stc.loopback_scope = 0;
5596 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
5597 stc.ipv6_addr_legal = 1;
5598 if (SCTP_IPV6_V6ONLY(inp)) {
5599 stc.ipv4_addr_legal = 0;
5600 } else {
5601 stc.ipv4_addr_legal = 1;
5602 }
5603 } else {
5604 stc.ipv6_addr_legal = 0;
5605 stc.ipv4_addr_legal = 1;
5606 }
5607 stc.ipv4_scope = 0;
5608 if (net == NULL) {
5609 to = src;
5610 switch (dst->sa_family) {
5611#ifdef INET
5612 case AF_INET:
5613 {
5614 /* lookup address */
5615 stc.address[0] = src4->sin_addr.s_addr;
5616 stc.address[1] = 0;
5617 stc.address[2] = 0;
5618 stc.address[3] = 0;
5620 /* local from address */
5621 stc.laddress[0] = dst4->sin_addr.s_addr;
5622 stc.laddress[1] = 0;
5623 stc.laddress[2] = 0;
5624 stc.laddress[3] = 0;
5626 /* scope_id is only for v6 */
5627 stc.scope_id = 0;
5628 if ((IN4_ISPRIVATE_ADDRESS(&src4->sin_addr)) ||
5629 (IN4_ISPRIVATE_ADDRESS(&dst4->sin_addr))) {
5630 stc.ipv4_scope = 1;
5631 }
5632 /* Must use the address in this case */
5633 if (sctp_is_address_on_local_host(src, vrf_id)) {
5634 stc.loopback_scope = 1;
5635 stc.ipv4_scope = 1;
5636 stc.site_scope = 1;
5637 stc.local_scope = 0;
5638 }
5639 break;
5640 }
5641#endif
5642#ifdef INET6
5643 case AF_INET6:
5644 {
5646 memcpy(&stc.address, &src6->sin6_addr, sizeof(struct in6_addr));
5647 stc.scope_id = ntohs(in6_getscope(&src6->sin6_addr));
5648 if (sctp_is_address_on_local_host(src, vrf_id)) {
5649 stc.loopback_scope = 1;
5650 stc.local_scope = 0;
5651 stc.site_scope = 1;
5652 stc.ipv4_scope = 1;
5653 } else if (IN6_IS_ADDR_LINKLOCAL(&src6->sin6_addr) ||
5654 IN6_IS_ADDR_LINKLOCAL(&dst6->sin6_addr)) {
5655 /*
5656 * If the new destination or source
5657 * is a LINK_LOCAL address, we must
5658 * have both site and local scope in
5659 * common. Don't set local scope,
5660 * though, since we must depend on
5661 * the source to be added implicitly.
5662 * We cannot assume that just because
5663 * we share one link, all links are
5664 * common.
5665 */
5666 stc.local_scope = 0;
5667 stc.site_scope = 1;
5668 stc.ipv4_scope = 1;
5669 /*
5670 * we start counting for the private
5671 * address stuff at 1, since the
5672 * link-local address we source from won't
5673 * show up in our scoped count.
5674 */
5675 cnt_inits_to = 1;
5676 /*
5677 * pull out the scope_id from
5678 * incoming pkt
5679 */
5680 } else if (IN6_IS_ADDR_SITELOCAL(&src6->sin6_addr) ||
5681 IN6_IS_ADDR_SITELOCAL(&dst6->sin6_addr)) {
5682 /*
5683 * If the new destination or source
5684 * is SITE_LOCAL then we must have
5685 * site scope in common.
5686 */
5687 stc.site_scope = 1;
5688 }
5689 memcpy(&stc.laddress, &dst6->sin6_addr, sizeof(struct in6_addr));
5691 break;
5692 }
5693#endif
5694 default:
5695 /* TSNH */
5696 goto do_a_abort;
5697 break;
5698 }
5699 } else {
5700 /* set the scope per the existing tcb */
5701
5702#ifdef INET6
5703 struct sctp_nets *lnet;
5704#endif
5705
5707 stc.ipv4_scope = asoc->scope.ipv4_local_scope;
5708 stc.site_scope = asoc->scope.site_scope;
5709 stc.local_scope = asoc->scope.local_scope;
5710#ifdef INET6
5711 /* Why do we not consider IPv4 LL addresses? */
5712 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
5713 if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) {
5714 if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) {
5715 /*
5716 * if we have a LL address, start
5717 * counting at 1.
5718 */
5719 cnt_inits_to = 1;
5720 }
5721 }
5722 }
5723#endif
5724 /* use the net pointer */
5725 to = (struct sockaddr *)&net->ro._l_addr;
5726 switch (to->sa_family) {
5727#ifdef INET
5728 case AF_INET:
5729 sin = (struct sockaddr_in *)to;
5730 stc.address[0] = sin->sin_addr.s_addr;
5731 stc.address[1] = 0;
5732 stc.address[2] = 0;
5733 stc.address[3] = 0;
5735 if (net->src_addr_selected == 0) {
5736 /*
5737 * strange case here, the INIT should have
5738 * done the selection.
5739 */
5741 stcb, (sctp_route_t *)&net->ro,
5742 net, 0, vrf_id);
5743 if (net->ro._s_addr == NULL) {
5744 sctp_m_freem(op_err);
5745 sctp_m_freem(m);
5746 return;
5747 }
5748
5749 net->src_addr_selected = 1;
5750 }
5751 stc.laddress[0] = net->ro._s_addr->address.sin.sin_addr.s_addr;
5752 stc.laddress[1] = 0;
5753 stc.laddress[2] = 0;
5754 stc.laddress[3] = 0;
5756 /* scope_id is only for v6 */
5757 stc.scope_id = 0;
5758 break;
5759#endif
5760#ifdef INET6
5761 case AF_INET6:
5762 sin6 = (struct sockaddr_in6 *)to;
5763 memcpy(&stc.address, &sin6->sin6_addr,
5764 sizeof(struct in6_addr));
5766 stc.scope_id = sin6->sin6_scope_id;
5767 if (net->src_addr_selected == 0) {
5768 /*
5769 * strange case here, the INIT should have
5770 * done the selection.
5771 */
5773 stcb, (sctp_route_t *)&net->ro,
5774 net, 0, vrf_id);
5775 if (net->ro._s_addr == NULL) {
5776 sctp_m_freem(op_err);
5777 sctp_m_freem(m);
5778 return;
5779 }
5780
5781 net->src_addr_selected = 1;
5782 }
5783 memcpy(&stc.laddress, &net->ro._s_addr->address.sin6.sin6_addr,
5784 sizeof(struct in6_addr));
5786 break;
5787#endif
5788 }
5789 }
5790 /* Now lets put the SCTP header in place */
5791 initack = mtod(m, struct sctp_init_ack_chunk *);
5792 /* Save it off for quick ref */
5793 stc.peers_vtag = ntohl(init_chk->init.initiate_tag);
5794 /* who are we */
5796 min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification)));
5797 memset(stc.reserved, 0, SCTP_RESERVE_SPACE);
5798 /* now the chunk header */
5799 initack->ch.chunk_type = SCTP_INITIATION_ACK;
5800 initack->ch.chunk_flags = 0;
5801 /* fill in later from mbuf we build */
5802 initack->ch.chunk_length = 0;
5803 /* place in my tag */
5804 if ((asoc != NULL) &&
5806 (SCTP_GET_STATE(stcb) == SCTP_STATE_INUSE) ||
5808 /* re-use the v-tags and init-seq here */
5809 initack->init.initiate_tag = htonl(asoc->my_vtag);
5810 initack->init.initial_tsn = htonl(asoc->init_seq_number);
5811 } else {
5812 uint32_t vtag, itsn;
5813
5814 if (asoc) {
5815 atomic_add_int(&asoc->refcnt, 1);
5816 SCTP_TCB_UNLOCK(stcb);
5817 new_tag:
5819 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
5821 if ((asoc->peer_supports_nat) && (vtag == asoc->my_vtag)) {
5822 /*
5823 * Got a duplicate vtag on some guy behind a
5824 * NAT; make sure we don't use it.
5825 */
5826 goto new_tag;
5827 }
5828 initack->init.initiate_tag = htonl(vtag);
5829 /* get a TSN to use too */
5830 itsn = sctp_select_initial_TSN(&inp->sctp_ep);
5831 initack->init.initial_tsn = htonl(itsn);
5832 SCTP_TCB_LOCK(stcb);
5833 atomic_subtract_int(&asoc->refcnt, 1);
5834 } else {
5835 SCTP_INP_INCR_REF(inp);
5836 SCTP_INP_RUNLOCK(inp);
5838 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
5840 initack->init.initiate_tag = htonl(vtag);
5841 /* get a TSN to use too */
5842 initack->init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep));
5843 SCTP_INP_RLOCK(inp);
5844 SCTP_INP_DECR_REF(inp);
5845 }
5846 }
5847 /* save away my tag too */
5848 stc.my_vtag = initack->init.initiate_tag;
5849
5850 /* set up some of the credits. */
5851 so = inp->sctp_socket;
5852 if (so == NULL) {
5853 /* memory problem */
5854 sctp_m_freem(op_err);
5855 sctp_m_freem(m);
5856 return;
5857 } else {
5858 initack->init.a_rwnd = htonl(max(SCTP_SB_LIMIT_RCV(so), SCTP_MINIMAL_RWND));
5859 }
5860 /* set what I want */
5861 his_limit = ntohs(init_chk->init.num_inbound_streams);
5862 /* choose what I want */
5863 if (asoc != NULL) {
5864 if (asoc->streamoutcnt > asoc->pre_open_streams) {
5865 i_want = asoc->streamoutcnt;
5866 } else {
5867 i_want = asoc->pre_open_streams;
5868 }
5869 } else {
5870 i_want = inp->sctp_ep.pre_open_stream_count;
5871 }
5872 if (his_limit < i_want) {
5873 /* I Want more :< */
5874 initack->init.num_outbound_streams = init_chk->init.num_inbound_streams;
5875 } else {
5876 /* I can have what I want :> */
5877 initack->init.num_outbound_streams = htons(i_want);
5878 }
5879 /* tell him his limit. */
5880 initack->init.num_inbound_streams =
5881 htons(inp->sctp_ep.max_open_streams_intome);
5882
5883 /* adaptation layer indication parameter */
5885 parameter_len = (uint16_t)sizeof(struct sctp_adaptation_layer_indication);
5886 ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t)+chunk_len);
5887 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
5888 ali->ph.param_length = htons(parameter_len);
5890 chunk_len += parameter_len;
5891 }
5892
5893 /* ECN parameter */
5894 if (((asoc != NULL) && (asoc->ecn_supported == 1)) ||
5895 ((asoc == NULL) && (inp->ecn_supported == 1))) {
5896 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5897 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
5898 ph->param_type = htons(SCTP_ECN_CAPABLE);
5899 ph->param_length = htons(parameter_len);
5900 chunk_len += parameter_len;
5901 }
5902
5903 /* PR-SCTP supported parameter */
5904 if (((asoc != NULL) && (asoc->prsctp_supported == 1)) ||
5905 ((asoc == NULL) && (inp->prsctp_supported == 1))) {
5906 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5907 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
5908 ph->param_type = htons(SCTP_PRSCTP_SUPPORTED);
5909 ph->param_length = htons(parameter_len);
5910 chunk_len += parameter_len;
5911 }
5912
5913 /* Add NAT friendly parameter */
5914 if (nat_friendly) {
5915 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5916 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
5917 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
5918 ph->param_length = htons(parameter_len);
5919 chunk_len += parameter_len;
5920 }
5921
5922 /* And now tell the peer which extensions we support */
5923 num_ext = 0;
5924 pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t)+chunk_len);
5925 if (((asoc != NULL) && (asoc->prsctp_supported == 1)) ||
5926 ((asoc == NULL) && (inp->prsctp_supported == 1))) {
5927 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
5928 if (((asoc != NULL) && (asoc->idata_supported == 1)) ||
5929 ((asoc == NULL) && (inp->idata_supported == 1))) {
5930 pr_supported->chunk_types[num_ext++] = SCTP_IFORWARD_CUM_TSN;
5931 }
5932 }
5933 if (((asoc != NULL) && (asoc->auth_supported == 1)) ||
5934 ((asoc == NULL) && (inp->auth_supported == 1))) {
5935 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
5936 }
5937 if (((asoc != NULL) && (asoc->asconf_supported == 1)) ||
5938 ((asoc == NULL) && (inp->asconf_supported == 1))) {
5939 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
5940 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
5941 }
5942 if (((asoc != NULL) && (asoc->reconfig_supported == 1)) ||
5943 ((asoc == NULL) && (inp->reconfig_supported == 1))) {
5944 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
5945 }
5946 if (((asoc != NULL) && (asoc->idata_supported == 1)) ||
5947 ((asoc == NULL) && (inp->idata_supported == 1))) {
5948 pr_supported->chunk_types[num_ext++] = SCTP_IDATA;
5949 }
5950 if (((asoc != NULL) && (asoc->nrsack_supported == 1)) ||
5951 ((asoc == NULL) && (inp->nrsack_supported == 1))) {
5952 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
5953 }
5954 if (((asoc != NULL) && (asoc->pktdrop_supported == 1)) ||
5955 ((asoc == NULL) && (inp->pktdrop_supported == 1))) {
5956 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
5957 }
5958 if (num_ext > 0) {
5959 parameter_len = (uint16_t)sizeof(struct sctp_supported_chunk_types_param) + num_ext;
5960 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
5961 pr_supported->ph.param_length = htons(parameter_len);
5962 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
5963 chunk_len += parameter_len;
5964 }
5965
5966 /* add authentication parameters */
5967 if (((asoc != NULL) && (asoc->auth_supported == 1)) ||
5968 ((asoc == NULL) && (inp->auth_supported == 1))) {
5969 struct sctp_auth_random *randp;
5970 struct sctp_auth_hmac_algo *hmacs;
5971 struct sctp_auth_chunk_list *chunks;
5972
5973 if (padding_len > 0) {
5974 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
5975 chunk_len += padding_len;
5976 padding_len = 0;
5977 }
5978 /* generate and add RANDOM parameter */
5979 randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+chunk_len);
5980 parameter_len = (uint16_t)sizeof(struct sctp_auth_random) +
5982 randp->ph.param_type = htons(SCTP_RANDOM);
5983 randp->ph.param_length = htons(parameter_len);
5985 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
5986 chunk_len += parameter_len;
5987
5988 if (padding_len > 0) {
5989 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
5990 chunk_len += padding_len;
5991 padding_len = 0;
5992 }
5993 /* add HMAC_ALGO parameter */
5994 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+chunk_len);
5995 parameter_len = (uint16_t)sizeof(struct sctp_auth_hmac_algo) +
5997 (uint8_t *)hmacs->hmac_ids);
5998 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
5999 hmacs->ph.param_length = htons(parameter_len);
6000 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6001 chunk_len += parameter_len;
6002
6003 if (padding_len > 0) {
6004 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
6005 chunk_len += padding_len;
6006 padding_len = 0;
6007 }
6008 /* add CHUNKS parameter */
6009 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+chunk_len);
6010 parameter_len = (uint16_t)sizeof(struct sctp_auth_chunk_list) +
6012 chunks->chunk_types);
6013 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
6014 chunks->ph.param_length = htons(parameter_len);
6015 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6016 chunk_len += parameter_len;
6017 }
6018 SCTP_BUF_LEN(m) = chunk_len;
6019 m_last = m;
6020 /* now the addresses */
6021 /*
6022 * To optimize this we could put the scoping stuff into a structure
6023 * and remove the individual uint8's from the stc structure. Then we
6024 * could just copy the address straight into the stc. But for now this
6025 * is a quick hack to get the address handling teased apart.
6026 */
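/*
 * The scope flags gathered in the cookie (stc) are copied into a local
 * sctp_scoping structure so that sctp_add_addresses_to_i_ia() can decide
 * which local addresses are eligible to be listed in this INIT-ACK.
 */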
6027 scp.ipv4_addr_legal = stc.ipv4_addr_legal;
6028 scp.ipv6_addr_legal = stc.ipv6_addr_legal;
6029 scp.loopback_scope = stc.loopback_scope;
6030 scp.ipv4_local_scope = stc.ipv4_scope;
6031 scp.local_scope = stc.local_scope;
6032 scp.site_scope = stc.site_scope;
6033 m_last = sctp_add_addresses_to_i_ia(inp, stcb, &scp, m_last,
6034 cnt_inits_to,
6035 &padding_len, &chunk_len);
6036 /* padding_len can only be positive if no addresses have been added */
6037 if (padding_len > 0) {
6038 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
6039 chunk_len += padding_len;
6040 SCTP_BUF_LEN(m) += padding_len;
6041 padding_len = 0;
6042 }
6043
6044 /* tack on the operational error if present */
6045 if (op_err) {
6046 parameter_len = 0;
6047 for (m_tmp = op_err; m_tmp != NULL; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
6048 parameter_len += SCTP_BUF_LEN(m_tmp);
6049 }
6050 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6051 SCTP_BUF_NEXT(m_last) = op_err;
6052 while (SCTP_BUF_NEXT(m_last) != NULL) {
6053 m_last = SCTP_BUF_NEXT(m_last);
6054 }
6055 chunk_len += parameter_len;
6056 }
6057 if (padding_len > 0) {
6058 m_last = sctp_add_pad_tombuf(m_last, padding_len);
6059 if (m_last == NULL) {
6060 /* Houston we have a problem, no space */
6061 sctp_m_freem(m);
6062 return;
6063 }
6064 chunk_len += padding_len;
6065 padding_len = 0;
6066 }
6067 /* Now we must build a cookie */
6068 m_cookie = sctp_add_cookie(init_pkt, offset, m, 0, &stc, &signature);
6069 if (m_cookie == NULL) {
6070 /* memory problem */
6071 sctp_m_freem(m);
6072 return;
6073 }
6074 /* Now append the cookie to the end and update the space/size */
6075 SCTP_BUF_NEXT(m_last) = m_cookie;
6076 parameter_len = 0;
6077 for (m_tmp = m_cookie; m_tmp != NULL; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
6078 parameter_len += SCTP_BUF_LEN(m_tmp);
6079 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
6080 m_last = m_tmp;
6081 }
6082 }
6083 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6084 chunk_len += parameter_len;
6085
6086 /*
6087 * Place in the size, but we don't include the last pad (if any) in
6088 * the INIT-ACK.
6089 */
6090 initack->ch.chunk_length = htons(chunk_len);
6091
6092 /*
6093 * Time to sign the cookie. We don't sign over the cookie signature
6094 * itself, so we set a trailer to exclude it from the digest.
6095 */
6096 (void)sctp_hmac_m(SCTP_HMAC,
6098 SCTP_SECRET_SIZE, m_cookie, sizeof(struct sctp_paramhdr),
6099 (uint8_t *)signature, SCTP_SIGNATURE_SIZE);
6100 /*
6101 * We pass 0 here to NOT set IP_DF if it's IPv4; we ignore the return
6102 * here since the timer will drive a retransmission.
6103 */
6104 if (padding_len > 0) {
6105 if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
6106 sctp_m_freem(m);
6107 return;
6108 }
6109 }
6110 if (stc.loopback_scope) {
6111 over_addr = (union sctp_sockstore *)dst;
6112 } else {
6113 over_addr = NULL;
6114 }
6115
6116 if ((error = sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0,
6117 0, 0,
6118 inp->sctp_lport, sh->src_port, init_chk->init.initiate_tag,
6119 port, over_addr,
6120 mflowtype, mflowid,
6122 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak send error %d\n", error);
6123 if (error == ENOBUFS) {
6124 if (asoc != NULL) {
6125 asoc->ifp_had_enobuf = 1;
6126 }
6127 SCTP_STAT_INCR(sctps_lowlevelerr);
6128 }
6129 } else {
6130 if (asoc != NULL) {
6131 asoc->ifp_had_enobuf = 0;
6132 }
6133 }
6134 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
6135}
6136
6137static void
6138sctp_prune_prsctp(struct sctp_tcb *stcb,
6139 struct sctp_association *asoc,
6140 struct sctp_sndrcvinfo *srcv,
6141 int dataout)
6142{
6143 int freed_spc = 0;
6144 struct sctp_tmit_chunk *chk, *nchk;
6145
6147 if ((asoc->prsctp_supported) &&
6148 (asoc->sent_queue_cnt_removeable > 0)) {
6149 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
6150 /*
6151 * Look for chunks marked with the PR_SCTP flag AND
6152 * the buffer space flag. If the one being sent is
6153 * equal or greater priority then purge the old one
6154 * and free some space.
6155 */
6156 if (PR_SCTP_BUF_ENABLED(chk->flags)) {
6157 /*
6158 * This one is PR-SCTP AND buffer space
6159 * limited type
6160 */
6161 if (chk->rec.data.timetodrop.tv_sec > (long)srcv->sinfo_timetolive) {
6162 /*
6163 * Lower numbers equate to higher
6164 * priority. So if the one we are
6165 * looking at has a larger number
6166 * (lower priority), we want to drop
6167 * its data and NOT retransmit it.
6168 */
6169 if (chk->data) {
6170 /*
6171 * We release the book_size
6172 * if the mbuf is here
6173 */
6174 int ret_spc;
6175 uint8_t sent;
6176
6177 if (chk->sent > SCTP_DATAGRAM_UNSENT)
6178 sent = 1;
6179 else
6180 sent = 0;
6181 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
6182 sent,
6184 freed_spc += ret_spc;
6185 if (freed_spc >= dataout) {
6186 return;
6187 }
6188 } /* if chunk was present */
6189 } /* if of sufficient priority */
6190 } /* if chunk has enabled */
6191 } /* tailqforeach */
6192
6193 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
6194 /* Here we must move to the sent queue and mark */
6195 if (PR_SCTP_BUF_ENABLED(chk->flags)) {
6196 if (chk->rec.data.timetodrop.tv_sec > (long)srcv->sinfo_timetolive) {
6197 if (chk->data) {
6198 /*
6199 * We release the book_size
6200 * if the mbuf is here
6201 */
6202 int ret_spc;
6203
6204 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
6205 0, SCTP_SO_LOCKED);
6206
6207 freed_spc += ret_spc;
6208 if (freed_spc >= dataout) {
6209 return;
6210 }
6211 } /* end if chk->data */
6212 } /* end if right class */
6213 } /* end if chk pr-sctp */
6214 } /* tailqforeachsafe (chk) */
6215 } /* if enabled in asoc */
6216}
6217
6218uint32_t
6219sctp_get_frag_point(struct sctp_tcb *stcb)
6220{
6221 struct sctp_association *asoc;
6222 uint32_t frag_point, overhead;
6223
6224 asoc = &stcb->asoc;
6225 /* Consider IP header and SCTP common header. */
6227 overhead = SCTP_MIN_OVERHEAD;
6228 } else {
6229 overhead = SCTP_MIN_V4_OVERHEAD;
6230 }
6231 /* Consider DATA/IDATA chunk header and AUTH header, if needed. */
6232 if (asoc->idata_supported) {
6233 overhead += sizeof(struct sctp_idata_chunk);
6235 overhead += sctp_get_auth_chunk_len(asoc->peer_hmac_id);
6236 }
6237 } else {
6238 overhead += sizeof(struct sctp_data_chunk);
6240 overhead += sctp_get_auth_chunk_len(asoc->peer_hmac_id);
6241 }
6242 }
6243 KASSERT(overhead % 4 == 0,
6244 ("overhead (%u) not a multiple of 4", overhead));
6245 /* Consider padding. */
6246 if (asoc->smallest_mtu % 4 > 0) {
6247 overhead += (asoc->smallest_mtu % 4);
6248 }
6249 KASSERT(asoc->smallest_mtu > overhead,
6250 ("Association MTU (%u) too small for overhead (%u)",
6251 asoc->smallest_mtu, overhead));
6252 frag_point = asoc->smallest_mtu - overhead;
6253 KASSERT(frag_point % 4 == 0,
6254 ("frag_point (%u) not a multiple of 4", frag_point));
6255 /* Honor MAXSEG socket option. */
6256 if ((asoc->sctp_frag_point > 0) &&
6257 (asoc->sctp_frag_point < frag_point)) {
6258 frag_point = asoc->sctp_frag_point;
6259 }
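 /*
 * Worked example (assuming IPv4, no AUTH, and a 1500 byte path MTU):
 * overhead is 20 (IP) + 12 (SCTP common header) + 16 (DATA chunk
 * header) = 48 bytes, so the fragmentation point would be 1452 bytes
 * of user data per DATA chunk, unless a smaller MAXSEG is configured.
 */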
6260 return (frag_point);
6261}
6262
6263static void
6264sctp_set_prsctp_policy(struct sctp_stream_queue_pending *sp)
6265{
6266 /*
6267 * We assume that the user wants PR_SCTP_TTL if the user provides a
6268 * positive lifetime but does not specify any PR_SCTP policy.
6269 */
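 /*
 * In each case below the policy parameter ends up in sp->ts: for the
 * TTL policy it is converted to an absolute expiry time, while for the
 * buffer and retransmission policies the raw value is kept in tv_sec
 * (a priority and a retransmission limit, respectively).
 */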
6270 if (PR_SCTP_ENABLED(sp->sinfo_flags)) {
6272 } else if (sp->timetolive > 0) {
6275 } else {
6276 return;
6277 }
6278 switch (PR_SCTP_POLICY(sp->sinfo_flags)) {
6280 /*
6281 * Time to live is a priority stored in tv_sec when doing
6282 * the buffer drop thing.
6283 */
6284 sp->ts.tv_sec = sp->timetolive;
6285 sp->ts.tv_usec = 0;
6286 break;
6288 {
6289 struct timeval tv;
6290
6291 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
6292 tv.tv_sec = sp->timetolive / 1000;
6293 tv.tv_usec = (sp->timetolive * 1000) % 1000000;
6294 /*
6295 * TODO sctp_constants.h needs alternative time
6296 * macros when _KERNEL is undefined.
6297 */
6298 timevaladd(&sp->ts, &tv);
6299 }
6300 break;
6302 /*
6303 * Time to live is the number of retransmissions, stored in
6304 * tv_sec.
6305 */
6306 sp->ts.tv_sec = sp->timetolive;
6307 sp->ts.tv_usec = 0;
6308 break;
6309 default:
6311 "Unknown PR_SCTP policy %u.\n",
6313 break;
6314 }
6315}
6316
6317static int
6318sctp_msg_append(struct sctp_tcb *stcb,
6319 struct sctp_nets *net,
6320 struct mbuf *m,
6321 struct sctp_sndrcvinfo *srcv, int hold_stcb_lock)
6322{
6323 int error = 0;
6324 struct mbuf *at;
6325 struct sctp_stream_queue_pending *sp = NULL;
6326 struct sctp_stream_out *strm;
6327
6328 /*
6329 * Given an mbuf chain, put it into the association send queue and
6330 * place it on the wheel
6331 */
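 /*
 * In outline: validate the stream number and association state,
 * allocate a stream_queue_pending, fill it in from srcv, walk the
 * mbuf chain to record its length and tail, and finally queue it on
 * the stream's outqueue and inform the stream scheduler.
 */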
6332 if (srcv->sinfo_stream >= stcb->asoc.streamoutcnt) {
6333 /* Invalid stream number */
6334 SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
6335 error = EINVAL;
6336 goto out_now;
6337 }
6338 if ((stcb->asoc.stream_locked) &&
6339 (stcb->asoc.stream_locked_on != srcv->sinfo_stream)) {
6340 SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
6341 error = EINVAL;
6342 goto out_now;
6343 }
6344 /* Now can we send this? */
6349 /* got data while shutting down */
6350 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
6351 error = ECONNRESET;
6352 goto out_now;
6353 }
6354 sctp_alloc_a_strmoq(stcb, sp);
6355 if (sp == NULL) {
6356 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6357 error = ENOMEM;
6358 goto out_now;
6359 }
6360 sp->sinfo_flags = srcv->sinfo_flags;
6361 sp->timetolive = srcv->sinfo_timetolive;
6362 sp->ppid = srcv->sinfo_ppid;
6363 sp->context = srcv->sinfo_context;
6364 sp->fsn = 0;
6365 if (sp->sinfo_flags & SCTP_ADDR_OVER) {
6366 sp->net = net;
6367 atomic_add_int(&sp->net->ref_count, 1);
6368 } else {
6369 sp->net = NULL;
6370 }
6371 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
6372 sp->sid = srcv->sinfo_stream;
6373 sp->msg_is_complete = 1;
6374 sp->sender_all_done = 1;
6375 sp->some_taken = 0;
6376 sp->data = m;
6377 sp->tail_mbuf = NULL;
6379 /*
6380 * We could in theory (for sendall) sifa the length in, but we would
6381 * still have to hunt through the chain since we need to setup the
6382 * tail_mbuf
6383 */
6384 sp->length = 0;
6385 for (at = m; at; at = SCTP_BUF_NEXT(at)) {
6386 if (SCTP_BUF_NEXT(at) == NULL)
6387 sp->tail_mbuf = at;
6388 sp->length += SCTP_BUF_LEN(at);
6389 }
6390 if (srcv->sinfo_keynumber_valid) {
6391 sp->auth_keyid = srcv->sinfo_keynumber;
6392 } else {
6394 }
6397 sp->holds_key_ref = 1;
6398 }
6399 if (hold_stcb_lock == 0) {
6400 SCTP_TCB_SEND_LOCK(stcb);
6401 }
6402 strm = &stcb->asoc.strmout[srcv->sinfo_stream];
6403 sctp_snd_sb_alloc(stcb, sp->length);
6404 atomic_add_int(&stcb->asoc.stream_queue_cnt, 1);
6405 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
6406 stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, &stcb->asoc, strm, sp);
6407 m = NULL;
6408 if (hold_stcb_lock == 0) {
6409 SCTP_TCB_SEND_UNLOCK(stcb);
6410 }
6411out_now:
6412 if (m) {
6413 sctp_m_freem(m);
6414 }
6415 return (error);
6416}
6417
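/*
 * Append the data in clonechain to outchain, returning the (possibly new)
 * head of the chain and tracking its last mbuf in *endofchain. Small
 * payloads are copied by value into trailing space (or a fresh cluster);
 * otherwise, or when copy_by_ref is requested, the chain is duplicated
 * with SCTP_M_COPYM, and when can_take_mbuf is set the clonechain is
 * simply taken over as-is.
 */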
6418static struct mbuf *
6419sctp_copy_mbufchain(struct mbuf *clonechain,
6420 struct mbuf *outchain,
6421 struct mbuf **endofchain,
6422 int can_take_mbuf,
6423 int sizeofcpy,
6424 uint8_t copy_by_ref)
6425{
6426 struct mbuf *m;
6427 struct mbuf *appendchain;
6428 caddr_t cp;
6429 int len;
6430
6431 if (endofchain == NULL) {
6432 /* error */
6433error_out:
6434 if (outchain)
6435 sctp_m_freem(outchain);
6436 return (NULL);
6437 }
6438 if (can_take_mbuf) {
6439 appendchain = clonechain;
6440 } else {
6441 if (!copy_by_ref &&
6442 (sizeofcpy <= (int)((((SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count) - 1) * MLEN) + MHLEN)))) {
6443 /* It's not in a cluster */
6444 if (*endofchain == NULL) {
6445 /* lets get a mbuf cluster */
6446 if (outchain == NULL) {
6447 /* This is the general case */
6448 new_mbuf:
6449 outchain = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER);
6450 if (outchain == NULL) {
6451 goto error_out;
6452 }
6453 SCTP_BUF_LEN(outchain) = 0;
6454 *endofchain = outchain;
6455 /* get the prepend space */
6456 SCTP_BUF_RESV_UF(outchain, (SCTP_FIRST_MBUF_RESV + 4));
6457 } else {
6458 /*
6459 * We really should not get a NULL
6460 * in endofchain
6461 */
6462 /* find end */
6463 m = outchain;
6464 while (m) {
6465 if (SCTP_BUF_NEXT(m) == NULL) {
6466 *endofchain = m;
6467 break;
6468 }
6469 m = SCTP_BUF_NEXT(m);
6470 }
6471 /* sanity */
6472 if (*endofchain == NULL) {
6473 /*
6474 * huh, TSNH XXX maybe we
6475 * should panic
6476 */
6477 sctp_m_freem(outchain);
6478 goto new_mbuf;
6479 }
6480 }
6481 /* get the new end of length */
6482 len = (int)M_TRAILINGSPACE(*endofchain);
6483 } else {
6484 /* how much is left at the end? */
6485 len = (int)M_TRAILINGSPACE(*endofchain);
6486 }
6487 /* Find the end of the data, for appending */
6488 cp = (mtod((*endofchain), caddr_t)+SCTP_BUF_LEN((*endofchain)));
6489
6490 /* Now lets copy it out */
6491 if (len >= sizeofcpy) {
6492 /* It all fits, copy it in */
6493 m_copydata(clonechain, 0, sizeofcpy, cp);
6494 SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
6495 } else {
6496 /* fill up the end of the chain */
6497 if (len > 0) {
6498 m_copydata(clonechain, 0, len, cp);
6499 SCTP_BUF_LEN((*endofchain)) += len;
6500 /* now we need another one */
6501 sizeofcpy -= len;
6502 }
6503 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER);
6504 if (m == NULL) {
6505 /* We failed */
6506 goto error_out;
6507 }
6508 SCTP_BUF_NEXT((*endofchain)) = m;
6509 *endofchain = m;
6510 cp = mtod((*endofchain), caddr_t);
6511 m_copydata(clonechain, len, sizeofcpy, cp);
6512 SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
6513 }
6514 return (outchain);
6515 } else {
6516 /* copy the old-fashioned way */
6517 appendchain = SCTP_M_COPYM(clonechain, 0, M_COPYALL, M_NOWAIT);
6518#ifdef SCTP_MBUF_LOGGING
6519 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6520 sctp_log_mbc(appendchain, SCTP_MBUF_ICOPY);
6521 }
6522#endif
6523 }
6524 }
6525 if (appendchain == NULL) {
6526 /* error */
6527 if (outchain)
6528 sctp_m_freem(outchain);
6529 return (NULL);
6530 }
6531 if (outchain) {
6532 /* tack on to the end */
6533 if (*endofchain != NULL) {
6534 SCTP_BUF_NEXT(((*endofchain))) = appendchain;
6535 } else {
6536 m = outchain;
6537 while (m) {
6538 if (SCTP_BUF_NEXT(m) == NULL) {
6539 SCTP_BUF_NEXT(m) = appendchain;
6540 break;
6541 }
6542 m = SCTP_BUF_NEXT(m);
6543 }
6544 }
6545 /*
6546 * save off the end and update the end-chain position
6547 */
6548 m = appendchain;
6549 while (m) {
6550 if (SCTP_BUF_NEXT(m) == NULL) {
6551 *endofchain = m;
6552 break;
6553 }
6554 m = SCTP_BUF_NEXT(m);
6555 }
6556 return (outchain);
6557 } else {
6558 /* save off the end and update the end-chain position */
6559 m = appendchain;
6560 while (m) {
6561 if (SCTP_BUF_NEXT(m) == NULL) {
6562 *endofchain = m;
6563 break;
6564 }
6565 m = SCTP_BUF_NEXT(m);
6566 }
6567 return (appendchain);
6568 }
6569}
6570
6571static int
6572sctp_med_chunk_output(struct sctp_inpcb *inp,
6573 struct sctp_tcb *stcb,
6574 struct sctp_association *asoc,
6575 int *num_out,
6576 int *reason_code,
6577 int control_only, int from_where,
6578 struct timeval *now, int *now_filled,
6579 uint32_t frag_point, int so_locked);
6580
6581static void
6582sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
6584{
6585 struct sctp_copy_all *ca;
6586 struct mbuf *m;
6587 int ret = 0;
6588 int added_control = 0;
6589 int un_sent, do_chunk_output = 1;
6590 struct sctp_association *asoc;
6591 struct sctp_nets *net;
6592
6593 ca = (struct sctp_copy_all *)ptr;
6594 if (ca->m == NULL) {
6595 return;
6596 }
6597 if (ca->inp != inp) {
6598 /* TSNH */
6599 return;
6600 }
6601 if (ca->sndlen > 0) {
6602 m = SCTP_M_COPYM(ca->m, 0, M_COPYALL, M_NOWAIT);
6603 if (m == NULL) {
6604 /* can't copy so we are done */
6605 ca->cnt_failed++;
6606 return;
6607 }
6608#ifdef SCTP_MBUF_LOGGING
6609 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6610 sctp_log_mbc(m, SCTP_MBUF_ICOPY);
6611 }
6612#endif
6613 } else {
6614 m = NULL;
6615 }
6617 if (stcb->asoc.alternate) {
6618 net = stcb->asoc.alternate;
6619 } else {
6620 net = stcb->asoc.primary_destination;
6621 }
6622 if (ca->sndrcv.sinfo_flags & SCTP_ABORT) {
6623 /* Abort this assoc with m as the user defined reason */
6624 if (m != NULL) {
6625 SCTP_BUF_PREPEND(m, sizeof(struct sctp_paramhdr), M_NOWAIT);
6626 } else {
6627 m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
6628 0, M_NOWAIT, 1, MT_DATA);
6629 SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
6630 }
6631 if (m != NULL) {
6632 struct sctp_paramhdr *ph;
6633
6634 ph = mtod(m, struct sctp_paramhdr *);
6636 ph->param_length = htons((uint16_t)(sizeof(struct sctp_paramhdr) + ca->sndlen));
6637 }
6638 /*
6639 * We add one here to keep the assoc from disappearing
6640 * on us.
6641 */
6642 atomic_add_int(&stcb->asoc.refcnt, 1);
6643 sctp_abort_an_association(inp, stcb, m, false, SCTP_SO_NOT_LOCKED);
6644 /*
6645 * sctp_abort_an_association calls sctp_free_asoc(), but
6646 * free_asoc will NOT free the association since we
6647 * incremented the refcnt. We do this to prevent it being
6648 * freed and things getting tricky, since we could end up
6649 * (from free_asoc) calling inpcb_free, which would take a
6650 * recursive lock on the iterator lock. As a consequence
6651 * the stcb is returned to us unlocked: since free_asoc
6652 * returns with either no TCB or the TCB unlocked, we must
6653 * relock it so the iterator timer can unlock it later :-0
6654 */
6655 SCTP_TCB_LOCK(stcb);
6656 atomic_subtract_int(&stcb->asoc.refcnt, 1);
6657 goto no_chunk_output;
6658 } else {
6659 if (m) {
6660 ret = sctp_msg_append(stcb, net, m,
6661 &ca->sndrcv, 1);
6662 }
6663 asoc = &stcb->asoc;
6664 if (ca->sndrcv.sinfo_flags & SCTP_EOF) {
6665 /* shutdown this assoc */
6666 if (TAILQ_EMPTY(&asoc->send_queue) &&
6667 TAILQ_EMPTY(&asoc->sent_queue) &&
6669 if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc)) {
6670 goto abort_anyway;
6671 }
6672 /*
6673 * there is nothing queued to send, so I'm
6674 * done...
6675 */
6679 /*
6680 * only send SHUTDOWN the first time
6681 * through
6682 */
6683 if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
6684 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
6685 }
6688 sctp_send_shutdown(stcb, net);
6690 net);
6692 NULL);
6693 added_control = 1;
6694 do_chunk_output = 0;
6695 }
6696 } else {
6697 /*
6698 * we still got (or just got) data to send,
6699 * so set SHUTDOWN_PENDING
6700 */
6701 /*
6702 * XXX sockets draft says that SCTP_EOF
6703 * should be sent with no data. Currently,
6704 * we will allow user data to be sent first
6705 * and move to SHUTDOWN-PENDING
6706 */
6710 if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc)) {
6712 }
6714 if (TAILQ_EMPTY(&asoc->send_queue) &&
6715 TAILQ_EMPTY(&asoc->sent_queue) &&
6717 struct mbuf *op_err;
6718 char msg[SCTP_DIAG_INFO_LEN];
6719
6720 abort_anyway:
6721 SCTP_SNPRINTF(msg, sizeof(msg),
6722 "%s:%d at %s", __FILE__, __LINE__, __func__);
6723 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
6724 msg);
6725 atomic_add_int(&stcb->asoc.refcnt, 1);
6727 op_err, false, SCTP_SO_NOT_LOCKED);
6728 atomic_subtract_int(&stcb->asoc.refcnt, 1);
6729 goto no_chunk_output;
6730 }
6732 NULL);
6733 }
6734 }
6735 }
6736 }
6737 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
6739
6741 (stcb->asoc.total_flight > 0) &&
6742 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
6743 do_chunk_output = 0;
6744 }
6745 if (do_chunk_output)
6747 else if (added_control) {
6748 struct timeval now;
6749 int num_out, reason, now_filled = 0;
6750
6751 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
6752 &reason, 1, 1, &now, &now_filled,
6753 sctp_get_frag_point(stcb),
6755 }
6756no_chunk_output:
6757 if (ret) {
6758 ca->cnt_failed++;
6759 } else {
6760 ca->cnt_sent++;
6761 }
6762}
6763
6764static void
6766{
6767 struct sctp_copy_all *ca;
6768
6769 ca = (struct sctp_copy_all *)ptr;
6770 /*
6771 * Do a notify here? Kacheong suggests that the notify be done at
6772 * send time, so you would push up a notification if any send
6773 * failed. It is unclear whether this is feasible, since the only
6774 * failures we have are "memory" related, and if you cannot get an
6775 * mbuf to send the data you surely can't get an mbuf to send up a
6776 * notification that you can't send the data :->
6777 */
6778
6779 /* now free everything */
6780 if (ca->inp) {
6781 /* Lets clear the flag to allow others to run. */
6782 SCTP_INP_WLOCK(ca->inp);
6783 ca->inp->sctp_flags &= ~SCTP_PCB_FLAGS_SND_ITERATOR_UP;
6784 SCTP_INP_WUNLOCK(ca->inp);
6785 }
6786 sctp_m_freem(ca->m);
6787 SCTP_FREE(ca, SCTP_M_COPYAL);
6788}
6789
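/*
 * Copy len bytes of user data from uio into a fresh mbuf chain using
 * uiomove(), allocating further mbufs as needed; returns NULL if the
 * copy fails or no memory is available.
 */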
6790static struct mbuf *
6791sctp_copy_out_all(struct uio *uio, ssize_t len)
6792{
6793 struct mbuf *ret, *at;
6794 ssize_t left, willcpy, cancpy, error;
6795
6796 ret = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_WAITOK, 1, MT_DATA);
6797 if (ret == NULL) {
6798 /* TSNH */
6799 return (NULL);
6800 }
6801 left = len;
6802 SCTP_BUF_LEN(ret) = 0;
6803 /* save space for the data chunk header */
6804 cancpy = (int)M_TRAILINGSPACE(ret);
6805 willcpy = min(cancpy, left);
6806 at = ret;
6807 while (left > 0) {
6808 /* Align data to the end */
6809 error = uiomove(mtod(at, caddr_t), (int)willcpy, uio);
6810 if (error) {
6811 err_out_now:
6812 sctp_m_freem(at);
6813 return (NULL);
6814 }
6815 SCTP_BUF_LEN(at) = (int)willcpy;
6816 SCTP_BUF_NEXT_PKT(at) = SCTP_BUF_NEXT(at) = 0;
6817 left -= willcpy;
6818 if (left > 0) {
6819 SCTP_BUF_NEXT(at) = sctp_get_mbuf_for_msg((unsigned int)left, 0, M_WAITOK, 1, MT_DATA);
6820 if (SCTP_BUF_NEXT(at) == NULL) {
6821 goto err_out_now;
6822 }
6823 at = SCTP_BUF_NEXT(at);
6824 SCTP_BUF_LEN(at) = 0;
6825 cancpy = (int)M_TRAILINGSPACE(at);
6826 willcpy = min(cancpy, left);
6827 }
6828 }
6829 return (ret);
6830}
6831
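/*
 * SCTP_SENDALL support: the user data is copied once into a sctp_copy_all
 * work item and an iterator over the endpoint's associations is started,
 * which replays the send (via sctp_sendall_iterator) on every association
 * belonging to this endpoint and reports completion through
 * sctp_sendall_completes.
 */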
6832static int
6833sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m,
6834 struct sctp_sndrcvinfo *srcv)
6835{
6836 int ret;
6837 struct sctp_copy_all *ca;
6838
6839 if (uio->uio_resid > (ssize_t)SCTP_BASE_SYSCTL(sctp_sendall_limit)) {
6840 /* You must not be larger than the limit! */
6841 return (EMSGSIZE);
6842 }
6843 SCTP_MALLOC(ca, struct sctp_copy_all *, sizeof(struct sctp_copy_all),
6844 SCTP_M_COPYAL);
6845 if (ca == NULL) {
6846 sctp_m_freem(m);
6847 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6848 return (ENOMEM);
6849 }
6850 memset(ca, 0, sizeof(struct sctp_copy_all));
6851
6852 ca->inp = inp;
6853 if (srcv) {
6854 memcpy(&ca->sndrcv, srcv, sizeof(struct sctp_nonpad_sndrcvinfo));
6855 }
6856
6857 /* Serialize. */
6861 sctp_m_freem(m);
6862 SCTP_FREE(ca, SCTP_M_COPYAL);
6863 return (EBUSY);
6864 }
6867
6868 /*
6869 * take off the sendall flag, it would be bad if we failed to do
6870 * this :-0
6871 */
6872 ca->sndrcv.sinfo_flags &= ~SCTP_SENDALL;
6873 /* get length and mbuf chain */
6874 if (uio) {
6875 ca->sndlen = uio->uio_resid;
6876 ca->m = sctp_copy_out_all(uio, ca->sndlen);
6877 if (ca->m == NULL) {
6878 SCTP_FREE(ca, SCTP_M_COPYAL);
6879 sctp_m_freem(m);
6881 inp->sctp_flags &= ~SCTP_PCB_FLAGS_SND_ITERATOR_UP;
6883 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6884 return (ENOMEM);
6885 }
6886 } else {
6887 /* Gather the length of the send */
6888 struct mbuf *mat;
6889
6890 ca->sndlen = 0;
6891 for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
6892 ca->sndlen += SCTP_BUF_LEN(mat);
6893 }
6894 }
6898 (void *)ca, 0,
6899 sctp_sendall_completes, inp, 1);
6900 if (ret) {
6901 SCTP_INP_WLOCK(inp);
6902 inp->sctp_flags &= ~SCTP_PCB_FLAGS_SND_ITERATOR_UP;
6903 SCTP_INP_WUNLOCK(inp);
6904 SCTP_FREE(ca, SCTP_M_COPYAL);
6905 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
6906 return (EFAULT);
6907 }
6908 return (0);
6909}
6910
6911void
6913{
6914 struct sctp_tmit_chunk *chk, *nchk;
6915
6916 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
6917 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
6918 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
6920 if (chk->data) {
6921 sctp_m_freem(chk->data);
6922 chk->data = NULL;
6923 }
6925 }
6926 }
6927}
6928
6929void
6931{
6932 struct sctp_association *asoc;
6933 struct sctp_tmit_chunk *chk, *nchk;
6934 struct sctp_asconf_chunk *acp;
6935
6936 asoc = &stcb->asoc;
6937 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
6938 /* find SCTP_ASCONF chunk in queue */
6939 if (chk->rec.chunk_id.id == SCTP_ASCONF) {
6940 if (chk->data) {
6941 acp = mtod(chk->data, struct sctp_asconf_chunk *);
6942 if (SCTP_TSN_GT(ntohl(acp->serial_number), asoc->asconf_seq_out_acked)) {
6943 /* Not Acked yet */
6944 break;
6945 }
6946 }
6947 TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next);
6948 asoc->ctrl_queue_cnt--;
6949 if (chk->data) {
6950 sctp_m_freem(chk->data);
6951 chk->data = NULL;
6952 }
6954 }
6955 }
6956}
6957
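/*
 * After a bundle of DATA chunks has been handed to the lower layer, move
 * them from the send queue onto the sent queue (kept in TSN order), stamp
 * the transmit time, grow the flight size counters and shrink the
 * advertised peer rwnd accordingly.
 */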
6958static void
6959sctp_clean_up_datalist(struct sctp_tcb *stcb,
6960 struct sctp_association *asoc,
6961 struct sctp_tmit_chunk **data_list,
6962 int bundle_at,
6963 struct sctp_nets *net)
6964{
6965 int i;
6966 struct sctp_tmit_chunk *tp1;
6967
6968 for (i = 0; i < bundle_at; i++) {
6969 /* off of the send queue */
6970 TAILQ_REMOVE(&asoc->send_queue, data_list[i], sctp_next);
6972 if (i > 0) {
6973 /*
6974 * For any chunk other than the first, clear the RTT flag;
6975 * chunk 0 has it cleared or set depending on whether an RTO
6976 * measurement is needed.
6977 */
6978 data_list[i]->do_rtt = 0;
6979 }
6980 /* record time */
6981 data_list[i]->sent_rcv_time = net->last_sent_time;
6982 data_list[i]->rec.data.cwnd_at_send = net->cwnd;
6983 data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.tsn;
6984 if (data_list[i]->whoTo == NULL) {
6985 data_list[i]->whoTo = net;
6986 atomic_add_int(&net->ref_count, 1);
6987 }
6988 /* on to the sent queue */
6989 tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead);
6990 if ((tp1) && SCTP_TSN_GT(tp1->rec.data.tsn, data_list[i]->rec.data.tsn)) {
6991 struct sctp_tmit_chunk *tpp;
6992
6993 /* need to move back */
6994 back_up_more:
6995 tpp = TAILQ_PREV(tp1, sctpchunk_listhead, sctp_next);
6996 if (tpp == NULL) {
6997 TAILQ_INSERT_BEFORE(tp1, data_list[i], sctp_next);
6998 goto all_done;
6999 }
7000 tp1 = tpp;
7001 if (SCTP_TSN_GT(tp1->rec.data.tsn, data_list[i]->rec.data.tsn)) {
7002 goto back_up_more;
7003 }
7004 TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next);
7005 } else {
7006 TAILQ_INSERT_TAIL(&asoc->sent_queue,
7007 data_list[i],
7008 sctp_next);
7009 }
7010all_done:
7011 /* This does not lower until the cum-ack passes it */
7013 if ((asoc->peers_rwnd <= 0) &&
7014 (asoc->total_flight == 0) &&
7015 (bundle_at == 1)) {
7016 /* Mark the chunk as being a window probe */
7017 SCTP_STAT_INCR(sctps_windowprobed);
7018 }
7019#ifdef SCTP_AUDITING_ENABLED
7020 sctp_audit_log(0xC2, 3);
7021#endif
7022 data_list[i]->sent = SCTP_DATAGRAM_SENT;
7023 data_list[i]->snd_count = 1;
7024 data_list[i]->rec.data.chunk_was_revoked = 0;
7025 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
7027 data_list[i]->whoTo->flight_size,
7028 data_list[i]->book_size,
7029 (uint32_t)(uintptr_t)data_list[i]->whoTo,
7030 data_list[i]->rec.data.tsn);
7031 }
7032 sctp_flight_size_increase(data_list[i]);
7033 sctp_total_flight_increase(stcb, data_list[i]);
7034 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
7036 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
7037 }
7039 (uint32_t)(data_list[i]->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
7041 /* SWS sender side engages */
7042 asoc->peers_rwnd = 0;
7043 }
7044 }
7047 }
7048}
7049
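/*
 * Sweep the control send queue after an output pass and discard any
 * leftover one-shot control chunks (SACKs, HEARTBEAT-ACKs, SHUTDOWNs and
 * the like); a stream-reset request that is still pending is kept.
 */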
7050static void
7051sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc, int so_locked)
7052{
7053 struct sctp_tmit_chunk *chk, *nchk;
7054
7055 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
7056 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
7057 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
7059 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
7060 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) ||
7061 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
7062 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
7063 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
7064 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
7065 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
7066 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
7067 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
7068 /* Stray chunks must be cleaned up */
7069 clean_up_anyway:
7070 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
7072 if (chk->data) {
7073 sctp_m_freem(chk->data);
7074 chk->data = NULL;
7075 }
7076 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
7077 asoc->fwd_tsn_cnt--;
7078 }
7079 sctp_free_a_chunk(stcb, chk, so_locked);
7080 } else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
7081 /* special handling, we must look into the param */
7082 if (chk != asoc->str_reset) {
7083 goto clean_up_anyway;
7084 }
7085 }
7086 }
7087}
7088
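/*
 * Decide how many bytes of a still-incomplete message may be moved out
 * now.  In EEOR mode a message that fits entirely is sent only when
 * nothing is in flight; one that does not fit fills whatever space is
 * left.  In normal mode splitting is refused when the part that would be
 * left behind is smaller than sctp_min_residual, or when the available
 * space is below min(sctp_min_split_point, frag_point).
 */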
7089static uint32_t
7090sctp_can_we_split_this(struct sctp_tcb *stcb, uint32_t length,
7091 uint32_t space_left, uint32_t frag_point, int eeor_on)
7092{
7093 /*
7094 * Make a decision on whether I should split a msg into multiple parts.
7095 * This is only asked of incomplete messages.
7096 */
7097 if (eeor_on) {
7098 /*
7099 * If we are doing EEOR we need to always send it if it's the
7100 * entire thing, since it might be all the guy is putting in
7101 * the hopper.
7102 */
7103 if (space_left >= length) {
7104 /*-
7105 * If we have data outstanding,
7106 * we get another chance when the sack
7107 * arrives to transmit - wait for more data
7108 */
7109 if (stcb->asoc.total_flight == 0) {
7110 /*
7111 * If nothing is in flight, we zero the
7112 * packet counter.
7113 */
7114 return (length);
7115 }
7116 return (0);
7117
7118 } else {
7119 /* You can fill the rest */
7120 return (space_left);
7121 }
7122 }
7123 /*-
7124 * For those strange folk that make the send buffer
7125 * smaller than our fragmentation point, we can't
7126 * get a full msg in so we have to allow splitting.
7127 */
7128 if (SCTP_SB_LIMIT_SND(stcb->sctp_socket) < frag_point) {
7129 return (length);
7130 }
7131 if ((length <= space_left) ||
7132 ((length - space_left) < SCTP_BASE_SYSCTL(sctp_min_residual))) {
7133 /* Sub-optimal residual, don't split in non-eeor mode. */
7134 return (0);
7135 }
7136 /*
7137 * If we reach here length is larger than the space_left. Do we wish
7138 * to split it for the sake of putting the packet together?
7139 */
7140 if (space_left >= min(SCTP_BASE_SYSCTL(sctp_min_split_point), frag_point)) {
7141 /* Its ok to split it */
7142 return (min(space_left, frag_point));
7143 }
7144 /* Nope, can't split */
7145 return (0);
7146}
7147
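/*
 * Move up to space_left bytes of the next pending message from the given
 * stream queue into a DATA/I-DATA chunk: assign the TSN, SID and MID/SSN,
 * add any padding and append the chunk to the association's send queue.
 * Returns the number of payload bytes moved; *giveup and *bail ask the
 * caller to stop filling for this destination (bail also aborts the whole
 * pass).
 */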
7148static uint32_t
7149sctp_move_to_outqueue(struct sctp_tcb *stcb,
7150 struct sctp_nets *net,
7151 struct sctp_stream_out *strq,
7152 uint32_t space_left,
7153 uint32_t frag_point,
7154 int *giveup,
7155 int eeor_mode,
7156 int *bail,
7157 int so_locked)
7158{
7159 /* Move from the stream to the send_queue keeping track of the total */
7160 struct sctp_association *asoc;
7161 struct sctp_stream_queue_pending *sp;
7162 struct sctp_tmit_chunk *chk;
7163 struct sctp_data_chunk *dchkh = NULL;
7164 struct sctp_idata_chunk *ndchkh = NULL;
7165 uint32_t to_move, length;
7166 int leading;
7167 uint8_t rcv_flags = 0;
7168 uint8_t some_taken;
7169 uint8_t send_lock_up = 0;
7170
7172 asoc = &stcb->asoc;
7173one_more_time:
7174 /* sa_ignore FREED_MEMORY */
7175 sp = TAILQ_FIRST(&strq->outqueue);
7176 if (sp == NULL) {
7177 if (send_lock_up == 0) {
7178 SCTP_TCB_SEND_LOCK(stcb);
7179 send_lock_up = 1;
7180 }
7181 sp = TAILQ_FIRST(&strq->outqueue);
7182 if (sp) {
7183 goto one_more_time;
7184 }
7186 (stcb->asoc.idata_supported == 0) &&
7187 (strq->last_msg_incomplete)) {
7188 SCTP_PRINTF("Huh? Stream:%d lm_in_c=%d but queue is NULL\n",
7189 strq->sid,
7190 strq->last_msg_incomplete);
7191 strq->last_msg_incomplete = 0;
7192 }
7193 to_move = 0;
7194 if (send_lock_up) {
7196 send_lock_up = 0;
7197 }
7198 goto out_of;
7199 }
7200 if ((sp->msg_is_complete) && (sp->length == 0)) {
7201 if (sp->sender_all_done) {
7202 /*
7203 * We are doing deferred cleanup. Last time through
7204 * when we took all the data the sender_all_done was
7205 * not set.
7206 */
7207 if ((sp->put_last_out == 0) && (sp->discard_rest == 0)) {
7208 SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
7209 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
7210 sp->sender_all_done,
7211 sp->length,
7212 sp->msg_is_complete,
7213 sp->put_last_out,
7214 send_lock_up);
7215 }
7216 if (send_lock_up == 0) {
7217 SCTP_TCB_SEND_LOCK(stcb);
7218 send_lock_up = 1;
7219 }
7220 atomic_subtract_int(&asoc->stream_queue_cnt, 1);
7221 TAILQ_REMOVE(&strq->outqueue, sp, next);
7222 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp);
7223 if ((strq->state == SCTP_STREAM_RESET_PENDING) &&
7224 (strq->chunks_on_queues == 0) &&
7225 TAILQ_EMPTY(&strq->outqueue)) {
7226 stcb->asoc.trigger_reset = 1;
7227 }
7228 if (sp->net) {
7230 sp->net = NULL;
7231 }
7232 if (sp->data) {
7233 sctp_m_freem(sp->data);
7234 sp->data = NULL;
7235 }
7236 sctp_free_a_strmoq(stcb, sp, so_locked);
7237 /* we can't be locked to it */
7238 if (send_lock_up) {
7240 send_lock_up = 0;
7241 }
7242 /* back to get the next msg */
7243 goto one_more_time;
7244 } else {
7245 /*
7246 * sender just finished this but still holds a
7247 * reference
7248 */
7249 *giveup = 1;
7250 to_move = 0;
7251 goto out_of;
7252 }
7253 } else {
7254 /* is there some to get */
7255 if (sp->length == 0) {
7256 /* no */
7257 *giveup = 1;
7258 to_move = 0;
7259 goto out_of;
7260 } else if (sp->discard_rest) {
7261 if (send_lock_up == 0) {
7262 SCTP_TCB_SEND_LOCK(stcb);
7263 send_lock_up = 1;
7264 }
7265 /* Whack down the size */
7266 atomic_subtract_int(&stcb->asoc.total_output_queue_size, sp->length);
7267 if ((stcb->sctp_socket != NULL) &&
7270 atomic_subtract_int(&stcb->sctp_socket->so_snd.sb_cc, sp->length);
7271 }
7272 if (sp->data) {
7273 sctp_m_freem(sp->data);
7274 sp->data = NULL;
7275 sp->tail_mbuf = NULL;
7276 }
7277 sp->length = 0;
7278 sp->some_taken = 1;
7279 *giveup = 1;
7280 to_move = 0;
7281 goto out_of;
7282 }
7283 }
7284 some_taken = sp->some_taken;
7285re_look:
7286 length = sp->length;
7287 if (sp->msg_is_complete) {
7288 /* The message is complete */
7289 to_move = min(length, frag_point);
7290 if (to_move == length) {
7291 /* All of it fits in the MTU */
7292 if (sp->some_taken) {
7293 rcv_flags |= SCTP_DATA_LAST_FRAG;
7294 } else {
7295 rcv_flags |= SCTP_DATA_NOT_FRAG;
7296 }
7297 sp->put_last_out = 1;
7299 rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
7300 }
7301 } else {
7302 /* Not all of it fits, we fragment */
7303 if (sp->some_taken == 0) {
7304 rcv_flags |= SCTP_DATA_FIRST_FRAG;
7305 }
7306 sp->some_taken = 1;
7307 }
7308 } else {
7309 to_move = sctp_can_we_split_this(stcb, length, space_left, frag_point, eeor_mode);
7310 if (to_move) {
7311 /*-
7312 * We use a snapshot of length in case it
7313 * is expanding during the compare.
7314 */
7315 uint32_t llen;
7316
7317 llen = length;
7318 if (to_move >= llen) {
7319 to_move = llen;
7320 if (send_lock_up == 0) {
7321 /*-
7322 * We are taking all of an incomplete msg
7323 * thus we need a send lock.
7324 */
7325 SCTP_TCB_SEND_LOCK(stcb);
7326 send_lock_up = 1;
7327 if (sp->msg_is_complete) {
7328 /*
7329 * the sender finished the
7330 * msg
7331 */
7332 goto re_look;
7333 }
7334 }
7335 }
7336 if (sp->some_taken == 0) {
7337 rcv_flags |= SCTP_DATA_FIRST_FRAG;
7338 sp->some_taken = 1;
7339 }
7340 } else {
7341 /* Nothing to take. */
7342 *giveup = 1;
7343 to_move = 0;
7344 goto out_of;
7345 }
7346 }
7347
7348 /* If we reach here, we can copy out a chunk */
7349 sctp_alloc_a_chunk(stcb, chk);
7350 if (chk == NULL) {
7351 /* No chunk memory */
7352 *giveup = 1;
7353 to_move = 0;
7354 goto out_of;
7355 }
7356 /*
7357 * Setup for unordered if needed by looking at the user sent info
7358 * flags.
7359 */
7360 if (sp->sinfo_flags & SCTP_UNORDERED) {
7361 rcv_flags |= SCTP_DATA_UNORDERED;
7362 }
7363 if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) &&
7364 (sp->sinfo_flags & SCTP_EOF) == SCTP_EOF) {
7365 rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
7366 }
7367 /* clear out the chunk before setting up */
7368 memset(chk, 0, sizeof(*chk));
7369 chk->rec.data.rcv_flags = rcv_flags;
7370
7371 if (to_move >= length) {
7372 /* we think we can steal the whole thing */
7373 if ((sp->sender_all_done == 0) && (send_lock_up == 0)) {
7374 SCTP_TCB_SEND_LOCK(stcb);
7375 send_lock_up = 1;
7376 }
7377 if (to_move < sp->length) {
7378 /* bail, it changed */
7379 goto dont_do_it;
7380 }
7381 chk->data = sp->data;
7382 chk->last_mbuf = sp->tail_mbuf;
7383 /* register the stealing */
7384 sp->data = sp->tail_mbuf = NULL;
7385 } else {
7386 struct mbuf *m;
7387
7388dont_do_it:
7389 chk->data = SCTP_M_COPYM(sp->data, 0, to_move, M_NOWAIT);
7390 chk->last_mbuf = NULL;
7391 if (chk->data == NULL) {
7392 sp->some_taken = some_taken;
7393 sctp_free_a_chunk(stcb, chk, so_locked);
7394 *bail = 1;
7395 to_move = 0;
7396 goto out_of;
7397 }
7398#ifdef SCTP_MBUF_LOGGING
7399 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
7400 sctp_log_mbc(chk->data, SCTP_MBUF_ICOPY);
7401 }
7402#endif
7403 /* Pull off the data */
7404 m_adj(sp->data, to_move);
7405 /* Now let's work our way down and compact it */
7406 m = sp->data;
7407 while (m && (SCTP_BUF_LEN(m) == 0)) {
7408 sp->data = SCTP_BUF_NEXT(m);
7409 SCTP_BUF_NEXT(m) = NULL;
7410 if (sp->tail_mbuf == m) {
7411 /*-
7412 * Freeing tail? TSNH since
7413 * we supposedly were taking less
7414 * than the sp->length.
7415 */
7416#ifdef INVARIANTS
7417 panic("Huh, freing tail? - TSNH");
7418#else
7419 SCTP_PRINTF("Huh, freeing tail? - TSNH\n");
7420 sp->tail_mbuf = sp->data = NULL;
7421 sp->length = 0;
7422#endif
7423 }
7424 sctp_m_free(m);
7425 m = sp->data;
7426 }
7427 }
7428 if (SCTP_BUF_IS_EXTENDED(chk->data)) {
7429 chk->copy_by_ref = 1;
7430 } else {
7431 chk->copy_by_ref = 0;
7432 }
7433 /*
7434 * Get last_mbuf and a count of mbuf usage. This is ugly but hopefully
7435 * it's only one mbuf.
7436 */
7437 if (chk->last_mbuf == NULL) {
7438 chk->last_mbuf = chk->data;
7439 while (SCTP_BUF_NEXT(chk->last_mbuf) != NULL) {
7440 chk->last_mbuf = SCTP_BUF_NEXT(chk->last_mbuf);
7441 }
7442 }
7443
7444 if (to_move > length) {
7445 /*- This should not happen either
7446 * since we always lower to_move to the size
7447 * of sp->length if it's larger.
7448 */
7449#ifdef INVARIANTS
7450 panic("Huh, how can to_move be larger?");
7451#else
7452 SCTP_PRINTF("Huh, how can to_move be larger?\n");
7453 sp->length = 0;
7454#endif
7455 } else {
7456 atomic_subtract_int(&sp->length, to_move);
7457 }
7458 leading = SCTP_DATA_CHUNK_OVERHEAD(stcb);
7459 if (M_LEADINGSPACE(chk->data) < leading) {
7460 /* Not enough room for a chunk header, get some */
7461 struct mbuf *m;
7462
7463 m = sctp_get_mbuf_for_msg(1, 0, M_NOWAIT, 1, MT_DATA);
7464 if (m == NULL) {
7465 /*
7466 * we're in trouble here. _PREPEND below will free
7467 * all the data if there is no leading space, so we
7468 * must put the data back and restore.
7469 */
7470 if (send_lock_up == 0) {
7471 SCTP_TCB_SEND_LOCK(stcb);
7472 send_lock_up = 1;
7473 }
7474 if (sp->data == NULL) {
7475 /* unsteal the data */
7476 sp->data = chk->data;
7477 sp->tail_mbuf = chk->last_mbuf;
7478 } else {
7479 struct mbuf *m_tmp;
7480
7481 /* reassemble the data */
7482 m_tmp = sp->data;
7483 sp->data = chk->data;
7484 SCTP_BUF_NEXT(chk->last_mbuf) = m_tmp;
7485 }
7486 sp->some_taken = some_taken;
7487 atomic_add_int(&sp->length, to_move);
7488 chk->data = NULL;
7489 *bail = 1;
7490 sctp_free_a_chunk(stcb, chk, so_locked);
7491 to_move = 0;
7492 goto out_of;
7493 } else {
7494 SCTP_BUF_LEN(m) = 0;
7495 SCTP_BUF_NEXT(m) = chk->data;
7496 chk->data = m;
7497 M_ALIGN(chk->data, 4);
7498 }
7499 }
7500 SCTP_BUF_PREPEND(chk->data, SCTP_DATA_CHUNK_OVERHEAD(stcb), M_NOWAIT);
7501 if (chk->data == NULL) {
7502 /* HELP, TSNH since we assured it would not above? */
7503#ifdef INVARIANTS
7504 panic("prepend fails HELP?");
7505#else
7506 SCTP_PRINTF("prepend fails HELP?\n");
7507 sctp_free_a_chunk(stcb, chk, so_locked);
7508#endif
7509 *bail = 1;
7510 to_move = 0;
7511 goto out_of;
7512 }
7514 chk->book_size = chk->send_size = (uint16_t)(to_move + SCTP_DATA_CHUNK_OVERHEAD(stcb));
7515 chk->book_size_scale = 0;
7517
7518 chk->flags = 0;
7519 chk->asoc = &stcb->asoc;
7520 chk->pad_inplace = 0;
7521 chk->no_fr_allowed = 0;
7522 if (stcb->asoc.idata_supported == 0) {
7523 if (rcv_flags & SCTP_DATA_UNORDERED) {
7524 /* Just use 0. The receiver ignores the values. */
7525 chk->rec.data.mid = 0;
7526 } else {
7527 chk->rec.data.mid = strq->next_mid_ordered;
7528 if (rcv_flags & SCTP_DATA_LAST_FRAG) {
7529 strq->next_mid_ordered++;
7530 }
7531 }
7532 } else {
7533 if (rcv_flags & SCTP_DATA_UNORDERED) {
7534 chk->rec.data.mid = strq->next_mid_unordered;
7535 if (rcv_flags & SCTP_DATA_LAST_FRAG) {
7536 strq->next_mid_unordered++;
7537 }
7538 } else {
7539 chk->rec.data.mid = strq->next_mid_ordered;
7540 if (rcv_flags & SCTP_DATA_LAST_FRAG) {
7541 strq->next_mid_ordered++;
7542 }
7543 }
7544 }
7545 chk->rec.data.sid = sp->sid;
7546 chk->rec.data.ppid = sp->ppid;
7547 chk->rec.data.context = sp->context;
7549
7550 chk->rec.data.timetodrop = sp->ts;
7551 chk->flags = sp->act_flags;
7552
7553 if (sp->net) {
7554 chk->whoTo = sp->net;
7555 atomic_add_int(&chk->whoTo->ref_count, 1);
7556 } else
7557 chk->whoTo = NULL;
7558
7559 if (sp->holds_key_ref) {
7560 chk->auth_keyid = sp->auth_keyid;
7562 chk->holds_key_ref = 1;
7563 }
7564 stcb->asoc.ss_functions.sctp_ss_scheduled(stcb, net, asoc, strq, to_move);
7565 chk->rec.data.tsn = atomic_fetchadd_int(&asoc->sending_seq, 1);
7566 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_OUTQ) {
7568 (uint32_t)(uintptr_t)stcb, sp->length,
7569 (uint32_t)((chk->rec.data.sid << 16) | (0x0000ffff & chk->rec.data.mid)),
7570 chk->rec.data.tsn);
7571 }
7572 if (stcb->asoc.idata_supported == 0) {
7573 dchkh = mtod(chk->data, struct sctp_data_chunk *);
7574 } else {
7575 ndchkh = mtod(chk->data, struct sctp_idata_chunk *);
7576 }
7577 /*
7578 * Put the rest of the things in place now. Size was done earlier in
7579 * previous loop prior to padding.
7580 */
7581
7582#ifdef SCTP_ASOCLOG_OF_TSNS
7584 if (asoc->tsn_out_at >= SCTP_TSN_LOG_SIZE) {
7585 asoc->tsn_out_at = 0;
7586 asoc->tsn_out_wrapped = 1;
7587 }
7588 asoc->out_tsnlog[asoc->tsn_out_at].tsn = chk->rec.data.tsn;
7589 asoc->out_tsnlog[asoc->tsn_out_at].strm = chk->rec.data.sid;
7590 asoc->out_tsnlog[asoc->tsn_out_at].seq = chk->rec.data.mid;
7591 asoc->out_tsnlog[asoc->tsn_out_at].sz = chk->send_size;
7592 asoc->out_tsnlog[asoc->tsn_out_at].flgs = chk->rec.data.rcv_flags;
7593 asoc->out_tsnlog[asoc->tsn_out_at].stcb = (void *)stcb;
7594 asoc->out_tsnlog[asoc->tsn_out_at].in_pos = asoc->tsn_out_at;
7595 asoc->out_tsnlog[asoc->tsn_out_at].in_out = 2;
7596 asoc->tsn_out_at++;
7597#endif
7598 if (stcb->asoc.idata_supported == 0) {
7599 dchkh->ch.chunk_type = SCTP_DATA;
7600 dchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
7601 dchkh->dp.tsn = htonl(chk->rec.data.tsn);
7602 dchkh->dp.sid = htons(strq->sid);
7603 dchkh->dp.ssn = htons((uint16_t)chk->rec.data.mid);
7604 dchkh->dp.ppid = chk->rec.data.ppid;
7605 dchkh->ch.chunk_length = htons(chk->send_size);
7606 } else {
7607 ndchkh->ch.chunk_type = SCTP_IDATA;
7608 ndchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
7609 ndchkh->dp.tsn = htonl(chk->rec.data.tsn);
7610 ndchkh->dp.sid = htons(strq->sid);
7611 ndchkh->dp.reserved = htons(0);
7612 ndchkh->dp.mid = htonl(chk->rec.data.mid);
7613 if (sp->fsn == 0)
7614 ndchkh->dp.ppid_fsn.ppid = chk->rec.data.ppid;
7615 else
7616 ndchkh->dp.ppid_fsn.fsn = htonl(sp->fsn);
7617 sp->fsn++;
7618 ndchkh->ch.chunk_length = htons(chk->send_size);
7619 }
7620 /* Now advance the chk->send_size by the actual pad needed. */
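 /*
 * Example: a 5 byte fragment carried in a plain DATA chunk has a
 * book_size of 5 + 16 = 21; SCTP_SIZE32() rounds that to 24, so 3 pad
 * bytes are appended below.
 */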
7621 if (chk->send_size < SCTP_SIZE32(chk->book_size)) {
7622 /* need a pad */
7623 struct mbuf *lm;
7624 int pads;
7625
7626 pads = SCTP_SIZE32(chk->book_size) - chk->send_size;
7627 lm = sctp_pad_lastmbuf(chk->data, pads, chk->last_mbuf);
7628 if (lm != NULL) {
7629 chk->last_mbuf = lm;
7630 chk->pad_inplace = 1;
7631 }
7632 chk->send_size += pads;
7633 }
7634 if (PR_SCTP_ENABLED(chk->flags)) {
7635 asoc->pr_sctp_cnt++;
7636 }
7637 if (sp->msg_is_complete && (sp->length == 0) && (sp->sender_all_done)) {
7638 /* All done pull and kill the message */
7639 if (sp->put_last_out == 0) {
7640 SCTP_PRINTF("Gak, put out entire msg with NO end!-2\n");
7641 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
7642 sp->sender_all_done,
7643 sp->length,
7644 sp->msg_is_complete,
7645 sp->put_last_out,
7646 send_lock_up);
7647 }
7648 if (send_lock_up == 0) {
7649 SCTP_TCB_SEND_LOCK(stcb);
7650 send_lock_up = 1;
7651 }
7652 atomic_subtract_int(&asoc->stream_queue_cnt, 1);
7653 TAILQ_REMOVE(&strq->outqueue, sp, next);
7654 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp);
7655 if ((strq->state == SCTP_STREAM_RESET_PENDING) &&
7656 (strq->chunks_on_queues == 0) &&
7657 TAILQ_EMPTY(&strq->outqueue)) {
7658 stcb->asoc.trigger_reset = 1;
7659 }
7660 if (sp->net) {
7662 sp->net = NULL;
7663 }
7664 if (sp->data) {
7665 sctp_m_freem(sp->data);
7666 sp->data = NULL;
7667 }
7668 sctp_free_a_strmoq(stcb, sp, so_locked);
7669 }
7670 asoc->chunks_on_out_queue++;
7671 strq->chunks_on_queues++;
7672 TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next);
7673 asoc->send_queue_cnt++;
7674out_of:
7675 if (send_lock_up) {
7677 }
7678 return (to_move);
7679}
7680
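/*
 * Fill the general send queue for one destination: starting from the
 * stream chosen by the stream scheduler, keep calling
 * sctp_move_to_outqueue() until roughly one MTU worth of (word aligned)
 * space has been used or the scheduler has nothing more to offer.
 */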
7681static void
7682sctp_fill_outqueue(struct sctp_tcb *stcb, struct sctp_nets *net,
7683 uint32_t frag_point, int eeor_mode, int *quit_now,
7684 int so_locked)
7685{
7686 struct sctp_association *asoc;
7687 struct sctp_stream_out *strq;
7688 uint32_t space_left, moved, total_moved;
7689 int bail, giveup;
7690
7692 asoc = &stcb->asoc;
7693 total_moved = 0;
7694 switch (net->ro._l_addr.sa.sa_family) {
7695#ifdef INET
7696 case AF_INET:
7697 space_left = net->mtu - SCTP_MIN_V4_OVERHEAD;
7698 break;
7699#endif
7700#ifdef INET6
7701 case AF_INET6:
7702 space_left = net->mtu - SCTP_MIN_OVERHEAD;
7703 break;
7704#endif
7705 default:
7706 /* TSNH */
7707 space_left = net->mtu;
7708 break;
7709 }
7710 /* Need an allowance for the data chunk header too */
7711 space_left -= SCTP_DATA_CHUNK_OVERHEAD(stcb);
7712
7713 /* must make even word boundary */
7714 space_left &= 0xfffffffc;
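 /*
 * For example, a 1500 byte path MTU over IPv4 without I-DATA leaves
 * 1500 - 32 (IP + SCTP common header) - 16 (DATA chunk header) =
 * 1452 bytes, which is already a multiple of 4.
 */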
7715 strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
7716 giveup = 0;
7717 bail = 0;
7718 while ((space_left > 0) && (strq != NULL)) {
7719 moved = sctp_move_to_outqueue(stcb, net, strq, space_left,
7720 frag_point, &giveup, eeor_mode,
7721 &bail, so_locked);
7722 if ((giveup != 0) || (bail != 0)) {
7723 break;
7724 }
7725 strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
7726 total_moved += moved;
7727 if (space_left >= moved) {
7728 space_left -= moved;
7729 } else {
7730 space_left = 0;
7731 }
7732 if (space_left >= SCTP_DATA_CHUNK_OVERHEAD(stcb)) {
7733 space_left -= SCTP_DATA_CHUNK_OVERHEAD(stcb);
7734 } else {
7735 space_left = 0;
7736 }
7737 space_left &= 0xfffffffc;
7738 }
7739 if (bail != 0)
7740 *quit_now = 1;
7741
7742 stcb->asoc.ss_functions.sctp_ss_packet_done(stcb, net, asoc);
7743
7744 if (total_moved == 0) {
7745 if ((stcb->asoc.sctp_cmt_on_off == 0) &&
7746 (net == stcb->asoc.primary_destination)) {
7747 /* ran dry for primary network net */
7748 SCTP_STAT_INCR(sctps_primary_randry);
7749 } else if (stcb->asoc.sctp_cmt_on_off > 0) {
7750 /* ran dry with CMT on */
7751 SCTP_STAT_INCR(sctps_cmt_randry);
7752 }
7753 }
7754}
7755
7756void
7757sctp_fix_ecn_echo(struct sctp_association *asoc)
7758{
7759 struct sctp_tmit_chunk *chk;
7760
7761 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7762 if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
7763 chk->sent = SCTP_DATAGRAM_UNSENT;
7764 }
7765 }
7766}
7767
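/*
 * Detach every queued stream entry and unsent chunk from the given
 * destination so that a new path can be chosen for them; used for
 * example when a destination becomes unreachable during a send.
 */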
7768void
7769sctp_move_chunks_from_net(struct sctp_tcb *stcb, struct sctp_nets *net)
7770{
7771 struct sctp_association *asoc;
7772 struct sctp_tmit_chunk *chk;
7773 struct sctp_stream_queue_pending *sp;
7774 unsigned int i;
7775
7776 if (net == NULL) {
7777 return;
7778 }
7779 asoc = &stcb->asoc;
7780 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
7781 TAILQ_FOREACH(sp, &stcb->asoc.strmout[i].outqueue, next) {
7782 if (sp->net == net) {
7784 sp->net = NULL;
7785 }
7786 }
7787 }
7788 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
7789 if (chk->whoTo == net) {
7791 chk->whoTo = NULL;
7792 }
7793 }
7794}
7795
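/*
 * Core of the output path: prime the send queue from the stream queues,
 * then walk the destinations and bundle ASCONF, control and DATA chunks
 * (inserting an AUTH chunk where the peer requires one) into packets no
 * larger than the path MTU, handing each packet to
 * sctp_lowlevel_chunk_output().  *num_out reports how many chunks were
 * sent and *reason_code records why the loop stopped.
 */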
7796int
7797sctp_med_chunk_output(struct sctp_inpcb *inp,
7798 struct sctp_tcb *stcb,
7799 struct sctp_association *asoc,
7800 int *num_out,
7801 int *reason_code,
7802 int control_only, int from_where,
7803 struct timeval *now, int *now_filled,
7804 uint32_t frag_point, int so_locked)
7805{
7818 struct sctp_nets *net, *start_at, *sack_goes_to = NULL, *old_start_at = NULL;
7819 struct mbuf *outchain, *endoutchain;
7820 struct sctp_tmit_chunk *chk, *nchk;
7821
7822 /* temp arrays for unlinking */
7823 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
7824 int no_fragmentflg, error;
7825 unsigned int max_rwnd_per_dest, max_send_per_dest;
7826 int one_chunk, hbflag, skip_data_for_this_net;
7827 int asconf, cookie, no_out_cnt;
7828 int bundle_at, ctl_cnt, no_data_chunks, eeor_mode;
7829 unsigned int mtu, r_mtu, omtu, mx_mtu, to_out;
7830 int tsns_sent = 0;
7831 uint32_t auth_offset;
7832 struct sctp_auth_chunk *auth;
7833 uint16_t auth_keyid;
7834 int override_ok = 1;
7835 int skip_fill_up = 0;
7836 int data_auth_reqd = 0;
7837
7838 /*
7839 * JRS 5/14/07 - Add flag for whether a heartbeat is sent to the
7840 * destination.
7841 */
7842 int quit_now = 0;
7843
7844 *num_out = 0;
7845 *reason_code = 0;
7846 auth_keyid = stcb->asoc.authinfo.active_keyid;
7847 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
7850 eeor_mode = 1;
7851 } else {
7852 eeor_mode = 0;
7853 }
7854 ctl_cnt = no_out_cnt = asconf = cookie = 0;
7855 /*
7856 * First let's prime the pump. For each destination, if there is room
7857 * in the flight size, attempt to pull an MTU's worth out of the
7858 * stream queues into the general send_queue
7859 */
7860#ifdef SCTP_AUDITING_ENABLED
7861 sctp_audit_log(0xC2, 2);
7862#endif
7864 hbflag = 0;
7865 if (control_only)
7866 no_data_chunks = 1;
7867 else
7868 no_data_chunks = 0;
7869
7870 /* Nothing possible to send? */
7871 if ((TAILQ_EMPTY(&asoc->control_send_queue) ||
7872 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) &&
7873 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
7874 TAILQ_EMPTY(&asoc->send_queue) &&
7875 sctp_is_there_unsent_data(stcb, so_locked) == 0) {
7876nothing_to_send:
7877 *reason_code = 9;
7878 return (0);
7879 }
7880 if (asoc->peers_rwnd == 0) {
7881 /* No room in peers rwnd */
7882 *reason_code = 1;
7883 if (asoc->total_flight > 0) {
7884 /* we are allowed one chunk in flight */
7885 no_data_chunks = 1;
7886 }
7887 }
7888 if (stcb->asoc.ecn_echo_cnt_onq) {
7889 /* Record where a sack goes, if any */
7890 if (no_data_chunks &&
7891 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) {
7892 /* Nothing but ECNe to send - we don't do that */
7893 goto nothing_to_send;
7894 }
7895 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7896 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
7898 sack_goes_to = chk->whoTo;
7899 break;
7900 }
7901 }
7902 }
7903 max_rwnd_per_dest = ((asoc->peers_rwnd + asoc->total_flight) / asoc->numnets);
7904 if (stcb->sctp_socket)
7905 max_send_per_dest = SCTP_SB_LIMIT_SND(stcb->sctp_socket) / asoc->numnets;
7906 else
7907 max_send_per_dest = 0;
7908 if (no_data_chunks == 0) {
7909 /* How many non-directed chunks are there? */
7910 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
7911 if (chk->whoTo == NULL) {
7912 /*
7913 * We already have non-directed chunks on
7914 * the queue, no need to do a fill-up.
7915 */
7916 skip_fill_up = 1;
7917 break;
7918 }
7919 }
7920 }
7921 if ((no_data_chunks == 0) &&
7922 (skip_fill_up == 0) &&
7923 (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc))) {
7924 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7925 /*
7926 * This for loop we are in takes in each net, if
7927 * it's got space in cwnd and has data sent to it
7928 * (when CMT is off) then it calls
7929 * sctp_fill_outqueue for the net. This gets data on
7930 * the send queue for that network.
7931 *
7932 * In sctp_fill_outqueue TSN's are assigned and data
7933 * is copied out of the stream buffers. Note mostly
7934 * copy by reference (we hope).
7935 */
7936 net->window_probe = 0;
7937 if ((net != stcb->asoc.alternate) &&
7938 ((net->dest_state & SCTP_ADDR_PF) ||
7939 (!(net->dest_state & SCTP_ADDR_REACHABLE)) ||
7941 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7942 sctp_log_cwnd(stcb, net, 1,
7944 }
7945 continue;
7946 }
7948 (net->flight_size == 0)) {
7950 }
7951 if (net->flight_size >= net->cwnd) {
7952 /* skip this network, no room - can't fill */
7953 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7954 sctp_log_cwnd(stcb, net, 3,
7956 }
7957 continue;
7958 }
7959 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7961 }
7962 sctp_fill_outqueue(stcb, net, frag_point, eeor_mode, &quit_now, so_locked);
7963 if (quit_now) {
7964 /* memory alloc failure */
7965 no_data_chunks = 1;
7966 break;
7967 }
7968 }
7969 }
7970 /* now service each destination and send out what we can for it */
7971 /* Nothing to send? */
7972 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
7973 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
7974 TAILQ_EMPTY(&asoc->send_queue)) {
7975 *reason_code = 8;
7976 return (0);
7977 }
7978
7979 if (asoc->sctp_cmt_on_off > 0) {
7980 /* get the last start point */
7981 start_at = asoc->last_net_cmt_send_started;
7982 if (start_at == NULL) {
7983 /* null so to beginning */
7984 start_at = TAILQ_FIRST(&asoc->nets);
7985 } else {
7986 start_at = TAILQ_NEXT(asoc->last_net_cmt_send_started, sctp_next);
7987 if (start_at == NULL) {
7988 start_at = TAILQ_FIRST(&asoc->nets);
7989 }
7990 }
7991 asoc->last_net_cmt_send_started = start_at;
7992 } else {
7993 start_at = TAILQ_FIRST(&asoc->nets);
7994 }
7995 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7996 if (chk->whoTo == NULL) {
7997 if (asoc->alternate) {
7998 chk->whoTo = asoc->alternate;
7999 } else {
8000 chk->whoTo = asoc->primary_destination;
8001 }
8002 atomic_add_int(&chk->whoTo->ref_count, 1);
8003 }
8004 }
8005 old_start_at = NULL;
8006again_one_more_time:
8007 for (net = start_at; net != NULL; net = TAILQ_NEXT(net, sctp_next)) {
8008 /* how much can we send? */
8009 /* SCTPDBG("Examine for sending net:%x\n", (uint32_t)net); */
8010 if (old_start_at && (old_start_at == net)) {
8011 /* through the list completely. */
8012 break;
8013 }
8014 tsns_sent = 0xa;
8015 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
8016 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
8017 (net->flight_size >= net->cwnd)) {
8018 /*
8019 * Nothing on control or asconf and flight is full,
8020 * we can skip even in the CMT case.
8021 */
8022 continue;
8023 }
8024 bundle_at = 0;
8025 endoutchain = outchain = NULL;
8026 auth = NULL;
8027 auth_offset = 0;
8028 no_fragmentflg = 1;
8029 one_chunk = 0;
8030 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
8031 skip_data_for_this_net = 1;
8032 } else {
8033 skip_data_for_this_net = 0;
8034 }
8035 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
8036#ifdef INET
8037 case AF_INET:
8038 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8039 break;
8040#endif
8041#ifdef INET6
8042 case AF_INET6:
8043 mtu = net->mtu - SCTP_MIN_OVERHEAD;
8044 break;
8045#endif
8046 default:
8047 /* TSNH */
8048 mtu = net->mtu;
8049 break;
8050 }
8051 mx_mtu = mtu;
8052 to_out = 0;
8053 if (mtu > asoc->peers_rwnd) {
8054 if (asoc->total_flight > 0) {
8055 /* We have a packet in flight somewhere */
8056 r_mtu = asoc->peers_rwnd;
8057 } else {
8058 /* We are always allowed to send one MTU out */
8059 one_chunk = 1;
8060 r_mtu = mtu;
8061 }
8062 } else {
8063 r_mtu = mtu;
8064 }
8065 error = 0;
8066 /************************/
8067 /* ASCONF transmission */
8068 /************************/
8069 /* Now first lets go through the asconf queue */
8070 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
8071 if (chk->rec.chunk_id.id != SCTP_ASCONF) {
8072 continue;
8073 }
8074 if (chk->whoTo == NULL) {
8075 if (asoc->alternate == NULL) {
8076 if (asoc->primary_destination != net) {
8077 break;
8078 }
8079 } else {
8080 if (asoc->alternate != net) {
8081 break;
8082 }
8083 }
8084 } else {
8085 if (chk->whoTo != net) {
8086 break;
8087 }
8088 }
8089 if (chk->data == NULL) {
8090 break;
8091 }
8092 if (chk->sent != SCTP_DATAGRAM_UNSENT &&
8093 chk->sent != SCTP_DATAGRAM_RESEND) {
8094 break;
8095 }
8096 /*
8097 * if no AUTH is yet included and this chunk
8098 * requires it, make sure to account for it. We
8099 * don't apply the size until the AUTH chunk is
8100 * actually added below in case there is no room for
8101 * this chunk. NOTE: we overload the use of "omtu"
8102 * here
8103 */
8104 if ((auth == NULL) &&
8106 stcb->asoc.peer_auth_chunks)) {
8108 } else
8109 omtu = 0;
8110 /* Here we do NOT factor the r_mtu */
8111 if ((chk->send_size < (int)(mtu - omtu)) ||
8112 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
8113 /*
8114 * We probably should glom the mbuf chain
8115 * from the chk->data for control but the
8116 * problem is it becomes yet one more level
8117 * of tracking to do if for some reason
8118 * output fails. Then I have got to
8119 * reconstruct the merged control chain.. el
8120 * yucko.. for now we take the easy way and
8121 * do the copy
8122 */
8123 /*
8124 * Add an AUTH chunk, if chunk requires it
8125 * save the offset into the chain for AUTH
8126 */
8127 if ((auth == NULL) &&
8129 stcb->asoc.peer_auth_chunks))) {
8130 outchain = sctp_add_auth_chunk(outchain,
8131 &endoutchain,
8132 &auth,
8133 &auth_offset,
8134 stcb,
8135 chk->rec.chunk_id.id);
8136 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8137 }
8138 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
8139 (int)chk->rec.chunk_id.can_take_data,
8140 chk->send_size, chk->copy_by_ref);
8141 if (outchain == NULL) {
8142 *reason_code = 8;
8143 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8144 return (ENOMEM);
8145 }
8146 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8147 /* update our MTU size */
8148 if (mtu > (chk->send_size + omtu))
8149 mtu -= (chk->send_size + omtu);
8150 else
8151 mtu = 0;
8152 to_out += (chk->send_size + omtu);
8153 /* Do clear IP_DF ? */
8154 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8155 no_fragmentflg = 0;
8156 }
8157 if (chk->rec.chunk_id.can_take_data)
8158 chk->data = NULL;
8159 /*
8160 * set hb flag since we can use these for
8161 * RTO
8162 */
8163 hbflag = 1;
8164 asconf = 1;
8165 /*
8166 * should sysctl this: don't bundle data
8167 * with ASCONF since it requires AUTH
8168 */
8169 no_data_chunks = 1;
8170 chk->sent = SCTP_DATAGRAM_SENT;
8171 if (chk->whoTo == NULL) {
8172 chk->whoTo = net;
8173 atomic_add_int(&net->ref_count, 1);
8174 }
8175 chk->snd_count++;
8176 if (mtu == 0) {
8177 /*
8178 * Ok we are out of room but we can
8179 * output without affecting the
8180 * flight size since this little guy
8181 * is a control only packet.
8182 */
8184 /*
8185 * do NOT clear the asconf flag as
8186 * it is used to do appropriate
8187 * source address selection.
8188 */
8189 if (*now_filled == 0) {
8190 (void)SCTP_GETTIME_TIMEVAL(now);
8191 *now_filled = 1;
8192 }
8193 net->last_sent_time = *now;
8194 hbflag = 0;
8195 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
8196 (struct sockaddr *)&net->ro._l_addr,
8197 outchain, auth_offset, auth,
8199 no_fragmentflg, 0, asconf,
8200 inp->sctp_lport, stcb->rport,
8201 htonl(stcb->asoc.peer_vtag),
8202 net->port, NULL,
8203 0, 0,
8204 so_locked))) {
8205 /*
8206 * error, we could not
8207 * output
8208 */
8209 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
8210 if (from_where == 0) {
8211 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8212 }
8213 if (error == ENOBUFS) {
8214 asoc->ifp_had_enobuf = 1;
8215 SCTP_STAT_INCR(sctps_lowlevelerr);
8216 }
8217 /* error, could not output */
8218 if (error == EHOSTUNREACH) {
8219 /*
8220 * Destination went
8221 * unreachable
8222 * during this send
8223 */
8224 sctp_move_chunks_from_net(stcb, net);
8225 }
8226 *reason_code = 7;
8227 break;
8228 } else {
8229 asoc->ifp_had_enobuf = 0;
8230 }
8231 /*
8232 * increase the number we sent, if a
8233 * cookie is sent we don't tell them
8234 * any was sent out.
8235 */
8236 outchain = endoutchain = NULL;
8237 auth = NULL;
8238 auth_offset = 0;
8239 if (!no_out_cnt)
8240 *num_out += ctl_cnt;
8241 /* recalc a clean slate and setup */
8242 switch (net->ro._l_addr.sa.sa_family) {
8243#ifdef INET
8244 case AF_INET:
8245 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8246 break;
8247#endif
8248#ifdef INET6
8249 case AF_INET6:
8250 mtu = net->mtu - SCTP_MIN_OVERHEAD;
8251 break;
8252#endif
8253 default:
8254 /* TSNH */
8255 mtu = net->mtu;
8256 break;
8257 }
8258 to_out = 0;
8259 no_fragmentflg = 1;
8260 }
8261 }
8262 }
8263 if (error != 0) {
8264 /* try next net */
8265 continue;
8266 }
8267 /************************/
8268 /* Control transmission */
8269 /************************/
8270 /* Now first lets go through the control queue */
8271 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
8272 if ((sack_goes_to) &&
8273 (chk->rec.chunk_id.id == SCTP_ECN_ECHO) &&
8274 (chk->whoTo != sack_goes_to)) {
8275 /*
8276 * if we have a sack in queue, and we are
8277 * looking at an ecn echo that is NOT queued
8278 * to where the sack is going..
8279 */
8280 if (chk->whoTo == net) {
8281 /*
8282 * Don't transmit it to where its
8283 * going (current net)
8284 */
8285 continue;
8286 } else if (sack_goes_to == net) {
8287 /*
8288 * But do transmit it to this
8289 * address
8290 */
8291 goto skip_net_check;
8292 }
8293 }
8294 if (chk->whoTo == NULL) {
8295 if (asoc->alternate == NULL) {
8296 if (asoc->primary_destination != net) {
8297 continue;
8298 }
8299 } else {
8300 if (asoc->alternate != net) {
8301 continue;
8302 }
8303 }
8304 } else {
8305 if (chk->whoTo != net) {
8306 continue;
8307 }
8308 }
8309 skip_net_check:
8310 if (chk->data == NULL) {
8311 continue;
8312 }
8313 if (chk->sent != SCTP_DATAGRAM_UNSENT) {
8314 /*
8315 * It must be unsent. Cookies and ASCONF's
8316 * hang around but their timers will force them
8317 * out when marked for resend.
8318 */
8319 continue;
8320 }
8321 /*
8322 * if no AUTH is yet included and this chunk
8323 * requires it, make sure to account for it. We
8324 * don't apply the size until the AUTH chunk is
8325 * actually added below in case there is no room for
8326 * this chunk. NOTE: we overload the use of "omtu"
8327 * here
8328 */
8329 if ((auth == NULL) &&
8331 stcb->asoc.peer_auth_chunks)) {
8333 } else
8334 omtu = 0;
8335 /* Here we do NOT factor the r_mtu */
8336 if ((chk->send_size <= (int)(mtu - omtu)) ||
8337 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
8338 /*
8339 * We probably should glom the mbuf chain
8340 * from the chk->data for control but the
8341 * problem is it becomes yet one more level
8342 * of tracking to do if for some reason
8343 * output fails. Then I have got to
8344 * reconstruct the merged control chain.. el
8345 * yucko.. for now we take the easy way and
8346 * do the copy
8347 */
8348 /*
8349 * Add an AUTH chunk, if chunk requires it
8350 * save the offset into the chain for AUTH
8351 */
8352 if ((auth == NULL) &&
8354 stcb->asoc.peer_auth_chunks))) {
8355 outchain = sctp_add_auth_chunk(outchain,
8356 &endoutchain,
8357 &auth,
8358 &auth_offset,
8359 stcb,
8360 chk->rec.chunk_id.id);
8361 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8362 }
8363 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
8364 (int)chk->rec.chunk_id.can_take_data,
8365 chk->send_size, chk->copy_by_ref);
8366 if (outchain == NULL) {
8367 *reason_code = 8;
8368 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8369 return (ENOMEM);
8370 }
8371 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8372 /* update our MTU size */
8373 if (mtu > (chk->send_size + omtu))
8374 mtu -= (chk->send_size + omtu);
8375 else
8376 mtu = 0;
8377 to_out += (chk->send_size + omtu);
8378 /* Do clear IP_DF ? */
8379 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8380 no_fragmentflg = 0;
8381 }
8382 if (chk->rec.chunk_id.can_take_data)
8383 chk->data = NULL;
8384 /* Mark things to be removed, if needed */
8385 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8386 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
8388 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
8389 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
8390 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
8391 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
8392 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
8393 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
8394 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
8395 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
8396 if (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) {
8397 hbflag = 1;
8398 }
8399 /* remove these chunks at the end */
8400 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8402 /* turn off the timer */
8405 inp, stcb, NULL,
8407 }
8408 }
8409 ctl_cnt++;
8410 } else {
8411 /*
8412 * Other chunks, since they have
8413 * timers running (i.e. COOKIE) we
8414 * just "trust" that it gets sent or
8415 * retransmitted.
8416 */
8417 ctl_cnt++;
8418 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
8419 cookie = 1;
8420 no_out_cnt = 1;
8421 } else if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
8422 /*
8423 * Increment ecne send count
8424 * here this means we may be
8425 * over-zealous in our
8426 * counting if the send
8427 * fails, but it's the best
8428 * place to do it (we used
8429 * to do it in the queue of
8430 * the chunk, but that did
8431 * not tell how many times
8432 * it was sent).
8433 */
8434 SCTP_STAT_INCR(sctps_sendecne);
8435 }
8436 chk->sent = SCTP_DATAGRAM_SENT;
8437 if (chk->whoTo == NULL) {
8438 chk->whoTo = net;
8439 atomic_add_int(&net->ref_count, 1);
8440 }
8441 chk->snd_count++;
8442 }
8443 if (mtu == 0) {
8444 /*
8445 * Ok we are out of room but we can
8446 * output without affecting the
8447 * flight size since this little guy
8448 * is a control only packet.
8449 */
8450 if (asconf) {
8452 /*
8453 * do NOT clear the asconf
8454 * flag as it is used to do
8455 * appropriate source
8456 * address selection.
8457 */
8458 }
8459 if (cookie) {
8461 cookie = 0;
8462 }
8463 /* Only HB or ASCONF advances time */
8464 if (hbflag) {
8465 if (*now_filled == 0) {
8466 (void)SCTP_GETTIME_TIMEVAL(now);
8467 *now_filled = 1;
8468 }
8469 net->last_sent_time = *now;
8470 hbflag = 0;
8471 }
8472 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
8473 (struct sockaddr *)&net->ro._l_addr,
8474 outchain,
8475 auth_offset, auth,
8477 no_fragmentflg, 0, asconf,
8478 inp->sctp_lport, stcb->rport,
8479 htonl(stcb->asoc.peer_vtag),
8480 net->port, NULL,
8481 0, 0,
8482 so_locked))) {
8483 /*
8484 * error, we could not
8485 * output
8486 */
8487 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
8488 if (from_where == 0) {
8489 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8490 }
8491 if (error == ENOBUFS) {
8492 asoc->ifp_had_enobuf = 1;
8493 SCTP_STAT_INCR(sctps_lowlevelerr);
8494 }
8495 if (error == EHOSTUNREACH) {
8496 /*
8497 * Destination went
8498 * unreachable
8499 * during this send
8500 */
8501 sctp_move_chunks_from_net(stcb, net);
8502 }
8503 *reason_code = 7;
8504 break;
8505 } else {
8506 asoc->ifp_had_enobuf = 0;
8507 }
8508 /*
8509 * increase the number we sent, if a
8510 * cookie is sent we don't tell them
8511 * any was sent out.
8512 */
8513 outchain = endoutchain = NULL;
8514 auth = NULL;
8515 auth_offset = 0;
8516 if (!no_out_cnt)
8517 *num_out += ctl_cnt;
8518 /* recalc a clean slate and setup */
8519 switch (net->ro._l_addr.sa.sa_family) {
8520#ifdef INET
8521 case AF_INET:
8522 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8523 break;
8524#endif
8525#ifdef INET6
8526 case AF_INET6:
8527 mtu = net->mtu - SCTP_MIN_OVERHEAD;
8528 break;
8529#endif
8530 default:
8531 /* TSNH */
8532 mtu = net->mtu;
8533 break;
8534 }
8535 to_out = 0;
8536 no_fragmentflg = 1;
8537 }
8538 }
8539 }
8540 if (error != 0) {
8541 /* try next net */
8542 continue;
8543 }
8544 /* JRI: if dest is in PF state, do not send data to it */
8545 if ((asoc->sctp_cmt_on_off > 0) &&
8546 (net != stcb->asoc.alternate) &&
8547 (net->dest_state & SCTP_ADDR_PF)) {
8548 goto no_data_fill;
8549 }
8550 if (net->flight_size >= net->cwnd) {
8551 goto no_data_fill;
8552 }
8553 if ((asoc->sctp_cmt_on_off > 0) &&
8554 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_RECV_BUFFER_SPLITTING) &&
8555 (net->flight_size > max_rwnd_per_dest)) {
8556 goto no_data_fill;
8557 }
8558 /*
8559 * We need a specific accounting for the usage of the send
8560 * buffer. We also need to check the number of messages per
8561 * net. For now, this is better than nothing and it is disabled
8562 * by default...
8563 */
8564 if ((asoc->sctp_cmt_on_off > 0) &&
8565 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_SEND_BUFFER_SPLITTING) &&
8566 (max_send_per_dest > 0) &&
8567 (net->flight_size > max_send_per_dest)) {
8568 goto no_data_fill;
8569 }
8570 /*********************/
8571 /* Data transmission */
8572 /*********************/
8573 /*
8574 * if AUTH for DATA is required and no AUTH has been added
8575 * yet, account for this in the mtu now... if no data can be
8576 * bundled, this adjustment won't matter anyways since the
8577 * packet will be going out...
8578 */
8579 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA,
8580 stcb->asoc.peer_auth_chunks);
8581 if (data_auth_reqd && (auth == NULL)) {
8583 }
8584 /* now let's add any data within the MTU constraints */
8585 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
8586#ifdef INET
8587 case AF_INET:
8588 if (net->mtu > SCTP_MIN_V4_OVERHEAD)
8589 omtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8590 else
8591 omtu = 0;
8592 break;
8593#endif
8594#ifdef INET6
8595 case AF_INET6:
8596 if (net->mtu > SCTP_MIN_OVERHEAD)
8597 omtu = net->mtu - SCTP_MIN_OVERHEAD;
8598 else
8599 omtu = 0;
8600 break;
8601#endif
8602 default:
8603 /* TSNH */
8604 omtu = 0;
8605 break;
8606 }
8607 if ((((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
8609 (skip_data_for_this_net == 0)) ||
8610 (cookie)) {
8611 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
8612 if (no_data_chunks) {
8613 /* let only control go out */
8614 *reason_code = 1;
8615 break;
8616 }
8617 if (net->flight_size >= net->cwnd) {
8618 /* skip this net, no room for data */
8619 *reason_code = 2;
8620 break;
8621 }
8622 if ((chk->whoTo != NULL) &&
8623 (chk->whoTo != net)) {
8624 /* Don't send the chunk on this net */
8625 continue;
8626 }
8627
8628 if (asoc->sctp_cmt_on_off == 0) {
8629 if ((asoc->alternate) &&
8630 (asoc->alternate != net) &&
8631 (chk->whoTo == NULL)) {
8632 continue;
8633 } else if ((net != asoc->primary_destination) &&
8634 (asoc->alternate == NULL) &&
8635 (chk->whoTo == NULL)) {
8636 continue;
8637 }
8638 }
8639 if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) {
8640 /*-
8641 * strange, we have a chunk that is
8642 * too big for its destination and
8643 * yet no fragment ok flag.
8644 * Something went wrong when the
8645 * PMTU changed...we did not mark
8646 * this chunk for some reason?? I
8647 * will fix it here by letting IP
8648 * fragment it for now and printing
8649 * a warning. This really should not
8650 * happen ...
8651 */
8652 SCTP_PRINTF("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n",
8653 chk->send_size, mtu);
8655 }
8656 if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) &&
8658 struct sctp_data_chunk *dchkh;
8659
8660 dchkh = mtod(chk->data, struct sctp_data_chunk *);
8662 }
8663 if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) ||
8664 ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) {
8665 /* ok we will add this one */
8666
8667 /*
8668 * Add an AUTH chunk, if chunk
8669 * requires it, save the offset into
8670 * the chain for AUTH
8671 */
8672 if (data_auth_reqd) {
8673 if (auth == NULL) {
8674 outchain = sctp_add_auth_chunk(outchain,
8675 &endoutchain,
8676 &auth,
8677 &auth_offset,
8678 stcb,
8679 SCTP_DATA);
8680 auth_keyid = chk->auth_keyid;
8681 override_ok = 0;
8682 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8683 } else if (override_ok) {
8684 /*
8685 * use this data's
8686 * keyid
8687 */
8688 auth_keyid = chk->auth_keyid;
8689 override_ok = 0;
8690 } else if (auth_keyid != chk->auth_keyid) {
8691 /*
8692 * different keyid,
8693 * so done bundling
8694 */
8695 break;
8696 }
8697 }
8698 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 0,
8699 chk->send_size, chk->copy_by_ref);
8700 if (outchain == NULL) {
8701 SCTPDBG(SCTP_DEBUG_OUTPUT3, "No memory?\n");
8703 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
8704 }
8705 *reason_code = 3;
8706 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8707 return (ENOMEM);
8708 }
8709 /* update our MTU size */
8710 /* Do clear IP_DF ? */
8711 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8712 no_fragmentflg = 0;
8713 }
8714 /* unsigned subtraction of mtu */
8715 if (mtu > chk->send_size)
8716 mtu -= chk->send_size;
8717 else
8718 mtu = 0;
8719 /* unsigned subtraction of r_mtu */
8720 if (r_mtu > chk->send_size)
8721 r_mtu -= chk->send_size;
8722 else
8723 r_mtu = 0;
8724
8725 to_out += chk->send_size;
8726 if ((to_out > mx_mtu) && no_fragmentflg) {
8727#ifdef INVARIANTS
8728 panic("Exceeding mtu of %d out size is %d", mx_mtu, to_out);
8729#else
8730 SCTP_PRINTF("Exceeding mtu of %d out size is %d\n",
8731 mx_mtu, to_out);
8732#endif
8733 }
8734 chk->window_probe = 0;
8735 data_list[bundle_at++] = chk;
8736 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
8737 break;
8738 }
8739 if (chk->sent == SCTP_DATAGRAM_UNSENT) {
8740 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
8741 SCTP_STAT_INCR_COUNTER64(sctps_outorderchunks);
8742 } else {
8743 SCTP_STAT_INCR_COUNTER64(sctps_outunorderchunks);
8744 }
8746 ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0))
8747 /*
8748 * Count number of
8749 * user msg's that
8750 * were fragmented
8751 * we do this by
8752 * counting when we
8753 * see a LAST
8754 * fragment only.
8755 */
8756 SCTP_STAT_INCR_COUNTER64(sctps_fragusrmsgs);
8757 }
8758 if ((mtu == 0) || (r_mtu == 0) || (one_chunk)) {
8759 if ((one_chunk) && (stcb->asoc.total_flight == 0)) {
8760 data_list[0]->window_probe = 1;
8761 net->window_probe = 1;
8762 }
8763 break;
8764 }
8765 } else {
8766 /*
8767 * Must be sent in order of the
8768 * TSN's (on a network)
8769 */
8770 break;
8771 }
8772 } /* for (chunk gather loop for this net) */
8773 } /* if asoc.state OPEN */
8774no_data_fill:
8775 /* Is there something to send for this destination? */
8776 if (outchain) {
8777 /* We may need to start a control timer or two */
8778 if (asconf) {
8780 stcb, net);
8781 /*
8782 * do NOT clear the asconf flag as it is
8783 * used to do appropriate source address
8784 * selection.
8785 */
8786 }
8787 if (cookie) {
8789 cookie = 0;
8790 }
8791 /* must start a send timer if data is being sent */
8792 if (bundle_at && (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
8793 /*
8794 * no timer running on this destination
8795 * restart it.
8796 */
8797 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
8798 }
8799 if (bundle_at || hbflag) {
8800 /* For data/asconf and hb set time */
8801 if (*now_filled == 0) {
8802 (void)SCTP_GETTIME_TIMEVAL(now);
8803 *now_filled = 1;
8804 }
8805 net->last_sent_time = *now;
8806 }
8807 /* Now send it, if there is anything to send :> */
8808 if ((error = sctp_lowlevel_chunk_output(inp,
8809 stcb,
8810 net,
8811 (struct sockaddr *)&net->ro._l_addr,
8812 outchain,
8813 auth_offset,
8814 auth,
8815 auth_keyid,
8816 no_fragmentflg,
8817 bundle_at,
8818 asconf,
8819 inp->sctp_lport, stcb->rport,
8820 htonl(stcb->asoc.peer_vtag),
8821 net->port, NULL,
8822 0, 0,
8823 so_locked))) {
8824 /* error, we could not output */
8825 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
8826 if (from_where == 0) {
8827 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8828 }
8829 if (error == ENOBUFS) {
8830 asoc->ifp_had_enobuf = 1;
8831 SCTP_STAT_INCR(sctps_lowlevelerr);
8832 }
8833 if (error == EHOSTUNREACH) {
8834 /*
8835 * Destination went unreachable
8836 * during this send
8837 */
8838 sctp_move_chunks_from_net(stcb, net);
8839 }
8840 *reason_code = 6;
8841 /*-
8842 * I add this line to be paranoid. As far as
8843 * I can tell the continue takes us back to
8844 * the top of the for, but just to make sure
8845 * I will reset these again here.
8846 */
8847 ctl_cnt = 0;
8848 continue; /* This takes us back to the
8849 * for() for the nets. */
8850 } else {
8851 asoc->ifp_had_enobuf = 0;
8852 }
8853 endoutchain = NULL;
8854 auth = NULL;
8855 auth_offset = 0;
8856 if (!no_out_cnt) {
8857 *num_out += (ctl_cnt + bundle_at);
8858 }
8859 if (bundle_at) {
8860 /* setup for a RTO measurement */
8861 tsns_sent = data_list[0]->rec.data.tsn;
8862 /* fill time if not already filled */
8863 if (*now_filled == 0) {
8865 *now_filled = 1;
8866 *now = asoc->time_last_sent;
8867 } else {
8868 asoc->time_last_sent = *now;
8869 }
8870 if (net->rto_needed) {
8871 data_list[0]->do_rtt = 1;
8872 net->rto_needed = 0;
8873 }
8874 SCTP_STAT_INCR_BY(sctps_senddata, bundle_at);
8875 sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net);
8876 }
8877 if (one_chunk) {
8878 break;
8879 }
8880 }
8881 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8882 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_SEND);
8883 }
8884 }
8885 if (old_start_at == NULL) {
8886 old_start_at = start_at;
8887 start_at = TAILQ_FIRST(&asoc->nets);
8888 if (old_start_at)
8889 goto again_one_more_time;
8890 }
8891
8892 /*
8893 * At the end there should be no NON timed chunks hanging on this
8894 * queue.
8895 */
8896 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8897 sctp_log_cwnd(stcb, net, *num_out, SCTP_CWND_LOG_FROM_SEND);
8898 }
8899 if ((*num_out == 0) && (*reason_code == 0)) {
8900 *reason_code = 4;
8901 } else {
8902 *reason_code = 5;
8903 }
8904 sctp_clean_up_ctl(stcb, asoc, so_locked);
8905 return (0);
8906}
8907
8908void
8909sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
8910{
8911 /*-
8912 * Prepend an OPERATIONAL_ERROR chunk header and put on the end of
8913 * the control chunk queue.
8914 */
8915 struct sctp_chunkhdr *hdr;
8916 struct sctp_tmit_chunk *chk;
8917 struct mbuf *mat, *last_mbuf;
8918 uint32_t chunk_length;
8919 uint16_t padding_length;
8920
8922 SCTP_BUF_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_NOWAIT);
8923 if (op_err == NULL) {
8924 return;
8925 }
8926 last_mbuf = NULL;
8927 chunk_length = 0;
8928 for (mat = op_err; mat != NULL; mat = SCTP_BUF_NEXT(mat)) {
8929 chunk_length += SCTP_BUF_LEN(mat);
8930 if (SCTP_BUF_NEXT(mat) == NULL) {
8931 last_mbuf = mat;
8932 }
8933 }
8934 if (chunk_length > SCTP_MAX_CHUNK_LENGTH) {
8935 sctp_m_freem(op_err);
8936 return;
8937 }
8938 padding_length = chunk_length % 4;
8939 if (padding_length != 0) {
8940 padding_length = 4 - padding_length;
8941 }
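 /*
 * Example: a 23 byte error chunk gets 1 byte of padding so that the
 * following chunk starts on a 4 byte boundary; a multiple of 4 needs
 * no padding at all.
 */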
8942 if (padding_length != 0) {
8943 if (sctp_add_pad_tombuf(last_mbuf, padding_length) == NULL) {
8944 sctp_m_freem(op_err);
8945 return;
8946 }
8947 }
8948 sctp_alloc_a_chunk(stcb, chk);
8949 if (chk == NULL) {
8950 /* no memory */
8951 sctp_m_freem(op_err);
8952 return;
8953 }
8954 chk->copy_by_ref = 0;
8956 chk->rec.chunk_id.can_take_data = 0;
8957 chk->flags = 0;
8958 chk->send_size = (uint16_t)chunk_length;
8960 chk->snd_count = 0;
8961 chk->asoc = &stcb->asoc;
8962 chk->data = op_err;
8963 chk->whoTo = NULL;
8964 hdr = mtod(op_err, struct sctp_chunkhdr *);
8966 hdr->chunk_flags = 0;
8967 hdr->chunk_length = htons(chk->send_size);
8968 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
8969 chk->asoc->ctrl_queue_cnt++;
8970}
8971
8972int
8973sctp_send_cookie_echo(struct mbuf *m,
8974 int offset, int limit,
8975 struct sctp_tcb *stcb,
8976 struct sctp_nets *net)
8977{
8978 /*-
8979 * pull out the cookie and put it at the front of the control chunk
8980 * queue.
8981 */
8982 int at;
8983 struct mbuf *cookie;
8984 struct sctp_paramhdr param, *phdr;
8985 struct sctp_chunkhdr *hdr;
8986 struct sctp_tmit_chunk *chk;
8987 uint16_t ptype, plen;
8988
8990 /* First find the cookie in the param area */
8991 cookie = NULL;
8992 at = offset + sizeof(struct sctp_init_chunk);
8993 for (;;) {
8994 phdr = sctp_get_next_param(m, at, &param, sizeof(param));
8995 if (phdr == NULL) {
8996 return (-3);
8997 }
8998 ptype = ntohs(phdr->param_type);
8999 plen = ntohs(phdr->param_length);
9000 if (plen < sizeof(struct sctp_paramhdr)) {
9001 return (-6);
9002 }
9003 if (ptype == SCTP_STATE_COOKIE) {
9004 int pad;
9005
9006 /* found the cookie */
9007 if (at + plen > limit) {
9008 return (-7);
9009 }
9010 cookie = SCTP_M_COPYM(m, at, plen, M_NOWAIT);
9011 if (cookie == NULL) {
9012 /* No memory */
9013 return (-2);
9014 }
9015 if ((pad = (plen % 4)) > 0) {
9016 pad = 4 - pad;
9017 }
9018 if (pad > 0) {
9019 if (sctp_pad_lastmbuf(cookie, pad, NULL) == NULL) {
9020 return (-8);
9021 }
9022 }
9023#ifdef SCTP_MBUF_LOGGING
9024 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9025 sctp_log_mbc(cookie, SCTP_MBUF_ICOPY);
9026 }
9027#endif
9028 break;
9029 }
9030 at += SCTP_SIZE32(plen);
9031 }
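	/*
	 * Illustrative note: the parameters of the received INIT-ACK are
	 * TLVs whose length field counts the 4-byte parameter header plus
	 * the value but not the padding, so the scan above advances by
	 * SCTP_SIZE32(plen). A State Cookie parameter with plen 57 would
	 * therefore move the cursor forward by 60 bytes.
	 */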
9032	/* ok, we got the cookie; let's change it into a cookie echo chunk */
9033 /* first the change from param to cookie */
9034 hdr = mtod(cookie, struct sctp_chunkhdr *);
9036 hdr->chunk_flags = 0;
9037 /* get the chunk stuff now and place it in the FRONT of the queue */
9038 sctp_alloc_a_chunk(stcb, chk);
9039 if (chk == NULL) {
9040 /* no memory */
9042 return (-5);
9043 }
9044 chk->copy_by_ref = 0;
9046 chk->rec.chunk_id.can_take_data = 0;
9048 chk->send_size = SCTP_SIZE32(plen);
9050 chk->snd_count = 0;
9051 chk->asoc = &stcb->asoc;
9052 chk->data = cookie;
9053 chk->whoTo = net;
9054 atomic_add_int(&chk->whoTo->ref_count, 1);
9055 TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next);
9056 chk->asoc->ctrl_queue_cnt++;
9057 return (0);
9058}
9059
9060 void
9061 sctp_send_heartbeat_ack(struct sctp_tcb *stcb,
9062 struct mbuf *m,
9063 int offset,
9064 int chk_length,
9065 struct sctp_nets *net)
9066{
9067 /*
9068	 * take an HB request and make it into an HB ack and queue it for sending.
9069 */
9070 struct mbuf *outchain;
9071 struct sctp_chunkhdr *chdr;
9072 struct sctp_tmit_chunk *chk;
9073
9074 if (net == NULL)
9075 /* must have a net pointer */
9076 return;
9077
9078 outchain = SCTP_M_COPYM(m, offset, chk_length, M_NOWAIT);
9079 if (outchain == NULL) {
9080 /* gak out of memory */
9081 return;
9082 }
9083#ifdef SCTP_MBUF_LOGGING
9084 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9085 sctp_log_mbc(outchain, SCTP_MBUF_ICOPY);
9086 }
9087#endif
9088 chdr = mtod(outchain, struct sctp_chunkhdr *);
9090 chdr->chunk_flags = 0;
9091 if (chk_length % 4 != 0) {
9092 sctp_pad_lastmbuf(outchain, 4 - (chk_length % 4), NULL);
9093 }
9094 sctp_alloc_a_chunk(stcb, chk);
9095 if (chk == NULL) {
9096 /* no memory */
9097 sctp_m_freem(outchain);
9098 return;
9099 }
9100 chk->copy_by_ref = 0;
9102 chk->rec.chunk_id.can_take_data = 1;
9103 chk->flags = 0;
9104 chk->send_size = chk_length;
9106 chk->snd_count = 0;
9107 chk->asoc = &stcb->asoc;
9108 chk->data = outchain;
9109 chk->whoTo = net;
9110 atomic_add_int(&chk->whoTo->ref_count, 1);
9111 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9112 chk->asoc->ctrl_queue_cnt++;
9113}
9114
9115 void
9116 sctp_send_cookie_ack(struct sctp_tcb *stcb)
9117{
9118 /* formulate and queue a cookie-ack back to sender */
9119 struct mbuf *cookie_ack;
9120 struct sctp_chunkhdr *hdr;
9121 struct sctp_tmit_chunk *chk;
9122
9124
9125 cookie_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER);
9126 if (cookie_ack == NULL) {
9127 /* no mbuf's */
9128 return;
9129 }
9131 sctp_alloc_a_chunk(stcb, chk);
9132 if (chk == NULL) {
9133 /* no memory */
9134 sctp_m_freem(cookie_ack);
9135 return;
9136 }
9137 chk->copy_by_ref = 0;
9139 chk->rec.chunk_id.can_take_data = 1;
9140 chk->flags = 0;
9141 chk->send_size = sizeof(struct sctp_chunkhdr);
9143 chk->snd_count = 0;
9144 chk->asoc = &stcb->asoc;
9145 chk->data = cookie_ack;
9146 if (chk->asoc->last_control_chunk_from != NULL) {
9147 chk->whoTo = chk->asoc->last_control_chunk_from;
9148 atomic_add_int(&chk->whoTo->ref_count, 1);
9149 } else {
9150 chk->whoTo = NULL;
9151 }
9152 hdr = mtod(cookie_ack, struct sctp_chunkhdr *);
9154 hdr->chunk_flags = 0;
9155 hdr->chunk_length = htons(chk->send_size);
9156 SCTP_BUF_LEN(cookie_ack) = chk->send_size;
9157 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9158 chk->asoc->ctrl_queue_cnt++;
9159 return;
9160}
9161
9162 void
9163 sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net)
9164{
9165 /* formulate and queue a SHUTDOWN-ACK back to the sender */
9166 struct mbuf *m_shutdown_ack;
9167 struct sctp_shutdown_ack_chunk *ack_cp;
9168 struct sctp_tmit_chunk *chk;
9169
9170 m_shutdown_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_ack_chunk), 0, M_NOWAIT, 1, MT_HEADER);
9171 if (m_shutdown_ack == NULL) {
9172 /* no mbuf's */
9173 return;
9174 }
9175 SCTP_BUF_RESV_UF(m_shutdown_ack, SCTP_MIN_OVERHEAD);
9176 sctp_alloc_a_chunk(stcb, chk);
9177 if (chk == NULL) {
9178 /* no memory */
9179 sctp_m_freem(m_shutdown_ack);
9180 return;
9181 }
9182 chk->copy_by_ref = 0;
9184 chk->rec.chunk_id.can_take_data = 1;
9185 chk->flags = 0;
9186 chk->send_size = sizeof(struct sctp_chunkhdr);
9188 chk->snd_count = 0;
9189 chk->asoc = &stcb->asoc;
9190 chk->data = m_shutdown_ack;
9191 chk->whoTo = net;
9192 if (chk->whoTo) {
9193 atomic_add_int(&chk->whoTo->ref_count, 1);
9194 }
9195 ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *);
9197 ack_cp->ch.chunk_flags = 0;
9198 ack_cp->ch.chunk_length = htons(chk->send_size);
9199 SCTP_BUF_LEN(m_shutdown_ack) = chk->send_size;
9200 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9201 chk->asoc->ctrl_queue_cnt++;
9202 return;
9203}
9204
9205void
9206sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net)
9207{
9208 /* formulate and queue a SHUTDOWN to the sender */
9209 struct mbuf *m_shutdown;
9210 struct sctp_shutdown_chunk *shutdown_cp;
9211 struct sctp_tmit_chunk *chk;
9212
9213 TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
9214 if (chk->rec.chunk_id.id == SCTP_SHUTDOWN) {
9215 /* We already have a SHUTDOWN queued. Reuse it. */
9216 if (chk->whoTo) {
9218 chk->whoTo = NULL;
9219 }
9220 break;
9221 }
9222 }
9223 if (chk == NULL) {
9224 m_shutdown = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_chunk), 0, M_NOWAIT, 1, MT_HEADER);
9225 if (m_shutdown == NULL) {
9226 /* no mbuf's */
9227 return;
9228 }
9230 sctp_alloc_a_chunk(stcb, chk);
9231 if (chk == NULL) {
9232 /* no memory */
9233 sctp_m_freem(m_shutdown);
9234 return;
9235 }
9236 chk->copy_by_ref = 0;
9238 chk->rec.chunk_id.can_take_data = 1;
9239 chk->flags = 0;
9240 chk->send_size = sizeof(struct sctp_shutdown_chunk);
9242 chk->snd_count = 0;
9243 chk->asoc = &stcb->asoc;
9244 chk->data = m_shutdown;
9245 chk->whoTo = net;
9246 if (chk->whoTo) {
9247 atomic_add_int(&chk->whoTo->ref_count, 1);
9248 }
9249 shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *);
9250 shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN;
9251 shutdown_cp->ch.chunk_flags = 0;
9252 shutdown_cp->ch.chunk_length = htons(chk->send_size);
9253 shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
9254 SCTP_BUF_LEN(m_shutdown) = chk->send_size;
9255 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9256 chk->asoc->ctrl_queue_cnt++;
9257 } else {
9258 TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk, sctp_next);
9259 chk->whoTo = net;
9260 if (chk->whoTo) {
9261 atomic_add_int(&chk->whoTo->ref_count, 1);
9262 }
9263 shutdown_cp = mtod(chk->data, struct sctp_shutdown_chunk *);
9264 shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
9265 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
9266 }
9267 return;
9268}
9269
9270void
9271sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net, int addr_locked)
9272{
9273 /*
9274 * formulate and queue an ASCONF to the peer. ASCONF parameters
9275 * should be queued on the assoc queue.
9276 */
9277 struct sctp_tmit_chunk *chk;
9278 struct mbuf *m_asconf;
9279 int len;
9280
9282
9283 if ((!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) &&
9285 /* can't send a new one if there is one in flight already */
9286 return;
9287 }
9288
9289 /* compose an ASCONF chunk, maximum length is PMTU */
9290 m_asconf = sctp_compose_asconf(stcb, &len, addr_locked);
9291 if (m_asconf == NULL) {
9292 return;
9293 }
9294
9295 sctp_alloc_a_chunk(stcb, chk);
9296 if (chk == NULL) {
9297 /* no memory */
9298 sctp_m_freem(m_asconf);
9299 return;
9300 }
9301
9302 chk->copy_by_ref = 0;
9303 chk->rec.chunk_id.id = SCTP_ASCONF;
9304 chk->rec.chunk_id.can_take_data = 0;
9306 chk->data = m_asconf;
9307 chk->send_size = len;
9309 chk->snd_count = 0;
9310 chk->asoc = &stcb->asoc;
9311 chk->whoTo = net;
9312 if (chk->whoTo) {
9313 atomic_add_int(&chk->whoTo->ref_count, 1);
9314 }
9315 TAILQ_INSERT_TAIL(&chk->asoc->asconf_send_queue, chk, sctp_next);
9316 chk->asoc->ctrl_queue_cnt++;
9317 return;
9318}
9319
9320 void
9321 sctp_send_asconf_ack(struct sctp_tcb *stcb)
9322{
9323 /*
9324	 * formulate and queue an asconf-ack back to the sender. The asconf-ack
9325	 * must be stored in the tcb.
9326 */
9327 struct sctp_tmit_chunk *chk;
9328 struct sctp_asconf_ack *ack, *latest_ack;
9329 struct mbuf *m_ack;
9330 struct sctp_nets *net = NULL;
9331
9333 /* Get the latest ASCONF-ACK */
9334 latest_ack = TAILQ_LAST(&stcb->asoc.asconf_ack_sent, sctp_asconf_ackhead);
9335 if (latest_ack == NULL) {
9336 return;
9337 }
9338 if (latest_ack->last_sent_to != NULL &&
9339 latest_ack->last_sent_to == stcb->asoc.last_control_chunk_from) {
9340 /* we're doing a retransmission */
9342 if (net == NULL) {
9343 /* no alternate */
9344 if (stcb->asoc.last_control_chunk_from == NULL) {
9345 if (stcb->asoc.alternate) {
9346 net = stcb->asoc.alternate;
9347 } else {
9348 net = stcb->asoc.primary_destination;
9349 }
9350 } else {
9351 net = stcb->asoc.last_control_chunk_from;
9352 }
9353 }
9354 } else {
9355 /* normal case */
9356 if (stcb->asoc.last_control_chunk_from == NULL) {
9357 if (stcb->asoc.alternate) {
9358 net = stcb->asoc.alternate;
9359 } else {
9360 net = stcb->asoc.primary_destination;
9361 }
9362 } else {
9363 net = stcb->asoc.last_control_chunk_from;
9364 }
9365 }
9366 latest_ack->last_sent_to = net;
9367
9368 TAILQ_FOREACH(ack, &stcb->asoc.asconf_ack_sent, next) {
9369 if (ack->data == NULL) {
9370 continue;
9371 }
9372
9373 /* copy the asconf_ack */
9374 m_ack = SCTP_M_COPYM(ack->data, 0, M_COPYALL, M_NOWAIT);
9375 if (m_ack == NULL) {
9376 /* couldn't copy it */
9377 return;
9378 }
9379#ifdef SCTP_MBUF_LOGGING
9380 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9381 sctp_log_mbc(m_ack, SCTP_MBUF_ICOPY);
9382 }
9383#endif
9384
9385 sctp_alloc_a_chunk(stcb, chk);
9386 if (chk == NULL) {
9387 /* no memory */
9388 if (m_ack)
9389 sctp_m_freem(m_ack);
9390 return;
9391 }
9392 chk->copy_by_ref = 0;
9394 chk->rec.chunk_id.can_take_data = 1;
9396 chk->whoTo = net;
9397 if (chk->whoTo) {
9398 atomic_add_int(&chk->whoTo->ref_count, 1);
9399 }
9400 chk->data = m_ack;
9401 chk->send_size = ack->len;
9403 chk->snd_count = 0;
9404 chk->asoc = &stcb->asoc;
9405
9406 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9407 chk->asoc->ctrl_queue_cnt++;
9408 }
9409 return;
9410}
9411
9412 static int
9413 sctp_chunk_retransmission(struct sctp_inpcb *inp,
9414 struct sctp_tcb *stcb,
9415 struct sctp_association *asoc,
9416 int *cnt_out, struct timeval *now, int *now_filled, int *fr_done, int so_locked)
9417{
9418 /*-
9419 * send out one MTU of retransmission. If fast_retransmit is
9420 * happening we ignore the cwnd. Otherwise we obey the cwnd and
9421 * rwnd. For a Cookie or Asconf in the control chunk queue we
9422 * retransmit them by themselves.
9423 *
9424 * For data chunks we will pick out the lowest TSN's in the sent_queue
9425 * marked for resend and bundle them all together (up to a MTU of
9426 * destination). The address to send to should have been
9427 * selected/changed where the retransmission was marked (i.e. in FR
9428 * or t3-timeout routines).
9429 */
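	/*
	 * Rough sketch of the policy implemented below: control chunks
	 * (COOKIE-ECHO, STREAM-RESET, FORWARD-TSN) marked for resend are
	 * sent on their own; otherwise the lowest TSNs marked
	 * SCTP_DATAGRAM_RESEND are bundled toward a single destination
	 * until its MTU (minus any AUTH overhead) or
	 * SCTP_MAX_DATA_BUNDLING is reached.
	 */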
9430 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
9431 struct sctp_tmit_chunk *chk, *fwd;
9432 struct mbuf *m, *endofchain;
9433 struct sctp_nets *net = NULL;
9434 uint32_t tsns_sent = 0;
9435 int no_fragmentflg, bundle_at, cnt_thru;
9436 unsigned int mtu;
9437 int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started;
9438 struct sctp_auth_chunk *auth = NULL;
9439 uint32_t auth_offset = 0;
9440 uint16_t auth_keyid;
9441 int override_ok = 1;
9442 int data_auth_reqd = 0;
9443 uint32_t dmtu = 0;
9444
9446 tmr_started = ctl_cnt = 0;
9447 no_fragmentflg = 1;
9448 fwd_tsn = 0;
9449 *cnt_out = 0;
9450 fwd = NULL;
9451 endofchain = m = NULL;
9452 auth_keyid = stcb->asoc.authinfo.active_keyid;
9453#ifdef SCTP_AUDITING_ENABLED
9454 sctp_audit_log(0xC3, 1);
9455#endif
9456 if ((TAILQ_EMPTY(&asoc->sent_queue)) &&
9457 (TAILQ_EMPTY(&asoc->control_send_queue))) {
9458 SCTPDBG(SCTP_DEBUG_OUTPUT1, "SCTP hits empty queue with cnt set to %d?\n",
9459 asoc->sent_queue_retran_cnt);
9460 asoc->sent_queue_cnt = 0;
9461 asoc->sent_queue_cnt_removeable = 0;
9462 /* send back 0/0 so we enter normal transmission */
9463 *cnt_out = 0;
9464 return (0);
9465 }
9466 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
9467 if ((chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) ||
9468 (chk->rec.chunk_id.id == SCTP_STREAM_RESET) ||
9469 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)) {
9470 if (chk->sent != SCTP_DATAGRAM_RESEND) {
9471 continue;
9472 }
9473 if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
9474 if (chk != asoc->str_reset) {
9475 /*
9476 * not eligible for retran if its
9477 * not ours
9478 */
9479 continue;
9480 }
9481 }
9482 ctl_cnt++;
9483 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
9484 fwd_tsn = 1;
9485 }
9486 /*
9487			 * Add an AUTH chunk if the chunk requires it, and save the
9488			 * offset into the chain for AUTH
9489 */
9490 if ((auth == NULL) &&
9492 stcb->asoc.peer_auth_chunks))) {
9493 m = sctp_add_auth_chunk(m, &endofchain,
9494 &auth, &auth_offset,
9495 stcb,
9496 chk->rec.chunk_id.id);
9497 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9498 }
9499 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
9500 break;
9501 }
9502 }
9503 one_chunk = 0;
9504 cnt_thru = 0;
9505 /* do we have control chunks to retransmit? */
9506 if (m != NULL) {
9507 /* Start a timer no matter if we succeed or fail */
9508 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
9510 } else if (chk->rec.chunk_id.id == SCTP_ASCONF)
9512 chk->snd_count++; /* update our count */
9513 if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo,
9514 (struct sockaddr *)&chk->whoTo->ro._l_addr, m,
9515 auth_offset, auth, stcb->asoc.authinfo.active_keyid,
9516 no_fragmentflg, 0, 0,
9517 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
9518 chk->whoTo->port, NULL,
9519 0, 0,
9520 so_locked))) {
9521 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
9522 if (error == ENOBUFS) {
9523 asoc->ifp_had_enobuf = 1;
9524 SCTP_STAT_INCR(sctps_lowlevelerr);
9525 }
9526 return (error);
9527 } else {
9528 asoc->ifp_had_enobuf = 0;
9529 }
9530 endofchain = NULL;
9531 auth = NULL;
9532 auth_offset = 0;
9533 /*
9534		 * We don't want to mark the net->sent time here, since we use
9535		 * this for HB and retrans cannot measure RTT
9536 */
9537 /* (void)SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time); */
9538 *cnt_out += 1;
9539 chk->sent = SCTP_DATAGRAM_SENT;
9541 if (fwd_tsn == 0) {
9542 return (0);
9543 } else {
9544 /* Clean up the fwd-tsn list */
9545 sctp_clean_up_ctl(stcb, asoc, so_locked);
9546 return (0);
9547 }
9548 }
9549 /*
9550	 * Ok, it is just data retransmission we need to do, possibly
9551	 * together with a fwd-tsn.
9552 */
9553 if (TAILQ_EMPTY(&asoc->sent_queue)) {
9554 return (SCTP_RETRAN_DONE);
9555 }
9558 /* not yet open, resend the cookie and that is it */
9559 return (1);
9560 }
9561#ifdef SCTP_AUDITING_ENABLED
9562 sctp_auditing(20, inp, stcb, NULL);
9563#endif
9565 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
9566 if (chk->sent != SCTP_DATAGRAM_RESEND) {
9567 /* No, not sent to this net or not ready for rtx */
9568 continue;
9569 }
9570 if (chk->data == NULL) {
9571 SCTP_PRINTF("TSN:%x chk->snd_count:%d chk->sent:%d can't retran - no data\n",
9572 chk->rec.data.tsn, chk->snd_count, chk->sent);
9573 continue;
9574 }
9575 if ((SCTP_BASE_SYSCTL(sctp_max_retran_chunk)) &&
9576 (chk->snd_count >= SCTP_BASE_SYSCTL(sctp_max_retran_chunk))) {
9577 struct mbuf *op_err;
9578 char msg[SCTP_DIAG_INFO_LEN];
9579
9580 SCTP_SNPRINTF(msg, sizeof(msg), "TSN %8.8x retransmitted %d times, giving up",
9581 chk->rec.data.tsn, chk->snd_count);
9582 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
9583 msg);
9584 atomic_add_int(&stcb->asoc.refcnt, 1);
9585 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err,
9586 false, so_locked);
9587 SCTP_TCB_LOCK(stcb);
9588 atomic_subtract_int(&stcb->asoc.refcnt, 1);
9589 return (SCTP_RETRAN_EXIT);
9590 }
9591 /* pick up the net */
9592 net = chk->whoTo;
9593 switch (net->ro._l_addr.sa.sa_family) {
9594#ifdef INET
9595 case AF_INET:
9596 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
9597 break;
9598#endif
9599#ifdef INET6
9600 case AF_INET6:
9601 mtu = net->mtu - SCTP_MIN_OVERHEAD;
9602 break;
9603#endif
9604 default:
9605 /* TSNH */
9606 mtu = net->mtu;
9607 break;
9608 }
9609
9610 if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) {
9611 /* No room in peers rwnd */
9612 uint32_t tsn;
9613
9614 tsn = asoc->last_acked_seq + 1;
9615 if (tsn == chk->rec.data.tsn) {
9616 /*
9617 * we make a special exception for this
9618 * case. The peer has no rwnd but is missing
9619 * the lowest chunk.. which is probably what
9620 * is holding up the rwnd.
9621 */
9622 goto one_chunk_around;
9623 }
9624 return (1);
9625 }
9626one_chunk_around:
9627 if (asoc->peers_rwnd < mtu) {
9628 one_chunk = 1;
9629 if ((asoc->peers_rwnd == 0) &&
9630 (asoc->total_flight == 0)) {
9631 chk->window_probe = 1;
9632 chk->whoTo->window_probe = 1;
9633 }
9634 }
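		/*
		 * Note on the zero-window case handled above: if the peer
		 * advertises a zero rwnd and nothing is in flight, one chunk
		 * is still sent and flagged as a window probe so the rwnd
		 * accounting and SACK processing can treat it specially.
		 */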
9635#ifdef SCTP_AUDITING_ENABLED
9636 sctp_audit_log(0xC3, 2);
9637#endif
9638 bundle_at = 0;
9639 m = NULL;
9640 net->fast_retran_ip = 0;
9641 if (chk->rec.data.doing_fast_retransmit == 0) {
9642 /*
9643			 * if no FR is in progress, skip destinations that have
9644 * flight_size > cwnd.
9645 */
9646 if (net->flight_size >= net->cwnd) {
9647 continue;
9648 }
9649 } else {
9650 /*
9651 * Mark the destination net to have FR recovery
9652 * limits put on it.
9653 */
9654 *fr_done = 1;
9655 net->fast_retran_ip = 1;
9656 }
9657
9658 /*
9659 * if no AUTH is yet included and this chunk requires it,
9660 * make sure to account for it. We don't apply the size
9661 * until the AUTH chunk is actually added below in case
9662 * there is no room for this chunk.
9663 */
9664 if (data_auth_reqd && (auth == NULL)) {
9666 } else
9667 dmtu = 0;
9668
9669 if ((chk->send_size <= (mtu - dmtu)) ||
9670 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
9671 /* ok we will add this one */
9672 if (data_auth_reqd) {
9673 if (auth == NULL) {
9674 m = sctp_add_auth_chunk(m,
9675 &endofchain,
9676 &auth,
9677 &auth_offset,
9678 stcb,
9679 SCTP_DATA);
9680 auth_keyid = chk->auth_keyid;
9681 override_ok = 0;
9682 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9683 } else if (override_ok) {
9684 auth_keyid = chk->auth_keyid;
9685 override_ok = 0;
9686 } else if (chk->auth_keyid != auth_keyid) {
9687 /* different keyid, so done bundling */
9688 break;
9689 }
9690 }
9691 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
9692 if (m == NULL) {
9693 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
9694 return (ENOMEM);
9695 }
9696 /* Do clear IP_DF ? */
9697 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
9698 no_fragmentflg = 0;
9699 }
9700			/* update our MTU size */
9701 if (mtu > (chk->send_size + dmtu))
9702 mtu -= (chk->send_size + dmtu);
9703 else
9704 mtu = 0;
9705 data_list[bundle_at++] = chk;
9706 if (one_chunk && (asoc->total_flight <= 0)) {
9707 SCTP_STAT_INCR(sctps_windowprobed);
9708 }
9709 }
9710 if (one_chunk == 0) {
9711 /*
9712			 * now, are there any more chunks forward of chk to pick
9713			 * up?
9714 */
9715 for (fwd = TAILQ_NEXT(chk, sctp_next); fwd != NULL; fwd = TAILQ_NEXT(fwd, sctp_next)) {
9716 if (fwd->sent != SCTP_DATAGRAM_RESEND) {
9717 /* Nope, not for retran */
9718 continue;
9719 }
9720 if (fwd->whoTo != net) {
9721 /* Nope, not the net in question */
9722 continue;
9723 }
9724 if (data_auth_reqd && (auth == NULL)) {
9726 } else
9727 dmtu = 0;
9728 if (fwd->send_size <= (mtu - dmtu)) {
9729 if (data_auth_reqd) {
9730 if (auth == NULL) {
9731 m = sctp_add_auth_chunk(m,
9732 &endofchain,
9733 &auth,
9734 &auth_offset,
9735 stcb,
9736 SCTP_DATA);
9737 auth_keyid = fwd->auth_keyid;
9738 override_ok = 0;
9739 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9740 } else if (override_ok) {
9741 auth_keyid = fwd->auth_keyid;
9742 override_ok = 0;
9743 } else if (fwd->auth_keyid != auth_keyid) {
9744 /*
9745 * different keyid,
9746 * so done bundling
9747 */
9748 break;
9749 }
9750 }
9751 m = sctp_copy_mbufchain(fwd->data, m, &endofchain, 0, fwd->send_size, fwd->copy_by_ref);
9752 if (m == NULL) {
9753 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
9754 return (ENOMEM);
9755 }
9756 /* Do clear IP_DF ? */
9757 if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) {
9758 no_fragmentflg = 0;
9759 }
9760					/* update our MTU size */
9761 if (mtu > (fwd->send_size + dmtu))
9762 mtu -= (fwd->send_size + dmtu);
9763 else
9764 mtu = 0;
9765 data_list[bundle_at++] = fwd;
9766 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
9767 break;
9768 }
9769 } else {
9770 /* can't fit so we are done */
9771 break;
9772 }
9773 }
9774 }
9775 /* Is there something to send for this destination? */
9776 if (m) {
9777 /*
9778			 * No matter if we fail or succeed, we should start a
9779 * timer. A failure is like a lost IP packet :-)
9780 */
9782 /*
9783 * no timer running on this destination
9784 * restart it.
9785 */
9786 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9787 tmr_started = 1;
9788 }
9789 /* Now lets send it, if there is anything to send :> */
9790 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
9791 (struct sockaddr *)&net->ro._l_addr, m,
9792 auth_offset, auth, auth_keyid,
9793 no_fragmentflg, 0, 0,
9794 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
9795 net->port, NULL,
9796 0, 0,
9797 so_locked))) {
9798 /* error, we could not output */
9799 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
9800 if (error == ENOBUFS) {
9801 asoc->ifp_had_enobuf = 1;
9802 SCTP_STAT_INCR(sctps_lowlevelerr);
9803 }
9804 return (error);
9805 } else {
9806 asoc->ifp_had_enobuf = 0;
9807 }
9808 endofchain = NULL;
9809 auth = NULL;
9810 auth_offset = 0;
9811 /* For HB's */
9812 /*
9813 * We don't want to mark the net->sent time here
9814			 * since we use this for HB and retrans cannot
9815 * measure RTT
9816 */
9817 /* (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); */
9818
9819 /* For auto-close */
9820 cnt_thru++;
9821 if (*now_filled == 0) {
9823 *now = asoc->time_last_sent;
9824 *now_filled = 1;
9825 } else {
9826 asoc->time_last_sent = *now;
9827 }
9828 *cnt_out += bundle_at;
9829#ifdef SCTP_AUDITING_ENABLED
9830 sctp_audit_log(0xC4, bundle_at);
9831#endif
9832 if (bundle_at) {
9833 tsns_sent = data_list[0]->rec.data.tsn;
9834 }
9835 for (i = 0; i < bundle_at; i++) {
9836 SCTP_STAT_INCR(sctps_sendretransdata);
9837 data_list[i]->sent = SCTP_DATAGRAM_SENT;
9838 /*
9839				 * When we have revoked data and we
9840				 * retransmit it, we clear the revoked
9841				 * flag since this flag dictates whether we
9842				 * subtracted it from the flight size
9843 */
9844 if (data_list[i]->rec.data.chunk_was_revoked) {
9845 /* Deflate the cwnd */
9846 data_list[i]->whoTo->cwnd -= data_list[i]->book_size;
9847 data_list[i]->rec.data.chunk_was_revoked = 0;
9848 }
9849 data_list[i]->snd_count++;
9851 /* record the time */
9852 data_list[i]->sent_rcv_time = asoc->time_last_sent;
9853 if (data_list[i]->book_size_scale) {
9854 /*
9855 * need to double the book size on
9856 * this one
9857 */
9858 data_list[i]->book_size_scale = 0;
9859 /*
9860 * Since we double the booksize, we
9861 * must also double the output queue
9862					 * size, since it gets shrunk by this
9863					 * amount when we free.
9864 */
9865 atomic_add_int(&((asoc)->total_output_queue_size), data_list[i]->book_size);
9866 data_list[i]->book_size *= 2;
9867 } else {
9868 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
9870 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
9871 }
9873 (uint32_t)(data_list[i]->send_size +
9874 SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
9875 }
9876 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
9878 data_list[i]->whoTo->flight_size,
9879 data_list[i]->book_size,
9880 (uint32_t)(uintptr_t)data_list[i]->whoTo,
9881 data_list[i]->rec.data.tsn);
9882 }
9883 sctp_flight_size_increase(data_list[i]);
9884 sctp_total_flight_increase(stcb, data_list[i]);
9885 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
9886 /* SWS sender side engages */
9887 asoc->peers_rwnd = 0;
9888 }
9889 if ((i == 0) &&
9890 (data_list[i]->rec.data.doing_fast_retransmit)) {
9891 SCTP_STAT_INCR(sctps_sendfastretrans);
9892 if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) &&
9893 (tmr_started == 0)) {
9894 /*-
9895 * ok we just fast-retrans'd
9896					 * the lowest TSN, i.e. the
9897 * first on the list. In
9898 * this case we want to give
9899 * some more time to get a
9900 * SACK back without a
9901 * t3-expiring.
9902 */
9903 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
9905 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9906 }
9907 }
9908 }
9909 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9910 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_RESEND);
9911 }
9912#ifdef SCTP_AUDITING_ENABLED
9913 sctp_auditing(21, inp, stcb, NULL);
9914#endif
9915 } else {
9916 /* None will fit */
9917 return (1);
9918 }
9919 if (asoc->sent_queue_retran_cnt <= 0) {
9920 /* all done we have no more to retran */
9921 asoc->sent_queue_retran_cnt = 0;
9922 break;
9923 }
9924 if (one_chunk) {
9925 /* No more room in rwnd */
9926 return (1);
9927 }
9928 /* stop the for loop here. we sent out a packet */
9929 break;
9930 }
9931 return (0);
9932}
9933
9934 static void
9935 sctp_timer_validation(struct sctp_inpcb *inp,
9936 struct sctp_tcb *stcb,
9937 struct sctp_association *asoc)
9938{
9939 struct sctp_nets *net;
9940
9941 /* Validate that a timer is running somewhere */
9942 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
9944 /* Here is a timer */
9945 return;
9946 }
9947 }
9949 /* Gak, we did not have a timer somewhere */
9950 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Deadlock avoided starting timer on a dest at retran\n");
9951 if (asoc->alternate) {
9953 } else {
9955 }
9956 return;
9957}
9958
9959 void
9960 sctp_chunk_output(struct sctp_inpcb *inp,
9961 struct sctp_tcb *stcb,
9962 int from_where,
9963 int so_locked)
9964{
9965 /*-
9966 * Ok this is the generic chunk service queue. we must do the
9967 * following:
9968 * - See if there are retransmits pending, if so we must
9969 * do these first.
9970 * - Service the stream queue that is next, moving any
9971 * message (note I must get a complete message i.e.
9972 * FIRST/MIDDLE and LAST to the out queue in one pass) and assigning
9973 * TSN's
9974 * - Check to see if the cwnd/rwnd allows any output, if so we
9975	 * go ahead and formulate and send the low level chunks. Making sure
9976 * to combine any control in the control chunk queue also.
9977 */
9978 struct sctp_association *asoc;
9979 struct sctp_nets *net;
9980 int error = 0, num_out, tot_out = 0, ret = 0, reason_code;
9981 unsigned int burst_cnt = 0;
9982 struct timeval now;
9983 int now_filled = 0;
9984 int nagle_on;
9985 uint32_t frag_point = sctp_get_frag_point(stcb);
9986 int un_sent = 0;
9987 int fr_done;
9988 unsigned int tot_frs = 0;
9989
9990 asoc = &stcb->asoc;
9991do_it_again:
9992 /* The Nagle algorithm is only applied when handling a send call. */
9993 if (from_where == SCTP_OUTPUT_FROM_USR_SEND) {
9995 nagle_on = 0;
9996 } else {
9997 nagle_on = 1;
9998 }
9999 } else {
10000 nagle_on = 0;
10001 }
10003
10004 un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
10005
10006 if ((un_sent <= 0) &&
10007 (TAILQ_EMPTY(&asoc->control_send_queue)) &&
10008 (TAILQ_EMPTY(&asoc->asconf_send_queue)) &&
10009 (asoc->sent_queue_retran_cnt == 0) &&
10010 (asoc->trigger_reset == 0)) {
10011 /* Nothing to do unless there is something to be sent left */
10012 return;
10013 }
10014 /*
10015	 * Do we have something to send (data or control) AND a sack timer
10016	 * running? If so, piggy-back the sack.
10017 */
10019 sctp_send_sack(stcb, so_locked);
10020 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
10022 }
10023 while (asoc->sent_queue_retran_cnt) {
10024 /*-
10025 * Ok, it is retransmission time only, we send out only ONE
10026 * packet with a single call off to the retran code.
10027 */
10028 if (from_where == SCTP_OUTPUT_FROM_COOKIE_ACK) {
10029 /*-
10030			 * Special hook for handling cookies discarded
10031			 * by the peer that carried data. Send the cookie-ack only
10032			 * and then the next call will get the retransmissions.
10033 */
10034 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
10035 from_where,
10036 &now, &now_filled, frag_point, so_locked);
10037 return;
10038 } else if (from_where != SCTP_OUTPUT_FROM_HB_TMR) {
10039 /* if its not from a HB then do it */
10040 fr_done = 0;
10041 ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled, &fr_done, so_locked);
10042 if (fr_done) {
10043 tot_frs++;
10044 }
10045 } else {
10046 /*
10047 * its from any other place, we don't allow retran
10048 * output (only control)
10049 */
10050 ret = 1;
10051 }
10052 if (ret > 0) {
10053 /* Can't send anymore */
10054 /*-
10055			 * now let's push out control by calling med-level
10056			 * output once. This ensures that we WILL send HBs
10057			 * if queued too.
10058 */
10059 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
10060 from_where,
10061 &now, &now_filled, frag_point, so_locked);
10062#ifdef SCTP_AUDITING_ENABLED
10063 sctp_auditing(8, inp, stcb, NULL);
10064#endif
10065 sctp_timer_validation(inp, stcb, asoc);
10066 return;
10067 }
10068 if (ret < 0) {
10069 /*-
10070			 * The count was off; a retransmission is not happening, so
10071			 * fall back to the normal transmission path.
10072 */
10073#ifdef SCTP_AUDITING_ENABLED
10074 sctp_auditing(9, inp, stcb, NULL);
10075#endif
10076 if (ret == SCTP_RETRAN_EXIT) {
10077 return;
10078 }
10079 break;
10080 }
10081 if (from_where == SCTP_OUTPUT_FROM_T3) {
10082 /* Only one transmission allowed out of a timeout */
10083#ifdef SCTP_AUDITING_ENABLED
10084 sctp_auditing(10, inp, stcb, NULL);
10085#endif
10086 /* Push out any control */
10087 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, from_where,
10088 &now, &now_filled, frag_point, so_locked);
10089 return;
10090 }
10091 if ((asoc->fr_max_burst > 0) && (tot_frs >= asoc->fr_max_burst)) {
10092 /* Hit FR burst limit */
10093 return;
10094 }
10095 if ((num_out == 0) && (ret == 0)) {
10096 /* No more retrans to send */
10097 break;
10098 }
10099 }
10100#ifdef SCTP_AUDITING_ENABLED
10101 sctp_auditing(12, inp, stcb, NULL);
10102#endif
10103 /* Check for bad destinations, if they exist move chunks around. */
10104 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
10105 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
10106 /*-
10107			 * if possible, move things off of this address. We
10108			 * may still send below due to the dormant state, but
10109			 * we try to find an alternate address to send to,
10110			 * and if we have one we move all queued data on the
10111			 * out wheel to this alternate address.
10112 */
10113 if (net->ref_count > 1)
10114 sctp_move_chunks_from_net(stcb, net);
10115 } else {
10116 /*-
10117 * if ((asoc->sat_network) || (net->addr_is_local))
10118 * { burst_limit = asoc->max_burst *
10119 * SCTP_SAT_NETWORK_BURST_INCR; }
10120 */
10121 if (asoc->max_burst > 0) {
10122 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst)) {
10123 if ((net->flight_size + (asoc->max_burst * net->mtu)) < net->cwnd) {
10124 /*
10125 * JRS - Use the congestion
10126 * control given in the
10127 * congestion control module
10128 */
10130 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10132 }
10133 SCTP_STAT_INCR(sctps_maxburstqueued);
10134 }
10135 net->fast_retran_ip = 0;
10136 } else {
10137 if (net->flight_size == 0) {
10138 /*
10139 * Should be decaying the
10140 * cwnd here
10141 */
10142 ;
10143 }
10144 }
10145 }
10146 }
10147 }
10148 burst_cnt = 0;
10149 do {
10150 error = sctp_med_chunk_output(inp, stcb, asoc, &num_out,
10151 &reason_code, 0, from_where,
10152 &now, &now_filled, frag_point, so_locked);
10153 if (error) {
10154 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Error %d was returned from med-c-op\n", error);
10155 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10156 sctp_log_maxburst(stcb, asoc->primary_destination, error, burst_cnt, SCTP_MAX_BURST_ERROR_STOP);
10157 }
10158 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10159 sctp_log_cwnd(stcb, NULL, error, SCTP_SEND_NOW_COMPLETES);
10160 sctp_log_cwnd(stcb, NULL, 0xdeadbeef, SCTP_SEND_NOW_COMPLETES);
10161 }
10162 break;
10163 }
10164 SCTPDBG(SCTP_DEBUG_OUTPUT3, "m-c-o put out %d\n", num_out);
10165
10166 tot_out += num_out;
10167 burst_cnt++;
10168 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10169 sctp_log_cwnd(stcb, NULL, num_out, SCTP_SEND_NOW_COMPLETES);
10170 if (num_out == 0) {
10171 sctp_log_cwnd(stcb, NULL, reason_code, SCTP_SEND_NOW_COMPLETES);
10172 }
10173 }
10174 if (nagle_on) {
10175 /*
10176 * When the Nagle algorithm is used, look at how
10177			 * much is unsent; then, if it's smaller than an MTU
10178			 * and we have data in flight, we stop, except if we
10179 * are handling a fragmented user message.
10180 */
10181 un_sent = stcb->asoc.total_output_queue_size - stcb->asoc.total_flight;
10182 if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) &&
10183 (stcb->asoc.total_flight > 0)) {
10184/* && sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {*/
10185 break;
10186 }
10187 }
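		/*
		 * Worked example of the Nagle check above (assuming a typical
		 * 1500-byte smallest_mtu): roughly 1500 - SCTP_MIN_OVERHEAD,
		 * i.e. about 1.4 kB, must be unsent before another packet is
		 * emitted while data is already in flight; smaller residues
		 * wait for the outstanding data to be SACKed.
		 */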
10188 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
10189 TAILQ_EMPTY(&asoc->send_queue) &&
10190 sctp_is_there_unsent_data(stcb, so_locked) == 0) {
10191 /* Nothing left to send */
10192 break;
10193 }
10194 if ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) <= 0) {
10195 /* Nothing left to send */
10196 break;
10197 }
10198 } while (num_out &&
10199 ((asoc->max_burst == 0) ||
10200 SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) ||
10201 (burst_cnt < asoc->max_burst)));
10202
10203 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) == 0) {
10204 if ((asoc->max_burst > 0) && (burst_cnt >= asoc->max_burst)) {
10205 SCTP_STAT_INCR(sctps_maxburstqueued);
10206 asoc->burst_limit_applied = 1;
10207 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10209 }
10210 } else {
10211 asoc->burst_limit_applied = 0;
10212 }
10213 }
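	/*
	 * Note on the classic (non cwnd-based) max burst handling above:
	 * each call may emit at most asoc->max_burst packets; when the loop
	 * stops for that reason, burst_limit_applied is recorded so later
	 * code and logging can see that the burst limit, not the cwnd or
	 * rwnd, ended the send.
	 */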
10214 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10215 sctp_log_cwnd(stcb, NULL, tot_out, SCTP_SEND_NOW_COMPLETES);
10216 }
10217 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, we have put out %d chunks\n",
10218 tot_out);
10219
10220 /*-
10221	 * Now we need to clean up the control chunk chain if an ECNE is on
10222	 * it. It must be marked as UNSENT again so the next call will continue
10223 * to send it until such time that we get a CWR, to remove it.
10224 */
10225 if (stcb->asoc.ecn_echo_cnt_onq)
10226 sctp_fix_ecn_echo(asoc);
10227
10228 if (stcb->asoc.trigger_reset) {
10229 if (sctp_send_stream_reset_out_if_possible(stcb, so_locked) == 0) {
10230 goto do_it_again;
10231 }
10232 }
10233 return;
10234}
10235
10236 int
10237 sctp_output(
10238 struct sctp_inpcb *inp,
10239 struct mbuf *m,
10240 struct sockaddr *addr,
10241 struct mbuf *control,
10242 struct thread *p,
10243 int flags)
10244{
10245 if (inp == NULL) {
10246 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
10247 return (EINVAL);
10248 }
10249
10250 if (inp->sctp_socket == NULL) {
10251 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
10252 return (EINVAL);
10253 }
10254 return (sctp_sosend(inp->sctp_socket,
10255 addr,
10256 (struct uio *)NULL,
10257 m,
10258 control,
10259 flags, p
10260 ));
10261}
10262
10263 void
10264 send_forward_tsn(struct sctp_tcb *stcb,
10265 struct sctp_association *asoc)
10266{
10267 struct sctp_tmit_chunk *chk, *at, *tp1, *last;
10268 struct sctp_forward_tsn_chunk *fwdtsn;
10269 struct sctp_strseq *strseq;
10270 struct sctp_strseq_mid *strseq_m;
10271 uint32_t advance_peer_ack_point;
10272 unsigned int cnt_of_space, i, ovh;
10273 unsigned int space_needed;
10274 unsigned int cnt_of_skipped = 0;
10275
10277 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10278 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
10279 /* mark it to unsent */
10281 chk->snd_count = 0;
10282 /* Do we correct its output location? */
10283 if (chk->whoTo) {
10285 chk->whoTo = NULL;
10286 }
10287 goto sctp_fill_in_rest;
10288 }
10289 }
10290 /* Ok if we reach here we must build one */
10291 sctp_alloc_a_chunk(stcb, chk);
10292 if (chk == NULL) {
10293 return;
10294 }
10295 asoc->fwd_tsn_cnt++;
10296 chk->copy_by_ref = 0;
10297 /*
10298 * We don't do the old thing here since this is used not for on-wire
10299 * but to tell if we are sending a fwd-tsn by the stack during
10300	 * output. And if it's an IFORWARD or a FORWARD, it is a fwd-tsn.
10301 */
10303 chk->rec.chunk_id.can_take_data = 0;
10304 chk->flags = 0;
10305 chk->asoc = asoc;
10306 chk->whoTo = NULL;
10307 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
10308 if (chk->data == NULL) {
10310 return;
10311 }
10314 chk->snd_count = 0;
10315 TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next);
10316 asoc->ctrl_queue_cnt++;
10317sctp_fill_in_rest:
10318 /*-
10319 * Here we go through and fill out the part that deals with
10320 * stream/seq of the ones we skip.
10321 */
10322 SCTP_BUF_LEN(chk->data) = 0;
10323 TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
10324 if ((at->sent != SCTP_FORWARD_TSN_SKIP) &&
10325 (at->sent != SCTP_DATAGRAM_NR_ACKED)) {
10326 /* no more to look at */
10327 break;
10328 }
10329 if (!asoc->idata_supported && (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
10330 /* We don't report these */
10331 continue;
10332 }
10333 cnt_of_skipped++;
10334 }
10335 if (asoc->idata_supported) {
10336 space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
10337 (cnt_of_skipped * sizeof(struct sctp_strseq_mid)));
10338 } else {
10339 space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
10340 (cnt_of_skipped * sizeof(struct sctp_strseq)));
10341 }
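	/*
	 * Size sketch for the FORWARD-TSN being built: the chunk header and
	 * new cumulative TSN come first, followed by one entry per skipped
	 * message. The classic entries carry a 16-bit sid and 16-bit ssn
	 * (4 bytes each, matching the 1432/4 estimate in the comment further
	 * below), so e.g. 10 skipped ordered messages add 40 bytes; the
	 * I-DATA (strseq_mid) entries are larger since they also carry a
	 * 32-bit MID and flags.
	 */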
10342 cnt_of_space = (unsigned int)M_TRAILINGSPACE(chk->data);
10343
10345 ovh = SCTP_MIN_OVERHEAD;
10346 } else {
10348 }
10349 if (cnt_of_space > (asoc->smallest_mtu - ovh)) {
10350 /* trim to a mtu size */
10351 cnt_of_space = asoc->smallest_mtu - ovh;
10352 }
10353 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10355 0xff, 0, cnt_of_skipped,
10357 }
10358 advance_peer_ack_point = asoc->advanced_peer_ack_point;
10359 if (cnt_of_space < space_needed) {
10360 /*-
10361 * ok we must trim down the chunk by lowering the
10362 * advance peer ack point.
10363 */
10364 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10366 0xff, 0xff, cnt_of_space,
10367 space_needed);
10368 }
10369 cnt_of_skipped = cnt_of_space - sizeof(struct sctp_forward_tsn_chunk);
10370 if (asoc->idata_supported) {
10371 cnt_of_skipped /= sizeof(struct sctp_strseq_mid);
10372 } else {
10373 cnt_of_skipped /= sizeof(struct sctp_strseq);
10374 }
10375 /*-
10376 * Go through and find the TSN that will be the one
10377 * we report.
10378 */
10379 at = TAILQ_FIRST(&asoc->sent_queue);
10380 if (at != NULL) {
10381 for (i = 0; i < cnt_of_skipped; i++) {
10382 tp1 = TAILQ_NEXT(at, sctp_next);
10383 if (tp1 == NULL) {
10384 break;
10385 }
10386 at = tp1;
10387 }
10388 }
10389 if (at && SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10391 0xff, cnt_of_skipped, at->rec.data.tsn,
10393 }
10394 last = at;
10395 /*-
10396 * last now points to last one I can report, update
10397 * peer ack point
10398 */
10399 if (last) {
10400 advance_peer_ack_point = last->rec.data.tsn;
10401 }
10402 if (asoc->idata_supported) {
10403 space_needed = sizeof(struct sctp_forward_tsn_chunk) +
10404 cnt_of_skipped * sizeof(struct sctp_strseq_mid);
10405 } else {
10406 space_needed = sizeof(struct sctp_forward_tsn_chunk) +
10407 cnt_of_skipped * sizeof(struct sctp_strseq);
10408 }
10409 }
10410 chk->send_size = space_needed;
10411 /* Setup the chunk */
10412 fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *);
10413 fwdtsn->ch.chunk_length = htons(chk->send_size);
10414 fwdtsn->ch.chunk_flags = 0;
10415 if (asoc->idata_supported) {
10417 } else {
10419 }
10420 fwdtsn->new_cumulative_tsn = htonl(advance_peer_ack_point);
10421 SCTP_BUF_LEN(chk->data) = chk->send_size;
10422 fwdtsn++;
10423 /*-
10424 * Move pointer to after the fwdtsn and transfer to the
10425 * strseq pointer.
10426 */
10427 if (asoc->idata_supported) {
10428 strseq_m = (struct sctp_strseq_mid *)fwdtsn;
10429 strseq = NULL;
10430 } else {
10431 strseq = (struct sctp_strseq *)fwdtsn;
10432 strseq_m = NULL;
10433 }
10434 /*-
10435 * Now populate the strseq list. This is done blindly
10436 * without pulling out duplicate stream info. This is
10437 * inefficient but won't harm the process since the peer will
10438 * look at these in sequence and will thus release anything.
10439 * It could mean we exceed the PMTU and chop off some that
10440	 * we could have included, but this is unlikely (aka 1432/4
10441	 * would mean 300+ stream seq's would have to be reported in
10442	 * one FWD-TSN). With a bit of work we can later FIX this to
10443	 * optimize and pull out duplicates, but it does add more
10444	 * overhead. So for now... not!
10445 */
10446 i = 0;
10447 TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
10448 if (i >= cnt_of_skipped) {
10449 break;
10450 }
10451 if (!asoc->idata_supported && (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
10452 /* We don't report these */
10453 continue;
10454 }
10455 if (at->rec.data.tsn == advance_peer_ack_point) {
10456 at->rec.data.fwd_tsn_cnt = 0;
10457 }
10458 if (asoc->idata_supported) {
10459 strseq_m->sid = htons(at->rec.data.sid);
10461 strseq_m->flags = htons(PR_SCTP_UNORDERED_FLAG);
10462 } else {
10463 strseq_m->flags = 0;
10464 }
10465 strseq_m->mid = htonl(at->rec.data.mid);
10466 strseq_m++;
10467 } else {
10468 strseq->sid = htons(at->rec.data.sid);
10469 strseq->ssn = htons((uint16_t)at->rec.data.mid);
10470 strseq++;
10471 }
10472 i++;
10473 }
10474 return;
10475}
10476
10477void
10478sctp_send_sack(struct sctp_tcb *stcb, int so_locked)
10479{
10480 /*-
10481 * Queue up a SACK or NR-SACK in the control queue.
10482 * We must first check to see if a SACK or NR-SACK is
10483 * somehow on the control queue.
10484	 * If so, we will take and remove the old one.
10485 */
10486 struct sctp_association *asoc;
10487 struct sctp_tmit_chunk *chk, *a_chk;
10488 struct sctp_sack_chunk *sack;
10490 struct sctp_gap_ack_block *gap_descriptor;
10491 const struct sack_track *selector;
10492 int mergeable = 0;
10493 int offset;
10494 caddr_t limit;
10495 uint32_t *dup;
10496 int limit_reached = 0;
10497 unsigned int i, siz, j;
10498 unsigned int num_gap_blocks = 0, num_nr_gap_blocks = 0, space;
10499 int num_dups = 0;
10500 int space_req;
10501 uint32_t highest_tsn;
10502 uint8_t flags;
10503 uint8_t type;
10504 uint8_t tsn_map;
10505
10506 if (stcb->asoc.nrsack_supported == 1) {
10507 type = SCTP_NR_SELECTIVE_ACK;
10508 } else {
10509 type = SCTP_SELECTIVE_ACK;
10510 }
10511 a_chk = NULL;
10512 asoc = &stcb->asoc;
10514 if (asoc->last_data_chunk_from == NULL) {
10515 /* Hmm we never received anything */
10516 return;
10517 }
10519 sctp_set_rwnd(stcb, asoc);
10520 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10521 if (chk->rec.chunk_id.id == type) {
10522 /* Hmm, found a sack already on queue, remove it */
10523 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
10524 asoc->ctrl_queue_cnt--;
10525 a_chk = chk;
10526 if (a_chk->data) {
10527 sctp_m_freem(a_chk->data);
10528 a_chk->data = NULL;
10529 }
10530 if (a_chk->whoTo) {
10532 a_chk->whoTo = NULL;
10533 }
10534 break;
10535 }
10536 }
10537 if (a_chk == NULL) {
10538 sctp_alloc_a_chunk(stcb, a_chk);
10539 if (a_chk == NULL) {
10540 /* No memory so we drop the idea, and set a timer */
10541 if (stcb->asoc.delayed_ack) {
10543 stcb->sctp_ep, stcb, NULL,
10546 stcb->sctp_ep, stcb, NULL);
10547 } else {
10548 stcb->asoc.send_sack = 1;
10549 }
10550 return;
10551 }
10552 a_chk->copy_by_ref = 0;
10553 a_chk->rec.chunk_id.id = type;
10554 a_chk->rec.chunk_id.can_take_data = 1;
10555 }
10556 /* Clear our pkt counts */
10557 asoc->data_pkts_seen = 0;
10558
10559 a_chk->flags = 0;
10560 a_chk->asoc = asoc;
10561 a_chk->snd_count = 0;
10562 a_chk->send_size = 0; /* fill in later */
10563 a_chk->sent = SCTP_DATAGRAM_UNSENT;
10564 a_chk->whoTo = NULL;
10565
10567 /*-
10568 * Ok, the destination for the SACK is unreachable, lets see if
10569 * we can select an alternate to asoc->last_data_chunk_from
10570 */
10571 a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0);
10572 if (a_chk->whoTo == NULL) {
10573 /* Nope, no alternate */
10574 a_chk->whoTo = asoc->last_data_chunk_from;
10575 }
10576 } else {
10577 a_chk->whoTo = asoc->last_data_chunk_from;
10578 }
10579 if (a_chk->whoTo) {
10580 atomic_add_int(&a_chk->whoTo->ref_count, 1);
10581 }
10583 highest_tsn = asoc->highest_tsn_inside_map;
10584 } else {
10585 highest_tsn = asoc->highest_tsn_inside_nr_map;
10586 }
10587 if (highest_tsn == asoc->cumulative_tsn) {
10588 /* no gaps */
10589 if (type == SCTP_SELECTIVE_ACK) {
10590 space_req = sizeof(struct sctp_sack_chunk);
10591 } else {
10592 space_req = sizeof(struct sctp_nr_sack_chunk);
10593 }
10594 } else {
10595 /* gaps get a cluster */
10596 space_req = MCLBYTES;
10597 }
10598 /* Ok now lets formulate a MBUF with our sack */
10599 a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_NOWAIT, 1, MT_DATA);
10600 if ((a_chk->data == NULL) ||
10601 (a_chk->whoTo == NULL)) {
10602 /* rats, no mbuf memory */
10603 if (a_chk->data) {
10604 /* was a problem with the destination */
10605 sctp_m_freem(a_chk->data);
10606 a_chk->data = NULL;
10607 }
10608 sctp_free_a_chunk(stcb, a_chk, so_locked);
10609 /* sa_ignore NO_NULL_CHK */
10610 if (stcb->asoc.delayed_ack) {
10612 stcb->sctp_ep, stcb, NULL,
10615 stcb->sctp_ep, stcb, NULL);
10616 } else {
10617 stcb->asoc.send_sack = 1;
10618 }
10619 return;
10620 }
10621 /* ok, lets go through and fill it in */
10623 space = (unsigned int)M_TRAILINGSPACE(a_chk->data);
10624 if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) {
10625 space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD);
10626 }
10627 limit = mtod(a_chk->data, caddr_t);
10628 limit += space;
10629
10630 flags = 0;
10631
10632 if ((asoc->sctp_cmt_on_off > 0) &&
10633 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
10634 /*-
10635		 * CMT DAC algorithm: If 2 (i.e., binary 10) packets have been
10636 * received, then set high bit to 1, else 0. Reset
10637 * pkts_rcvd.
10638 */
10639 flags |= (asoc->cmt_dac_pkts_rcvd << 6);
10640 asoc->cmt_dac_pkts_rcvd = 0;
10641 }
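	/*
	 * The shift above places the delayed-ack packet count in the two
	 * high bits of the SACK chunk flags: cmt_dac_pkts_rcvd == 1 yields
	 * 0x40 and == 2 yields 0x80, after which the counter is reset for
	 * the next SACK.
	 */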
10642#ifdef SCTP_ASOCLOG_OF_TSNS
10643 stcb->asoc.cumack_logsnt[stcb->asoc.cumack_log_atsnt] = asoc->cumulative_tsn;
10644 stcb->asoc.cumack_log_atsnt++;
10645 if (stcb->asoc.cumack_log_atsnt >= SCTP_TSN_LOG_SIZE) {
10646 stcb->asoc.cumack_log_atsnt = 0;
10647 }
10648#endif
10649 /* reset the readers interpretation */
10650 stcb->freed_by_sorcv_sincelast = 0;
10651
10652 if (type == SCTP_SELECTIVE_ACK) {
10653 sack = mtod(a_chk->data, struct sctp_sack_chunk *);
10654 nr_sack = NULL;
10655 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk));
10656 if (highest_tsn > asoc->mapping_array_base_tsn) {
10657 siz = (((highest_tsn - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10658 } else {
10659 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + highest_tsn + 7) / 8;
10660 }
10661 } else {
10662 sack = NULL;
10663 nr_sack = mtod(a_chk->data, struct sctp_nr_sack_chunk *);
10664 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)nr_sack + sizeof(struct sctp_nr_sack_chunk));
10666 siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10667 } else {
10668 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_map + 7) / 8;
10669 }
10670 }
10671
10673 offset = 1;
10674 } else {
10675 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
10676 }
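	/*
	 * The offset computed above translates mapping-array bit positions
	 * into gap-ack units: bit i of mapping_array[] describes TSN
	 * (mapping_array_base_tsn + i), while gap block start/end values are
	 * reported relative to the cumulative TSN ack, so each table entry
	 * below is shifted by offset (and by 8 per array byte consumed).
	 */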
10677 if (((type == SCTP_SELECTIVE_ACK) &&
10678 SCTP_TSN_GT(highest_tsn, asoc->cumulative_tsn)) ||
10679 ((type == SCTP_NR_SELECTIVE_ACK) &&
10681 /* we have a gap .. maybe */
10682 for (i = 0; i < siz; i++) {
10683 tsn_map = asoc->mapping_array[i];
10684 if (type == SCTP_SELECTIVE_ACK) {
10685 tsn_map |= asoc->nr_mapping_array[i];
10686 }
10687 if (i == 0) {
10688 /*
10689 * Clear all bits corresponding to TSNs
10690 * smaller or equal to the cumulative TSN.
10691 */
10692 tsn_map &= (~0U << (1 - offset));
10693 }
10694 selector = &sack_array[tsn_map];
10695 if (mergeable && selector->right_edge) {
10696 /*
10697 * Backup, left and right edges were ok to
10698 * merge.
10699 */
10700 num_gap_blocks--;
10701 gap_descriptor--;
10702 }
10703 if (selector->num_entries == 0)
10704 mergeable = 0;
10705 else {
10706 for (j = 0; j < selector->num_entries; j++) {
10707 if (mergeable && selector->right_edge) {
10708 /*
10709 * do a merge by NOT setting
10710 * the left side
10711 */
10712 mergeable = 0;
10713 } else {
10714 /*
10715 * no merge, set the left
10716 * side
10717 */
10718 mergeable = 0;
10719 gap_descriptor->start = htons((selector->gaps[j].start + offset));
10720 }
10721 gap_descriptor->end = htons((selector->gaps[j].end + offset));
10722 num_gap_blocks++;
10723 gap_descriptor++;
10724 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
10725 /* no more room */
10726 limit_reached = 1;
10727 break;
10728 }
10729 }
10730 if (selector->left_edge) {
10731 mergeable = 1;
10732 }
10733 }
10734 if (limit_reached) {
10735 /* Reached the limit stop */
10736 break;
10737 }
10738 offset += 8;
10739 }
10740 }
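	/*
	 * The walk above relies on the precomputed sack_array[] table: each
	 * byte of the mapping array indexes one of its 256 entries, which
	 * lists up to four start/end gap fragments for that bit pattern plus
	 * whether those fragments may merge with the neighbouring byte
	 * (right_edge/left_edge), handled here via the mergeable flag.
	 */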
10741 if ((type == SCTP_NR_SELECTIVE_ACK) &&
10742 (limit_reached == 0)) {
10743 mergeable = 0;
10744
10746 siz = (((asoc->highest_tsn_inside_nr_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10747 } else {
10748 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_nr_map + 7) / 8;
10749 }
10750
10752 offset = 1;
10753 } else {
10754 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
10755 }
10757 /* we have a gap .. maybe */
10758 for (i = 0; i < siz; i++) {
10759 tsn_map = asoc->nr_mapping_array[i];
10760 if (i == 0) {
10761 /*
10762 * Clear all bits corresponding to
10763 * TSNs smaller or equal to the
10764 * cumulative TSN.
10765 */
10766 tsn_map &= (~0U << (1 - offset));
10767 }
10768 selector = &sack_array[tsn_map];
10769 if (mergeable && selector->right_edge) {
10770 /*
10771 * Backup, left and right edges were
10772 * ok to merge.
10773 */
10774 num_nr_gap_blocks--;
10775 gap_descriptor--;
10776 }
10777 if (selector->num_entries == 0)
10778 mergeable = 0;
10779 else {
10780 for (j = 0; j < selector->num_entries; j++) {
10781 if (mergeable && selector->right_edge) {
10782 /*
10783 * do a merge by NOT
10784 * setting the left
10785 * side
10786 */
10787 mergeable = 0;
10788 } else {
10789 /*
10790 * no merge, set the
10791 * left side
10792 */
10793 mergeable = 0;
10794 gap_descriptor->start = htons((selector->gaps[j].start + offset));
10795 }
10796 gap_descriptor->end = htons((selector->gaps[j].end + offset));
10797 num_nr_gap_blocks++;
10798 gap_descriptor++;
10799 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
10800 /* no more room */
10801 limit_reached = 1;
10802 break;
10803 }
10804 }
10805 if (selector->left_edge) {
10806 mergeable = 1;
10807 }
10808 }
10809 if (limit_reached) {
10810 /* Reached the limit stop */
10811 break;
10812 }
10813 offset += 8;
10814 }
10815 }
10816 }
10817 /* now we must add any dups we are going to report. */
10818 if ((limit_reached == 0) && (asoc->numduptsns)) {
10819 dup = (uint32_t *)gap_descriptor;
10820 for (i = 0; i < asoc->numduptsns; i++) {
10821 *dup = htonl(asoc->dup_tsns[i]);
10822 dup++;
10823 num_dups++;
10824 if (((caddr_t)dup + sizeof(uint32_t)) > limit) {
10825 /* no more room */
10826 break;
10827 }
10828 }
10829 asoc->numduptsns = 0;
10830 }
10831 /*
10832 * now that the chunk is prepared queue it to the control chunk
10833 * queue.
10834 */
10835 if (type == SCTP_SELECTIVE_ACK) {
10836 a_chk->send_size = (uint16_t)(sizeof(struct sctp_sack_chunk) +
10837 (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
10838 num_dups * sizeof(int32_t));
10839 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
10840 sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
10841 sack->sack.a_rwnd = htonl(asoc->my_rwnd);
10842 sack->sack.num_gap_ack_blks = htons(num_gap_blocks);
10843 sack->sack.num_dup_tsns = htons(num_dups);
10844 sack->ch.chunk_type = type;
10845 sack->ch.chunk_flags = flags;
10846 sack->ch.chunk_length = htons(a_chk->send_size);
10847 } else {
10848 a_chk->send_size = (uint16_t)(sizeof(struct sctp_nr_sack_chunk) +
10849 (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
10850 num_dups * sizeof(int32_t));
10851 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
10852 nr_sack->nr_sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
10853 nr_sack->nr_sack.a_rwnd = htonl(asoc->my_rwnd);
10854 nr_sack->nr_sack.num_gap_ack_blks = htons(num_gap_blocks);
10855 nr_sack->nr_sack.num_nr_gap_ack_blks = htons(num_nr_gap_blocks);
10856 nr_sack->nr_sack.num_dup_tsns = htons(num_dups);
10857 nr_sack->nr_sack.reserved = 0;
10858 nr_sack->ch.chunk_type = type;
10859 nr_sack->ch.chunk_flags = flags;
10860 nr_sack->ch.chunk_length = htons(a_chk->send_size);
10861 }
10862 TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next);
10863 asoc->my_last_reported_rwnd = asoc->my_rwnd;
10864 asoc->ctrl_queue_cnt++;
10865 asoc->send_sack = 0;
10866 SCTP_STAT_INCR(sctps_sendsacks);
10867 return;
10868}
10869
10870void
10871sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked)
10872{
10873 struct mbuf *m_abort, *m, *m_last;
10874 struct mbuf *m_out, *m_end = NULL;
10875 struct sctp_abort_chunk *abort;
10876 struct sctp_auth_chunk *auth = NULL;
10877 struct sctp_nets *net;
10878 uint32_t vtag;
10879 uint32_t auth_offset = 0;
10880 int error;
10881 uint16_t cause_len, chunk_len, padding_len;
10882
10884 /*-
10885	 * Add an AUTH chunk if the chunk requires it, and save the offset into
10886 * the chain for AUTH
10887 */
10889 stcb->asoc.peer_auth_chunks)) {
10890 m_out = sctp_add_auth_chunk(NULL, &m_end, &auth, &auth_offset,
10892 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10893 } else {
10894 m_out = NULL;
10895 }
10896 m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 0, M_NOWAIT, 1, MT_HEADER);
10897 if (m_abort == NULL) {
10898 if (m_out) {
10899 sctp_m_freem(m_out);
10900 }
10901 if (operr) {
10902 sctp_m_freem(operr);
10903 }
10904 return;
10905 }
10906 /* link in any error */
10907 SCTP_BUF_NEXT(m_abort) = operr;
10908 cause_len = 0;
10909 m_last = NULL;
10910 for (m = operr; m; m = SCTP_BUF_NEXT(m)) {
10911 cause_len += (uint16_t)SCTP_BUF_LEN(m);
10912 if (SCTP_BUF_NEXT(m) == NULL) {
10913 m_last = m;
10914 }
10915 }
10916 SCTP_BUF_LEN(m_abort) = sizeof(struct sctp_abort_chunk);
10917 chunk_len = (uint16_t)sizeof(struct sctp_abort_chunk) + cause_len;
10918 padding_len = SCTP_SIZE32(chunk_len) - chunk_len;
10919 if (m_out == NULL) {
10920 /* NO Auth chunk prepended, so reserve space in front */
10922 m_out = m_abort;
10923 } else {
10924 /* Put AUTH chunk at the front of the chain */
10925 SCTP_BUF_NEXT(m_end) = m_abort;
10926 }
10927 if (stcb->asoc.alternate) {
10928 net = stcb->asoc.alternate;
10929 } else {
10930 net = stcb->asoc.primary_destination;
10931 }
10932 /* Fill in the ABORT chunk header. */
10933 abort = mtod(m_abort, struct sctp_abort_chunk *);
10935 if (stcb->asoc.peer_vtag == 0) {
10936 /* This happens iff the assoc is in COOKIE-WAIT state. */
10937 vtag = stcb->asoc.my_vtag;
10939 } else {
10940 vtag = stcb->asoc.peer_vtag;
10941 abort->ch.chunk_flags = 0;
10942 }
10943 abort->ch.chunk_length = htons(chunk_len);
10944 /* Add padding, if necessary. */
10945 if (padding_len > 0) {
10946 if ((m_last == NULL) ||
10947 (sctp_add_pad_tombuf(m_last, padding_len) == NULL)) {
10948 sctp_m_freem(m_out);
10949 return;
10950 }
10951 }
10952 if ((error = sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
10953 (struct sockaddr *)&net->ro._l_addr,
10954 m_out, auth_offset, auth, stcb->asoc.authinfo.active_keyid, 1, 0, 0,
10955 stcb->sctp_ep->sctp_lport, stcb->rport, htonl(vtag),
10956 stcb->asoc.primary_destination->port, NULL,
10957 0, 0,
10958 so_locked))) {
10959 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
10960 if (error == ENOBUFS) {
10961 stcb->asoc.ifp_had_enobuf = 1;
10962 SCTP_STAT_INCR(sctps_lowlevelerr);
10963 }
10964 } else {
10965 stcb->asoc.ifp_had_enobuf = 0;
10966 }
10967 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10968}
10969
10970void
10972 struct sctp_nets *net,
10973 int reflect_vtag)
10974{
10975 /* formulate and SEND a SHUTDOWN-COMPLETE */
10976 struct mbuf *m_shutdown_comp;
10977 struct sctp_shutdown_complete_chunk *shutdown_complete;
10978 uint32_t vtag;
10979 int error;
10980 uint8_t flags;
10981
10982 m_shutdown_comp = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER);
10983 if (m_shutdown_comp == NULL) {
10984 /* no mbuf's */
10985 return;
10986 }
10987 if (reflect_vtag) {
10988 flags = SCTP_HAD_NO_TCB;
10989 vtag = stcb->asoc.my_vtag;
10990 } else {
10991 flags = 0;
10992 vtag = stcb->asoc.peer_vtag;
10993 }
10994 shutdown_complete = mtod(m_shutdown_comp, struct sctp_shutdown_complete_chunk *);
10995 shutdown_complete->ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
10996 shutdown_complete->ch.chunk_flags = flags;
10997 shutdown_complete->ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
10998 SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_chunk);
10999 if ((error = sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
11000 (struct sockaddr *)&net->ro._l_addr,
11001 m_shutdown_comp, 0, NULL, 0, 1, 0, 0,
11002 stcb->sctp_ep->sctp_lport, stcb->rport,
11003 htonl(vtag),
11004 net->port, NULL,
11005 0, 0,
11006 SCTP_SO_NOT_LOCKED))) {
11007 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
11008 if (error == ENOBUFS) {
11009 stcb->asoc.ifp_had_enobuf = 1;
11010 SCTP_STAT_INCR(sctps_lowlevelerr);
11011 }
11012 } else {
11013 stcb->asoc.ifp_had_enobuf = 0;
11014 }
11015 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
11016 return;
11017}
11018
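/*
 * Construct a stand-alone response packet (used for ABORT, OPERATION-ERROR
 * and SHUTDOWN-COMPLETE replies sent without association state): build the
 * IPv4/IPv6 header with source and destination swapped relative to the
 * packet being answered, optionally add a UDP encapsulation header, append
 * the chunk plus any cause parameters, checksum and transmit it directly.
 */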
11019static void
11020sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst,
11021 struct sctphdr *sh, uint32_t vtag,
11022 uint8_t type, struct mbuf *cause,
11023 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
11024 uint32_t vrf_id, uint16_t port)
11025{
11026 struct mbuf *o_pak;
11027 struct mbuf *mout;
11028 struct sctphdr *shout;
11029 struct sctp_chunkhdr *ch;
11030#if defined(INET) || defined(INET6)
11031 struct udphdr *udp;
11032#endif
11033 int ret, len, cause_len, padding_len;
11034#ifdef INET
11035 struct sockaddr_in *src_sin, *dst_sin;
11036 struct ip *ip;
11037#endif
11038#ifdef INET6
11039 struct sockaddr_in6 *src_sin6, *dst_sin6;
11040 struct ip6_hdr *ip6;
11041#endif
11042
11043 /* Compute the length of the cause and add final padding. */
11044 cause_len = 0;
11045 if (cause != NULL) {
11046 struct mbuf *m_at, *m_last = NULL;
11047
11048 for (m_at = cause; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
11049 if (SCTP_BUF_NEXT(m_at) == NULL)
11050 m_last = m_at;
11051 cause_len += SCTP_BUF_LEN(m_at);
11052 }
11053 padding_len = cause_len % 4;
11054 if (padding_len != 0) {
11055 padding_len = 4 - padding_len;
11056 }
11057 if (padding_len != 0) {
11058 if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
11059 sctp_m_freem(cause);
11060 return;
11061 }
11062 }
11063 } else {
11064 padding_len = 0;
11065 }
11066 /* Get an mbuf for the header. */
11067 len = sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
11068 switch (dst->sa_family) {
11069#ifdef INET
11070 case AF_INET:
11071 len += sizeof(struct ip);
11072 break;
11073#endif
11074#ifdef INET6
11075 case AF_INET6:
11076 len += sizeof(struct ip6_hdr);
11077 break;
11078#endif
11079 default:
11080 break;
11081 }
11082#if defined(INET) || defined(INET6)
11083 if (port) {
11084 len += sizeof(struct udphdr);
11085 }
11086#endif
11087 mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_NOWAIT, 1, MT_DATA);
11088 if (mout == NULL) {
11089 if (cause) {
11090 sctp_m_freem(cause);
11091 }
11092 return;
11093 }
11094 SCTP_BUF_RESV_UF(mout, max_linkhdr);
11095 SCTP_BUF_LEN(mout) = len;
11096 SCTP_BUF_NEXT(mout) = cause;
11097 M_SETFIB(mout, fibnum);
11098 mout->m_pkthdr.flowid = mflowid;
11099 M_HASHTYPE_SET(mout, mflowtype);
11100#ifdef INET
11101 ip = NULL;
11102#endif
11103#ifdef INET6
11104 ip6 = NULL;
11105#endif
11106 switch (dst->sa_family) {
11107#ifdef INET
11108 case AF_INET:
11109 src_sin = (struct sockaddr_in *)src;
11110 dst_sin = (struct sockaddr_in *)dst;
11111 ip = mtod(mout, struct ip *);
11112 ip->ip_v = IPVERSION;
11113 ip->ip_hl = (sizeof(struct ip) >> 2);
11114 ip->ip_tos = 0;
11115 ip->ip_off = htons(IP_DF);
11116 ip_fillid(ip);
11117 ip->ip_ttl = MODULE_GLOBAL(ip_defttl);
11118 if (port) {
11119 ip->ip_p = IPPROTO_UDP;
11120 } else {
11121 ip->ip_p = IPPROTO_SCTP;
11122 }
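/* The reply goes back to the originator, so the addresses are swapped. */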
11123 ip->ip_src.s_addr = dst_sin->sin_addr.s_addr;
11124 ip->ip_dst.s_addr = src_sin->sin_addr.s_addr;
11125 ip->ip_sum = 0;
11126 len = sizeof(struct ip);
11127 shout = (struct sctphdr *)((caddr_t)ip + len);
11128 break;
11129#endif
11130#ifdef INET6
11131 case AF_INET6:
11132 src_sin6 = (struct sockaddr_in6 *)src;
11133 dst_sin6 = (struct sockaddr_in6 *)dst;
11134 ip6 = mtod(mout, struct ip6_hdr *);
11135 ip6->ip6_flow = htonl(0x60000000);
11136 if (V_ip6_auto_flowlabel) {
11137 ip6->ip6_flow |= (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
11138 }
11139 ip6->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
11140 if (port) {
11141 ip6->ip6_nxt = IPPROTO_UDP;
11142 } else {
11143 ip6->ip6_nxt = IPPROTO_SCTP;
11144 }
11145 ip6->ip6_src = dst_sin6->sin6_addr;
11146 ip6->ip6_dst = src_sin6->sin6_addr;
11147 len = sizeof(struct ip6_hdr);
11148 shout = (struct sctphdr *)((caddr_t)ip6 + len);
11149 break;
11150#endif
11151 default:
11152 len = 0;
11153 shout = mtod(mout, struct sctphdr *);
11154 break;
11155 }
11156#if defined(INET) || defined(INET6)
11157 if (port) {
11158 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
11159 sctp_m_freem(mout);
11160 return;
11161 }
11162 udp = (struct udphdr *)shout;
11163 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
11164 udp->uh_dport = port;
11165 udp->uh_sum = 0;
11166 udp->uh_ulen = htons((uint16_t)(sizeof(struct udphdr) +
11167 sizeof(struct sctphdr) +
11168 sizeof(struct sctp_chunkhdr) +
11169 cause_len + padding_len));
11170 len += sizeof(struct udphdr);
11171 shout = (struct sctphdr *)((caddr_t)shout + sizeof(struct udphdr));
11172 } else {
11173 udp = NULL;
11174 }
11175#endif
11176 shout->src_port = sh->dest_port;
11177 shout->dest_port = sh->src_port;
11178 shout->checksum = 0;
11179 if (vtag) {
11180 shout->v_tag = htonl(vtag);
11181 } else {
11182 shout->v_tag = sh->v_tag;
11183 }
11184 len += sizeof(struct sctphdr);
11185 ch = (struct sctp_chunkhdr *)((caddr_t)shout + sizeof(struct sctphdr));
11186 ch->chunk_type = type;
11187 if (vtag) {
11188 ch->chunk_flags = 0;
11189 } else {
11190 ch->chunk_flags = SCTP_HAD_NO_TCB;
11191 }
11192 ch->chunk_length = htons((uint16_t)(sizeof(struct sctp_chunkhdr) + cause_len));
11193 len += sizeof(struct sctp_chunkhdr);
11194 len += cause_len + padding_len;
11195
11196 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
11197 sctp_m_freem(mout);
11198 return;
11199 }
11200 SCTP_ATTACH_CHAIN(o_pak, mout, len);
11201 switch (dst->sa_family) {
11202#ifdef INET
11203 case AF_INET:
11204 if (port) {
11205 if (V_udp_cksum) {
11206 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
11207 } else {
11208 udp->uh_sum = 0;
11209 }
11210 }
11211 ip->ip_len = htons(len);
11212 if (port) {
11213 shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip) + sizeof(struct udphdr));
11214 SCTP_STAT_INCR(sctps_sendswcrc);
11215 if (V_udp_cksum) {
11216 SCTP_ENABLE_UDP_CSUM(o_pak);
11217 }
11218 } else {
11219 mout->m_pkthdr.csum_flags = CSUM_SCTP;
11220 mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
11221 SCTP_STAT_INCR(sctps_sendhwcrc);
11222 }
11223#ifdef SCTP_PACKET_LOGGING
11224 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
11225 sctp_packet_log(o_pak);
11226 }
11227#endif
11228 SCTP_PROBE5(send, NULL, NULL, ip, NULL, shout);
11229 SCTP_IP_OUTPUT(ret, o_pak, NULL, NULL, vrf_id);
11230 break;
11231#endif
11232#ifdef INET6
11233 case AF_INET6:
11234 ip6->ip6_plen = htons((uint16_t)(len - sizeof(struct ip6_hdr)));
11235 if (port) {
11236 shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
11237 SCTP_STAT_INCR(sctps_sendswcrc);
11238 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
11239 udp->uh_sum = 0xffff;
11240 }
11241 } else {
11242 mout->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
11243 mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
11244 SCTP_STAT_INCR(sctps_sendhwcrc);
11245 }
11246#ifdef SCTP_PACKET_LOGGING
11247 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
11248 sctp_packet_log(o_pak);
11249 }
11250#endif
11251 SCTP_PROBE5(send, NULL, NULL, ip6, NULL, shout);
11252 SCTP_IP6_OUTPUT(ret, o_pak, NULL, NULL, NULL, vrf_id);
11253 break;
11254#endif
11255 default:
11256 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
11257 dst->sa_family);
11258 sctp_m_freem(mout);
11259 SCTP_LTRACE_ERR_RET_PKT(mout, NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
11260 return;
11261 }
11262 SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
11263 if (port) {
11264 UDPSTAT_INC(udps_opackets);
11265 }
11266 SCTP_STAT_INCR(sctps_sendpackets);
11267 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
11268 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
11269 if (ret) {
11270 SCTP_STAT_INCR(sctps_senderrors);
11271 }
11272 return;
11273}
11274
11275void
11276sctp_send_shutdown_complete2(struct sockaddr *src, struct sockaddr *dst,
11277 struct sctphdr *sh,
11278 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
11279 uint32_t vrf_id, uint16_t port)
11280{
11281 sctp_send_resp_msg(src, dst, sh, 0, SCTP_SHUTDOWN_COMPLETE, NULL,
11282 mflowtype, mflowid, fibnum,
11283 vrf_id, port);
11284}
11285
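/*
 * Queue a HEARTBEAT chunk to the given destination: allocate a control
 * chunk, fill in the heartbeat info parameter (timestamp, address family,
 * address) and append it to the association's control send queue.
 */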
11286void
11287sctp_send_hb(struct sctp_tcb *stcb, struct sctp_nets *net, int so_locked)
11288{
11289 struct sctp_tmit_chunk *chk;
11290 struct sctp_heartbeat_chunk *hb;
11291 struct timeval now;
11292
11293 SCTP_TCB_LOCK_ASSERT(stcb);
11294 if (net == NULL) {
11295 return;
11296 }
11297 (void)SCTP_GETTIME_TIMEVAL(&now);
11298 switch (net->ro._l_addr.sa.sa_family) {
11299#ifdef INET
11300 case AF_INET:
11301 break;
11302#endif
11303#ifdef INET6
11304 case AF_INET6:
11305 break;
11306#endif
11307 default:
11308 return;
11309 }
11310 sctp_alloc_a_chunk(stcb, chk);
11311 if (chk == NULL) {
11312 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak, can't get a chunk for hb\n");
11313 return;
11314 }
11315
11316 chk->copy_by_ref = 0;
11317 chk->rec.chunk_id.id = SCTP_HEARTBEAT_REQUEST;
11318 chk->rec.chunk_id.can_take_data = 1;
11319 chk->flags = 0;
11320 chk->asoc = &stcb->asoc;
11321 chk->send_size = sizeof(struct sctp_heartbeat_chunk);
11322
11323 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
11324 if (chk->data == NULL) {
11325 sctp_free_a_chunk(stcb, chk, so_locked);
11326 return;
11327 }
11328 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11329 SCTP_BUF_LEN(chk->data) = chk->send_size;
11330 chk->sent = SCTP_DATAGRAM_UNSENT;
11331 chk->snd_count = 0;
11332 chk->whoTo = net;
11333 atomic_add_int(&chk->whoTo->ref_count, 1);
11334 /* Now we have a mbuf that we can fill in with the details */
11335 hb = mtod(chk->data, struct sctp_heartbeat_chunk *);
11336 memset(hb, 0, sizeof(struct sctp_heartbeat_chunk));
11337 /* fill out chunk header */
11338 hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST;
11339 hb->ch.chunk_flags = 0;
11340 hb->ch.chunk_length = htons(chk->send_size);
11341 /* Fill out hb parameter */
11342 hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO);
11343 hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param));
11344 hb->heartbeat.hb_info.time_value_1 = (uint32_t)now.tv_sec;
11345 hb->heartbeat.hb_info.time_value_2 = now.tv_usec;
11346 /* Did our user request this one, put it in */
11347 hb->heartbeat.hb_info.addr_family = (uint8_t)net->ro._l_addr.sa.sa_family;
11348 hb->heartbeat.hb_info.addr_len = net->ro._l_addr.sa.sa_len;
11349 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
11350 /*
11351 * we only take from the entropy pool if the address is not
11352 * confirmed.
11353 */
11356 } else {
11359 }
11360 switch (net->ro._l_addr.sa.sa_family) {
11361#ifdef INET
11362 case AF_INET:
11363 memcpy(hb->heartbeat.hb_info.address,
11364 &net->ro._l_addr.sin.sin_addr,
11365 sizeof(net->ro._l_addr.sin.sin_addr));
11366 break;
11367#endif
11368#ifdef INET6
11369 case AF_INET6:
11370 memcpy(hb->heartbeat.hb_info.address,
11371 &net->ro._l_addr.sin6.sin6_addr,
11372 sizeof(net->ro._l_addr.sin6.sin6_addr));
11373 break;
11374#endif
11375 default:
11376 if (chk->data) {
11377 sctp_m_freem(chk->data);
11378 chk->data = NULL;
11379 }
11380 sctp_free_a_chunk(stcb, chk, so_locked);
11381 return;
11382 break;
11383 }
11384 net->hb_responded = 0;
11385 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11386 stcb->asoc.ctrl_queue_cnt++;
11387 SCTP_STAT_INCR(sctps_sendheartbeat);
11388 return;
11389}
11390
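/*
 * Queue an ECN-Echo (ECNE) chunk toward the destination that delivered a
 * CE-marked packet.  If an ECNE for that destination is already queued,
 * only its TSN and packet counter are updated instead of adding a new chunk.
 */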
11391void
11392sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
11393 uint32_t high_tsn)
11394{
11395 struct sctp_association *asoc;
11396 struct sctp_ecne_chunk *ecne;
11397 struct sctp_tmit_chunk *chk;
11398
11399 if (net == NULL) {
11400 return;
11401 }
11402 asoc = &stcb->asoc;
11403 SCTP_TCB_LOCK_ASSERT(stcb);
11404 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
11405 if ((chk->rec.chunk_id.id == SCTP_ECN_ECHO) && (net == chk->whoTo)) {
11406 /* found a previous ECN_ECHO update it if needed */
11407 uint32_t cnt, ctsn;
11408
11409 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
11410 ctsn = ntohl(ecne->tsn);
11411 if (SCTP_TSN_GT(high_tsn, ctsn)) {
11412 ecne->tsn = htonl(high_tsn);
11413 SCTP_STAT_INCR(sctps_queue_upd_ecne);
11414 }
11415 cnt = ntohl(ecne->num_pkts_since_cwr);
11416 cnt++;
11417 ecne->num_pkts_since_cwr = htonl(cnt);
11418 return;
11419 }
11420 }
11421 /* nope could not find one to update so we must build one */
11422 sctp_alloc_a_chunk(stcb, chk);
11423 if (chk == NULL) {
11424 return;
11425 }
11426 SCTP_STAT_INCR(sctps_queue_upd_ecne);
11427 chk->copy_by_ref = 0;
11428 chk->rec.chunk_id.id = SCTP_ECN_ECHO;
11429 chk->rec.chunk_id.can_take_data = 0;
11430 chk->flags = 0;
11431 chk->asoc = &stcb->asoc;
11432 chk->send_size = sizeof(struct sctp_ecne_chunk);
11433 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
11434 if (chk->data == NULL) {
11435 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11436 return;
11437 }
11438 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11439 SCTP_BUF_LEN(chk->data) = chk->send_size;
11440 chk->sent = SCTP_DATAGRAM_UNSENT;
11441 chk->snd_count = 0;
11442 chk->whoTo = net;
11443 atomic_add_int(&chk->whoTo->ref_count, 1);
11444
11445 stcb->asoc.ecn_echo_cnt_onq++;
11446 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
11447 ecne->ch.chunk_type = SCTP_ECN_ECHO;
11448 ecne->ch.chunk_flags = 0;
11449 ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk));
11450 ecne->tsn = htonl(high_tsn);
11451 ecne->num_pkts_since_cwr = htonl(1);
11452 TAILQ_INSERT_HEAD(&stcb->asoc.control_send_queue, chk, sctp_next);
11453 asoc->ctrl_queue_cnt++;
11454}
11455
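/*
 * Queue a PACKET-DROPPED chunk echoing (at most one MTU of) the received
 * packet back to the peer, provided the peer advertised support for the
 * packet-drop extension.  The chunk also reports the local receive buffer
 * limit and the amount of data currently queued.
 */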
11456void
11457sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
11458 struct mbuf *m, int len, int iphlen, int bad_crc)
11459{
11460 struct sctp_association *asoc;
11461 struct sctp_pktdrop_chunk *drp;
11462 struct sctp_tmit_chunk *chk;
11463 uint8_t *datap;
11464 int was_trunc = 0;
11465 int fullsz = 0;
11466 long spc;
11467 int offset;
11468 struct sctp_chunkhdr *ch, chunk_buf;
11469 unsigned int chk_length;
11470
11471 if (!stcb) {
11472 return;
11473 }
11474 asoc = &stcb->asoc;
11475 SCTP_TCB_LOCK_ASSERT(stcb);
11476 if (asoc->pktdrop_supported == 0) {
11477 /*-
11478 * peer must declare support before I send one.
11479 */
11480 return;
11481 }
11482 if (stcb->sctp_socket == NULL) {
11483 return;
11484 }
11485 sctp_alloc_a_chunk(stcb, chk);
11486 if (chk == NULL) {
11487 return;
11488 }
11489 chk->copy_by_ref = 0;
11490 chk->rec.chunk_id.id = SCTP_PACKET_DROPPED;
11491 chk->rec.chunk_id.can_take_data = 1;
11492 chk->flags = 0;
11493 len -= iphlen;
11494 chk->send_size = len;
11495 /* Validate that we do not have an ABORT in here. */
11496 offset = iphlen + sizeof(struct sctphdr);
11497 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
11498 sizeof(*ch), (uint8_t *)&chunk_buf);
11499 while (ch != NULL) {
11500 chk_length = ntohs(ch->chunk_length);
11501 if (chk_length < sizeof(*ch)) {
11502 /* break to abort land */
11503 break;
11504 }
11505 switch (ch->chunk_type) {
11506 case SCTP_PACKET_DROPPED:
11507 case SCTP_ABORT_ASSOCIATION:
11508 case SCTP_INITIATION_ACK:
11509 /*-
11510 * We don't respond with a PKT-DROP to an ABORT
11511 * or PKT-DROP. We also do not respond to an
11512 * INIT-ACK, because we can't know if the initiation
11513 * was successful or not.
11514 */
11515 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11516 return;
11517 default:
11518 break;
11519 }
11520 offset += SCTP_SIZE32(chk_length);
11521 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
11522 sizeof(*ch), (uint8_t *)&chunk_buf);
11523 }
11524
11525 if ((len + SCTP_MAX_OVERHEAD + sizeof(struct sctp_pktdrop_chunk)) >
11526 min(stcb->asoc.smallest_mtu, MCLBYTES)) {
11527 /*
11528 * only send 1 mtu worth, trim off the excess on the end.
11529 */
11530 fullsz = len;
11531 len = min(stcb->asoc.smallest_mtu, MCLBYTES) - SCTP_MAX_OVERHEAD;
11532 was_trunc = 1;
11533 }
11534 chk->asoc = &stcb->asoc;
11535 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
11536 if (chk->data == NULL) {
11537jump_out:
11538 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11539 return;
11540 }
11541 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11542 drp = mtod(chk->data, struct sctp_pktdrop_chunk *);
11543 if (drp == NULL) {
11544 sctp_m_freem(chk->data);
11545 chk->data = NULL;
11546 goto jump_out;
11547 }
11548 chk->book_size = SCTP_SIZE32((chk->send_size + sizeof(struct sctp_pktdrop_chunk) +
11549 sizeof(struct sctphdr) + SCTP_MED_OVERHEAD));
11550 chk->book_size_scale = 0;
11551 if (was_trunc) {
11552 drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED;
11553 drp->trunc_len = htons(fullsz);
11554 /*
11555 * Len is already adjusted to size minus overhead above take
11556 * out the pkt_drop chunk itself from it.
11557 */
11558 chk->send_size = (uint16_t)(len - sizeof(struct sctp_pktdrop_chunk));
11559 len = chk->send_size;
11560 } else {
11561 /* no truncation needed */
11562 drp->ch.chunk_flags = 0;
11563 drp->trunc_len = htons(0);
11564 }
11565 if (bad_crc) {
11566 drp->ch.chunk_flags |= SCTP_BADCRC;
11567 }
11568 chk->send_size += sizeof(struct sctp_pktdrop_chunk);
11569 SCTP_BUF_LEN(chk->data) = chk->send_size;
11570 chk->sent = SCTP_DATAGRAM_UNSENT;
11571 chk->snd_count = 0;
11572 if (net) {
11573 /* we should hit here */
11574 chk->whoTo = net;
11575 atomic_add_int(&chk->whoTo->ref_count, 1);
11576 } else {
11577 chk->whoTo = NULL;
11578 }
11579 drp->ch.chunk_type = SCTP_PACKET_DROPPED;
11580 drp->ch.chunk_length = htons(chk->send_size);
11581 spc = SCTP_SB_LIMIT_RCV(stcb->sctp_socket);
11582 if (spc < 0) {
11583 spc = 0;
11584 }
11585 drp->bottle_bw = htonl(spc);
11586 if (asoc->my_rwnd) {
11587 drp->current_onq = htonl(asoc->size_on_reasm_queue +
11588 asoc->size_on_all_streams +
11589 asoc->my_rwnd_control_len +
11590 stcb->sctp_socket->so_rcv.sb_cc);
11591 } else {
11592 /*-
11593 * If my rwnd is 0, possibly from mbuf depletion as well as
11594 * space used, tell the peer there is NO space aka onq == bw
11595 */
11596 drp->current_onq = htonl(spc);
11597 }
11598 drp->reserved = 0;
11599 datap = drp->data;
11600 m_copydata(m, iphlen, len, (caddr_t)datap);
11601 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11602 asoc->ctrl_queue_cnt++;
11603}
11604
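/*
 * Queue a CWR chunk acknowledging an ECN-Echo.  As with ECNE, a CWR already
 * queued to the same destination is updated in place rather than queueing
 * a duplicate.
 */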
11605void
11606sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn, uint8_t override)
11607{
11608 struct sctp_association *asoc;
11609 struct sctp_cwr_chunk *cwr;
11610 struct sctp_tmit_chunk *chk;
11611
11612 SCTP_TCB_LOCK_ASSERT(stcb);
11613 if (net == NULL) {
11614 return;
11615 }
11616 asoc = &stcb->asoc;
11617 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
11618 if ((chk->rec.chunk_id.id == SCTP_ECN_CWR) && (net == chk->whoTo)) {
11619 /*
11620 * found a previous CWR queued to same destination
11621 * update it if needed
11622 */
11623 uint32_t ctsn;
11624
11625 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
11626 ctsn = ntohl(cwr->tsn);
11627 if (SCTP_TSN_GT(high_tsn, ctsn)) {
11628 cwr->tsn = htonl(high_tsn);
11629 }
11630 if (override & SCTP_CWR_REDUCE_OVERRIDE) {
11631 /* Make sure override is carried */
11632 cwr->ch.chunk_flags |= SCTP_CWR_REDUCE_OVERRIDE;
11633 }
11634 return;
11635 }
11636 }
11637 sctp_alloc_a_chunk(stcb, chk);
11638 if (chk == NULL) {
11639 return;
11640 }
11641 chk->copy_by_ref = 0;
11642 chk->rec.chunk_id.id = SCTP_ECN_CWR;
11643 chk->rec.chunk_id.can_take_data = 1;
11644 chk->flags = 0;
11645 chk->asoc = &stcb->asoc;
11646 chk->send_size = sizeof(struct sctp_cwr_chunk);
11647 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
11648 if (chk->data == NULL) {
11649 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11650 return;
11651 }
11652 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11653 SCTP_BUF_LEN(chk->data) = chk->send_size;
11654 chk->sent = SCTP_DATAGRAM_UNSENT;
11655 chk->snd_count = 0;
11656 chk->whoTo = net;
11657 atomic_add_int(&chk->whoTo->ref_count, 1);
11658 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
11659 cwr->ch.chunk_type = SCTP_ECN_CWR;
11660 cwr->ch.chunk_flags = override;
11661 cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk));
11662 cwr->tsn = htonl(high_tsn);
11663 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11664 asoc->ctrl_queue_cnt++;
11665}
11666
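/*
 * The helpers below append individual RE-CONFIG parameters to a stream
 * reset chunk under construction: outgoing/incoming SSN reset requests,
 * an SSN/TSN reset request, responses, and add-stream requests.  Each one
 * extends the chunk and fixes up the chunk length and book-keeping sizes.
 */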
11667static int
11668sctp_add_stream_reset_out(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
11669 uint32_t seq, uint32_t resp_seq, uint32_t last_sent)
11670{
11671 uint16_t len, old_len, i;
11672 struct sctp_stream_reset_out_request *req_out;
11673 struct sctp_chunkhdr *ch;
11674 int at;
11675 int number_entries = 0;
11676
11677 ch = mtod(chk->data, struct sctp_chunkhdr *);
11678 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11679 /* get to new offset for the param. */
11680 req_out = (struct sctp_stream_reset_out_request *)((caddr_t)ch + len);
11681 /* now how long will this param be? */
11682 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
11683 if ((stcb->asoc.strmout[i].state == SCTP_STREAM_RESET_PENDING) &&
11684 (stcb->asoc.strmout[i].chunks_on_queues == 0) &&
11685 TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
11686 number_entries++;
11687 }
11688 }
11689 if (number_entries == 0) {
11690 return (0);
11691 }
11692 if (number_entries == stcb->asoc.streamoutcnt) {
11693 number_entries = 0;
11694 }
11695 if (number_entries > SCTP_MAX_STREAMS_AT_ONCE_RESET) {
11696 number_entries = SCTP_MAX_STREAMS_AT_ONCE_RESET;
11697 }
11698 len = (uint16_t)(sizeof(struct sctp_stream_reset_out_request) + (sizeof(uint16_t) * number_entries));
11699 req_out->ph.param_type = htons(SCTP_STR_RESET_OUT_REQUEST);
11700 req_out->ph.param_length = htons(len);
11701 req_out->request_seq = htonl(seq);
11702 req_out->response_seq = htonl(resp_seq);
11703 req_out->send_reset_at_tsn = htonl(last_sent);
11704 at = 0;
11705 if (number_entries) {
11706 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
11707 if ((stcb->asoc.strmout[i].state == SCTP_STREAM_RESET_PENDING) &&
11708 (stcb->asoc.strmout[i].chunks_on_queues == 0) &&
11709 TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
11710 req_out->list_of_streams[at] = htons(i);
11711 at++;
11712 stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_IN_FLIGHT;
11713 if (at >= number_entries) {
11714 break;
11715 }
11716 }
11717 }
11718 } else {
11719 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
11720 stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_IN_FLIGHT;
11721 }
11722 }
11723 if (SCTP_SIZE32(len) > len) {
11724 /*-
11725 * Need to worry about the pad we may end up adding to the
11726 * end. This is easy since the struct is either aligned to 4
11727 * bytes or 2 bytes off.
11728 */
11729 req_out->list_of_streams[number_entries] = 0;
11730 }
11731 /* now fix the chunk length */
11732 ch->chunk_length = htons(len + old_len);
11733 chk->book_size = len + old_len;
11734 chk->book_size_scale = 0;
11735 chk->send_size = SCTP_SIZE32(chk->book_size);
11736 SCTP_BUF_LEN(chk->data) = chk->send_size;
11737 return (1);
11738}
11739
11740static void
11741sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk,
11742 int number_entries, uint16_t *list,
11743 uint32_t seq)
11744{
11745 uint16_t len, old_len, i;
11746 struct sctp_stream_reset_in_request *req_in;
11747 struct sctp_chunkhdr *ch;
11748
11749 ch = mtod(chk->data, struct sctp_chunkhdr *);
11750 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11751
11752 /* get to new offset for the param. */
11753 req_in = (struct sctp_stream_reset_in_request *)((caddr_t)ch + len);
11754 /* now how long will this param be? */
11755 len = (uint16_t)(sizeof(struct sctp_stream_reset_in_request) + (sizeof(uint16_t) * number_entries));
11756 req_in->ph.param_type = htons(SCTP_STR_RESET_IN_REQUEST);
11757 req_in->ph.param_length = htons(len);
11758 req_in->request_seq = htonl(seq);
11759 if (number_entries) {
11760 for (i = 0; i < number_entries; i++) {
11761 req_in->list_of_streams[i] = htons(list[i]);
11762 }
11763 }
11764 if (SCTP_SIZE32(len) > len) {
11765 /*-
11766 * Need to worry about the pad we may end up adding to the
11767 * end. This is easy since the struct is either aligned to 4
11768 * bytes or 2 bytes off.
11769 */
11770 req_in->list_of_streams[number_entries] = 0;
11771 }
11772 /* now fix the chunk length */
11773 ch->chunk_length = htons(len + old_len);
11774 chk->book_size = len + old_len;
11775 chk->book_size_scale = 0;
11776 chk->send_size = SCTP_SIZE32(chk->book_size);
11777 SCTP_BUF_LEN(chk->data) = chk->send_size;
11778 return;
11779}
11780
11781static void
11782sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk,
11783 uint32_t seq)
11784{
11785 uint16_t len, old_len;
11786 struct sctp_stream_reset_tsn_request *req_tsn;
11787 struct sctp_chunkhdr *ch;
11788
11789 ch = mtod(chk->data, struct sctp_chunkhdr *);
11790 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11791
11792 /* get to new offset for the param. */
11793 req_tsn = (struct sctp_stream_reset_tsn_request *)((caddr_t)ch + len);
11794 /* now how long will this param be? */
11795 len = sizeof(struct sctp_stream_reset_tsn_request);
11796 req_tsn->ph.param_type = htons(SCTP_STR_RESET_TSN_REQUEST);
11797 req_tsn->ph.param_length = htons(len);
11798 req_tsn->request_seq = htonl(seq);
11799
11800 /* now fix the chunk length */
11801 ch->chunk_length = htons(len + old_len);
11802 chk->send_size = len + old_len;
11803 chk->book_size = SCTP_SIZE32(chk->send_size);
11804 chk->book_size_scale = 0;
11805 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
11806 return;
11807}
11808
11809void
11810sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk,
11811 uint32_t resp_seq, uint32_t result)
11812{
11813 uint16_t len, old_len;
11814 struct sctp_stream_reset_response *resp;
11815 struct sctp_chunkhdr *ch;
11816
11817 ch = mtod(chk->data, struct sctp_chunkhdr *);
11818 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11819
11820 /* get to new offset for the param. */
11821 resp = (struct sctp_stream_reset_response *)((caddr_t)ch + len);
11822 /* now how long will this param be? */
11823 len = sizeof(struct sctp_stream_reset_response);
11824 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
11825 resp->ph.param_length = htons(len);
11826 resp->response_seq = htonl(resp_seq);
11827 resp->result = ntohl(result);
11828
11829 /* now fix the chunk length */
11830 ch->chunk_length = htons(len + old_len);
11831 chk->book_size = len + old_len;
11832 chk->book_size_scale = 0;
11833 chk->send_size = SCTP_SIZE32(chk->book_size);
11834 SCTP_BUF_LEN(chk->data) = chk->send_size;
11835 return;
11836}
11837
11838void
11839sctp_send_deferred_reset_response(struct sctp_tcb *stcb,
11840 struct sctp_stream_reset_list *ent,
11841 int response)
11842{
11843 struct sctp_association *asoc;
11844 struct sctp_tmit_chunk *chk;
11845 struct sctp_chunkhdr *ch;
11846
11847 asoc = &stcb->asoc;
11848
11849 /*
11850 * Reset our last reset action to the new one IP -> response
11851 * (PERFORMED probably). This assures that if we fail to send, a
11852 * retran from the peer will get the new response.
11853 */
11854 asoc->last_reset_action[0] = response;
11855 if (asoc->stream_reset_outstanding) {
11856 return;
11857 }
11858 sctp_alloc_a_chunk(stcb, chk);
11859 if (chk == NULL) {
11860 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11861 return;
11862 }
11863 chk->copy_by_ref = 0;
11864 chk->rec.chunk_id.id = SCTP_STREAM_RESET;
11865 chk->rec.chunk_id.can_take_data = 0;
11866 chk->flags = 0;
11867 chk->asoc = &stcb->asoc;
11868 chk->book_size = sizeof(struct sctp_chunkhdr);
11869 chk->send_size = SCTP_SIZE32(chk->book_size);
11870 chk->book_size_scale = 0;
11871 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
11872 if (chk->data == NULL) {
11874 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11875 return;
11876 }
11877 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11878 /* setup chunk parameters */
11879 chk->sent = SCTP_DATAGRAM_UNSENT;
11880 chk->snd_count = 0;
11881 if (stcb->asoc.alternate) {
11882 chk->whoTo = stcb->asoc.alternate;
11883 } else {
11884 chk->whoTo = stcb->asoc.primary_destination;
11885 }
11886 ch = mtod(chk->data, struct sctp_chunkhdr *);
11887 ch->chunk_type = SCTP_STREAM_RESET;
11888 ch->chunk_flags = 0;
11889 ch->chunk_length = htons(chk->book_size);
11890 atomic_add_int(&chk->whoTo->ref_count, 1);
11891 SCTP_BUF_LEN(chk->data) = chk->send_size;
11892 sctp_add_stream_reset_result(chk, ent->seq, response);
11893 /* insert the chunk for sending */
11894 TAILQ_INSERT_TAIL(&asoc->control_send_queue,
11895 chk,
11896 sctp_next);
11897 asoc->ctrl_queue_cnt++;
11898}
11899
11900void
11901sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk,
11902 uint32_t resp_seq, uint32_t result,
11903 uint32_t send_una, uint32_t recv_next)
11904{
11905 uint16_t len, old_len;
11906 struct sctp_stream_reset_response_tsn *resp;
11907 struct sctp_chunkhdr *ch;
11908
11909 ch = mtod(chk->data, struct sctp_chunkhdr *);
11910 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11911
11912 /* get to new offset for the param. */
11913 resp = (struct sctp_stream_reset_response_tsn *)((caddr_t)ch + len);
11914 /* now how long will this param be? */
11915 len = sizeof(struct sctp_stream_reset_response_tsn);
11916 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
11917 resp->ph.param_length = htons(len);
11918 resp->response_seq = htonl(resp_seq);
11919 resp->result = htonl(result);
11920 resp->senders_next_tsn = htonl(send_una);
11921 resp->receivers_next_tsn = htonl(recv_next);
11922
11923 /* now fix the chunk length */
11924 ch->chunk_length = htons(len + old_len);
11925 chk->book_size = len + old_len;
11926 chk->send_size = SCTP_SIZE32(chk->book_size);
11927 chk->book_size_scale = 0;
11928 SCTP_BUF_LEN(chk->data) = chk->send_size;
11929 return;
11930}
11931
11932static void
11933sctp_add_an_out_stream(struct sctp_tmit_chunk *chk,
11934 uint32_t seq,
11935 uint16_t adding)
11936{
11937 uint16_t len, old_len;
11938 struct sctp_chunkhdr *ch;
11939 struct sctp_stream_reset_add_strm *addstr;
11940
11941 ch = mtod(chk->data, struct sctp_chunkhdr *);
11942 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11943
11944 /* get to new offset for the param. */
11945 addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
11946 /* now how long will this param be? */
11947 len = sizeof(struct sctp_stream_reset_add_strm);
11948
11949 /* Fill it out. */
11950 addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_OUT_STREAMS);
11951 addstr->ph.param_length = htons(len);
11952 addstr->request_seq = htonl(seq);
11953 addstr->number_of_streams = htons(adding);
11954 addstr->reserved = 0;
11955
11956 /* now fix the chunk length */
11957 ch->chunk_length = htons(len + old_len);
11958 chk->send_size = len + old_len;
11959 chk->book_size = SCTP_SIZE32(chk->send_size);
11960 chk->book_size_scale = 0;
11961 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
11962 return;
11963}
11964
11965static void
11966sctp_add_an_in_stream(struct sctp_tmit_chunk *chk,
11967 uint32_t seq,
11968 uint16_t adding)
11969{
11970 uint16_t len, old_len;
11971 struct sctp_chunkhdr *ch;
11972 struct sctp_stream_reset_add_strm *addstr;
11973
11974 ch = mtod(chk->data, struct sctp_chunkhdr *);
11975 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11976
11977 /* get to new offset for the param. */
11978 addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
11979 /* now how long will this param be? */
11980 len = sizeof(struct sctp_stream_reset_add_strm);
11981 /* Fill it out. */
11982 addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_IN_STREAMS);
11983 addstr->ph.param_length = htons(len);
11984 addstr->request_seq = htonl(seq);
11985 addstr->number_of_streams = htons(adding);
11986 addstr->reserved = 0;
11987
11988 /* now fix the chunk length */
11989 ch->chunk_length = htons(len + old_len);
11990 chk->send_size = len + old_len;
11991 chk->book_size = SCTP_SIZE32(chk->send_size);
11992 chk->book_size_scale = 0;
11993 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
11994 return;
11995}
11996
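/*
 * Build and queue a RE-CONFIG chunk containing only an outgoing SSN reset
 * request for the streams currently pending reset, provided no other
 * stream reset request is outstanding.
 */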
11997int
11998sctp_send_stream_reset_out_if_possible(struct sctp_tcb *stcb, int so_locked)
11999{
12000 struct sctp_association *asoc;
12001 struct sctp_tmit_chunk *chk;
12002 struct sctp_chunkhdr *ch;
12003 uint32_t seq;
12004
12005 asoc = &stcb->asoc;
12006 asoc->trigger_reset = 0;
12007 if (asoc->stream_reset_outstanding) {
12008 return (EALREADY);
12009 }
12010 sctp_alloc_a_chunk(stcb, chk);
12011 if (chk == NULL) {
12012 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12013 return (ENOMEM);
12014 }
12015 chk->copy_by_ref = 0;
12016 chk->rec.chunk_id.id = SCTP_STREAM_RESET;
12017 chk->rec.chunk_id.can_take_data = 0;
12018 chk->flags = 0;
12019 chk->asoc = &stcb->asoc;
12020 chk->book_size = sizeof(struct sctp_chunkhdr);
12021 chk->send_size = SCTP_SIZE32(chk->book_size);
12022 chk->book_size_scale = 0;
12023 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
12024 if (chk->data == NULL) {
12025 sctp_free_a_chunk(stcb, chk, so_locked);
12026 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12027 return (ENOMEM);
12028 }
12029 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12030
12031 /* setup chunk parameters */
12032 chk->sent = SCTP_DATAGRAM_UNSENT;
12033 chk->snd_count = 0;
12034 if (stcb->asoc.alternate) {
12035 chk->whoTo = stcb->asoc.alternate;
12036 } else {
12037 chk->whoTo = stcb->asoc.primary_destination;
12038 }
12039 ch = mtod(chk->data, struct sctp_chunkhdr *);
12040 ch->chunk_type = SCTP_STREAM_RESET;
12041 ch->chunk_flags = 0;
12042 ch->chunk_length = htons(chk->book_size);
12043 atomic_add_int(&chk->whoTo->ref_count, 1);
12044 SCTP_BUF_LEN(chk->data) = chk->send_size;
12045 seq = stcb->asoc.str_reset_seq_out;
12046 if (sctp_add_stream_reset_out(stcb, chk, seq, (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1))) {
12047 seq++;
12048 asoc->stream_reset_outstanding++;
12049 } else {
12050 m_freem(chk->data);
12051 chk->data = NULL;
12052 sctp_free_a_chunk(stcb, chk, so_locked);
12053 return (ENOENT);
12054 }
12055 asoc->str_reset = chk;
12056 /* insert the chunk for sending */
12057 TAILQ_INSERT_TAIL(&asoc->control_send_queue,
12058 chk,
12059 sctp_next);
12060 asoc->ctrl_queue_cnt++;
12061
12062 if (stcb->asoc.send_sack) {
12063 sctp_send_sack(stcb, so_locked);
12064 }
12065 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
12066 return (0);
12067}
12068
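/*
 * Build and queue a RE-CONFIG chunk according to the caller's request:
 * outgoing and/or incoming stream resets, an SSN/TSN reset, and add-stream
 * requests.  When outgoing streams are added, the stream-out array is
 * grown here as well.
 */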
12069int
12070sctp_send_str_reset_req(struct sctp_tcb *stcb,
12071 uint16_t number_entries, uint16_t *list,
12072 uint8_t send_in_req,
12073 uint8_t send_tsn_req,
12074 uint8_t add_stream,
12075 uint16_t adding_o,
12076 uint16_t adding_i, uint8_t peer_asked)
12077{
12078 struct sctp_association *asoc;
12079 struct sctp_tmit_chunk *chk;
12080 struct sctp_chunkhdr *ch;
12081 int can_send_out_req = 0;
12082 uint32_t seq;
12083
12084 asoc = &stcb->asoc;
12085 if (asoc->stream_reset_outstanding) {
12086 /*-
12087 * Already one pending, must get ACK back to clear the flag.
12088 */
12089 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EBUSY);
12090 return (EBUSY);
12091 }
12092 if ((send_in_req == 0) && (send_tsn_req == 0) &&
12093 (add_stream == 0)) {
12094 /* nothing to do */
12095 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12096 return (EINVAL);
12097 }
12098 if (send_tsn_req && send_in_req) {
12099 /* error, can't do that */
12100 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12101 return (EINVAL);
12102 } else if (send_in_req) {
12103 can_send_out_req = 1;
12104 }
12105 if (number_entries > (MCLBYTES -
12106 SCTP_MIN_OVERHEAD -
12107 sizeof(struct sctp_chunkhdr) -
12108 sizeof(struct sctp_stream_reset_out_request)) /
12109 sizeof(uint16_t)) {
12110 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12111 return (ENOMEM);
12112 }
12113 sctp_alloc_a_chunk(stcb, chk);
12114 if (chk == NULL) {
12115 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12116 return (ENOMEM);
12117 }
12118 chk->copy_by_ref = 0;
12119 chk->rec.chunk_id.id = SCTP_STREAM_RESET;
12120 chk->rec.chunk_id.can_take_data = 0;
12121 chk->flags = 0;
12122 chk->asoc = &stcb->asoc;
12123 chk->book_size = sizeof(struct sctp_chunkhdr);
12124 chk->send_size = SCTP_SIZE32(chk->book_size);
12125 chk->book_size_scale = 0;
12126 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
12127 if (chk->data == NULL) {
12129 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12130 return (ENOMEM);
12131 }
12132 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12133
12134 /* setup chunk parameters */
12135 chk->sent = SCTP_DATAGRAM_UNSENT;
12136 chk->snd_count = 0;
12137 if (stcb->asoc.alternate) {
12138 chk->whoTo = stcb->asoc.alternate;
12139 } else {
12140 chk->whoTo = stcb->asoc.primary_destination;
12141 }
12142 atomic_add_int(&chk->whoTo->ref_count, 1);
12143 ch = mtod(chk->data, struct sctp_chunkhdr *);
12144 ch->chunk_type = SCTP_STREAM_RESET;
12145 ch->chunk_flags = 0;
12146 ch->chunk_length = htons(chk->book_size);
12147 SCTP_BUF_LEN(chk->data) = chk->send_size;
12148
12149 seq = stcb->asoc.str_reset_seq_out;
12150 if (can_send_out_req) {
12151 int ret;
12152
12153 ret = sctp_add_stream_reset_out(stcb, chk, seq, (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1));
12154 if (ret) {
12155 seq++;
12156 asoc->stream_reset_outstanding++;
12157 }
12158 }
12159 if ((add_stream & 1) &&
12160 ((stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt) < adding_o)) {
12161 /* Need to allocate more */
12162 struct sctp_stream_out *oldstream;
12163 struct sctp_stream_queue_pending *sp, *nsp;
12164 int i;
12165#if defined(SCTP_DETAILED_STR_STATS)
12166 int j;
12167#endif
12168
12169 oldstream = stcb->asoc.strmout;
12170 /* get some more */
12171 SCTP_MALLOC(stcb->asoc.strmout, struct sctp_stream_out *,
12172 (stcb->asoc.streamoutcnt + adding_o) * sizeof(struct sctp_stream_out),
12173 SCTP_M_STRMO);
12174 if (stcb->asoc.strmout == NULL) {
12175 uint8_t x;
12176
12177 stcb->asoc.strmout = oldstream;
12178 /* Turn off the bit */
12179 x = add_stream & 0xfe;
12180 add_stream = x;
12181 goto skip_stuff;
12182 }
12183 /*
12184 * Ok now we proceed with copying the old out stuff and
12185 * initializing the new stuff.
12186 */
12187 SCTP_TCB_SEND_LOCK(stcb);
12188 stcb->asoc.ss_functions.sctp_ss_clear(stcb, &stcb->asoc, false);
12189 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
12190 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
12191 /* FIX ME FIX ME */
12192 /*
12193 * This should be a SS_COPY operation FIX ME STREAM
12194 * SCHEDULER EXPERT
12195 */
12196 stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], &oldstream[i]);
12197 stcb->asoc.strmout[i].chunks_on_queues = oldstream[i].chunks_on_queues;
12198#if defined(SCTP_DETAILED_STR_STATS)
12199 for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
12200 stcb->asoc.strmout[i].abandoned_sent[j] = oldstream[i].abandoned_sent[j];
12201 stcb->asoc.strmout[i].abandoned_unsent[j] = oldstream[i].abandoned_unsent[j];
12202 }
12203#else
12204 stcb->asoc.strmout[i].abandoned_sent[0] = oldstream[i].abandoned_sent[0];
12205 stcb->asoc.strmout[i].abandoned_unsent[0] = oldstream[i].abandoned_unsent[0];
12206#endif
12207 stcb->asoc.strmout[i].next_mid_ordered = oldstream[i].next_mid_ordered;
12208 stcb->asoc.strmout[i].next_mid_unordered = oldstream[i].next_mid_unordered;
12209 stcb->asoc.strmout[i].last_msg_incomplete = oldstream[i].last_msg_incomplete;
12210 stcb->asoc.strmout[i].sid = i;
12211 stcb->asoc.strmout[i].state = oldstream[i].state;
12212 /* now anything on those queues? */
12213 TAILQ_FOREACH_SAFE(sp, &oldstream[i].outqueue, next, nsp) {
12214 TAILQ_REMOVE(&oldstream[i].outqueue, sp, next);
12215 TAILQ_INSERT_TAIL(&stcb->asoc.strmout[i].outqueue, sp, next);
12216 }
12217 }
12218 /* now the new streams */
12219 stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc);
12220 for (i = stcb->asoc.streamoutcnt; i < (stcb->asoc.streamoutcnt + adding_o); i++) {
12221 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
12222 stcb->asoc.strmout[i].chunks_on_queues = 0;
12223#if defined(SCTP_DETAILED_STR_STATS)
12224 for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
12225 stcb->asoc.strmout[i].abandoned_sent[j] = 0;
12226 stcb->asoc.strmout[i].abandoned_unsent[j] = 0;
12227 }
12228#else
12229 stcb->asoc.strmout[i].abandoned_sent[0] = 0;
12230 stcb->asoc.strmout[i].abandoned_unsent[0] = 0;
12231#endif
12232 stcb->asoc.strmout[i].next_mid_ordered = 0;
12233 stcb->asoc.strmout[i].next_mid_unordered = 0;
12234 stcb->asoc.strmout[i].sid = i;
12235 stcb->asoc.strmout[i].last_msg_incomplete = 0;
12236 stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], NULL);
12237 stcb->asoc.strmout[i].state = SCTP_STREAM_CLOSED;
12238 }
12239 stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt + adding_o;
12240 SCTP_FREE(oldstream, SCTP_M_STRMO);
12241 SCTP_TCB_SEND_UNLOCK(stcb);
12242 }
12243skip_stuff:
12244 if ((add_stream & 1) && (adding_o > 0)) {
12245 asoc->strm_pending_add_size = adding_o;
12246 asoc->peer_req_out = peer_asked;
12247 sctp_add_an_out_stream(chk, seq, adding_o);
12248 seq++;
12249 asoc->stream_reset_outstanding++;
12250 }
12251 if ((add_stream & 2) && (adding_i > 0)) {
12252 sctp_add_an_in_stream(chk, seq, adding_i);
12253 seq++;
12254 asoc->stream_reset_outstanding++;
12255 }
12256 if (send_in_req) {
12257 sctp_add_stream_reset_in(chk, number_entries, list, seq);
12258 seq++;
12259 asoc->stream_reset_outstanding++;
12260 }
12261 if (send_tsn_req) {
12262 sctp_add_stream_reset_tsn(chk, seq);
12263 asoc->stream_reset_outstanding++;
12264 }
12265 asoc->str_reset = chk;
12266 /* insert the chunk for sending */
12267 TAILQ_INSERT_TAIL(&asoc->control_send_queue,
12268 chk,
12269 sctp_next);
12270 asoc->ctrl_queue_cnt++;
12271 if (stcb->asoc.send_sack) {
12272 sctp_send_sack(stcb, SCTP_SO_LOCKED);
12273 }
12274 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
12275 return (0);
12276}
12277
12278void
12279sctp_send_abort(struct mbuf *m, int iphlen, struct sockaddr *src, struct sockaddr *dst,
12280 struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
12281 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
12282 uint32_t vrf_id, uint16_t port)
12283{
12284 /* Don't respond to an ABORT with an ABORT. */
12285 if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) {
12286 if (cause)
12287 sctp_m_freem(cause);
12288 return;
12289 }
12290 sctp_send_resp_msg(src, dst, sh, vtag, SCTP_ABORT_ASSOCIATION, cause,
12291 mflowtype, mflowid, fibnum,
12292 vrf_id, port);
12293 return;
12294}
12295
12296void
12297sctp_send_operr_to(struct sockaddr *src, struct sockaddr *dst,
12298 struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
12299 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
12300 uint32_t vrf_id, uint16_t port)
12301{
12302 sctp_send_resp_msg(src, dst, sh, vtag, SCTP_OPERATION_ERROR, cause,
12303 mflowtype, mflowid, fibnum,
12304 vrf_id, port);
12305 return;
12306}
12307
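/*
 * Copy up to max_send_len bytes from the user's uio into a fresh mbuf
 * chain; used when resuming the copy of a message that is being queued in
 * pieces.
 */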
12308static struct mbuf *
12309sctp_copy_resume(struct uio *uio,
12310 int max_send_len,
12311 int user_marks_eor,
12312 int *error,
12313 uint32_t *sndout,
12314 struct mbuf **new_tail)
12315{
12316 struct mbuf *m;
12317
12318 m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0,
12319 (M_PKTHDR | (user_marks_eor ? M_EOR : 0)));
12320 if (m == NULL) {
12321 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
12322 *error = ENOBUFS;
12323 } else {
12324 *sndout = m_length(m, NULL);
12325 *new_tail = m_last(m);
12326 }
12327 return (m);
12328}
12329
12330static int
12332 struct uio *uio,
12333 int resv_upfront)
12334{
12335 sp->data = m_uiotombuf(uio, M_WAITOK, sp->length,
12336 resv_upfront, 0);
12337 if (sp->data == NULL) {
12338 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
12339 return (ENOBUFS);
12340 }
12341
12342 sp->tail_mbuf = m_last(sp->data);
12343 return (0);
12344}
12345
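/*
 * Allocate a stream queue entry (sctp_stream_queue_pending), fill in the
 * send parameters from the sndrcvinfo, copy the user data from the uio
 * into it, and return it ready to be appended to the stream's outqueue.
 */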
12346static struct sctp_stream_queue_pending *
12347sctp_copy_it_in(struct sctp_tcb *stcb,
12348 struct sctp_association *asoc,
12349 struct sctp_sndrcvinfo *srcv,
12350 struct uio *uio,
12351 struct sctp_nets *net,
12352 ssize_t max_send_len,
12353 int user_marks_eor,
12354 int *error)
12355{
12356
12357 /*-
12358 * This routine must be very careful in its work. Protocol
12359 * processing is up and running so care must be taken to spl...()
12360 * when you need to do something that may affect the stcb/asoc. The
12361 * sb is locked however. When data is copied the protocol processing
12362 * should be enabled since this is a slower operation...
12363 */
12364 struct sctp_stream_queue_pending *sp = NULL;
12365 int resv_in_first;
12366
12367 *error = 0;
12368 /* Now can we send this? */
12369 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) ||
12370 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
12371 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
12372 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
12373 /* got data while shutting down */
12374 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12375 *error = ECONNRESET;
12376 goto out_now;
12377 }
12378 sctp_alloc_a_strmoq(stcb, sp);
12379 if (sp == NULL) {
12380 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12381 *error = ENOMEM;
12382 goto out_now;
12383 }
12384 sp->act_flags = 0;
12385 sp->sender_all_done = 0;
12386 sp->sinfo_flags = srcv->sinfo_flags;
12387 sp->timetolive = srcv->sinfo_timetolive;
12388 sp->ppid = srcv->sinfo_ppid;
12389 sp->context = srcv->sinfo_context;
12390 sp->fsn = 0;
12391 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
12392
12393 sp->sid = srcv->sinfo_stream;
12394 sp->length = (uint32_t)min(uio->uio_resid, max_send_len);
12395 if ((sp->length == (uint32_t)uio->uio_resid) &&
12396 ((user_marks_eor == 0) ||
12397 (srcv->sinfo_flags & SCTP_EOF) ||
12398 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
12399 sp->msg_is_complete = 1;
12400 } else {
12401 sp->msg_is_complete = 0;
12402 }
12403 sp->sender_all_done = 0;
12404 sp->some_taken = 0;
12405 sp->put_last_out = 0;
12406 resv_in_first = SCTP_DATA_CHUNK_OVERHEAD(stcb);
12407 sp->data = sp->tail_mbuf = NULL;
12408 if (sp->length == 0) {
12409 goto skip_copy;
12410 }
12411 if (srcv->sinfo_keynumber_valid) {
12412 sp->auth_keyid = srcv->sinfo_keynumber;
12413 } else {
12414 sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
12415 }
12416 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
12417 sctp_auth_key_acquire(stcb, sp->auth_keyid);
12418 sp->holds_key_ref = 1;
12419 }
12420 *error = sctp_copy_one(sp, uio, resv_in_first);
12421skip_copy:
12422 if (*error) {
12424 sp = NULL;
12425 } else {
12426 if (sp->sinfo_flags & SCTP_ADDR_OVER) {
12427 sp->net = net;
12428 atomic_add_int(&sp->net->ref_count, 1);
12429 } else {
12430 sp->net = NULL;
12431 }
12432 sctp_set_prsctp_policy(sp);
12433 }
12434out_now:
12435 return (sp);
12436}
12437
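/*
 * Socket layer entry point: extract any SCTP_SNDRCV cmsg, map a v4-mapped
 * IPv6 destination back to plain IPv4 when both families are compiled in,
 * and call sctp_lower_sosend() to do the real work.
 */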
12438int
12439sctp_sosend(struct socket *so,
12440 struct sockaddr *addr,
12441 struct uio *uio,
12442 struct mbuf *top,
12443 struct mbuf *control,
12444 int flags,
12445 struct thread *p
12446)
12447{
12448 int error, use_sndinfo = 0;
12449 struct sctp_sndrcvinfo sndrcvninfo;
12450 struct sockaddr *addr_to_use;
12451#if defined(INET) && defined(INET6)
12452 struct sockaddr_in sin;
12453#endif
12454
12455 if (control) {
12456 /* process cmsg snd/rcv info (maybe an assoc-id) */
12457 if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&sndrcvninfo, control,
12458 sizeof(sndrcvninfo))) {
12459 /* got one */
12460 use_sndinfo = 1;
12461 }
12462 }
12463 addr_to_use = addr;
12464#if defined(INET) && defined(INET6)
12465 if ((addr != NULL) && (addr->sa_family == AF_INET6)) {
12466 struct sockaddr_in6 *sin6;
12467
12468 if (addr->sa_len != sizeof(struct sockaddr_in6)) {
12469 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12470 return (EINVAL);
12471 }
12472 sin6 = (struct sockaddr_in6 *)addr;
12473 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
12474 in6_sin6_2_sin(&sin, sin6);
12475 addr_to_use = (struct sockaddr *)&sin;
12476 }
12477 }
12478#endif
12479 error = sctp_lower_sosend(so, addr_to_use, uio, top,
12480 control,
12481 flags,
12482 use_sndinfo ? &sndrcvninfo : NULL
12483 ,p
12484 );
12485 return (error);
12486}
12487
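/*
 * sctp_lower_sosend() is the main send path: it locates or implicitly
 * creates the association, validates flags, stream number and blocking
 * semantics, handles user-initiated ABORTs, and copies the user data into
 * the stream queues (via sctp_copy_it_in()) for transmission.
 */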
12488int
12489sctp_lower_sosend(struct socket *so,
12490 struct sockaddr *addr,
12491 struct uio *uio,
12492 struct mbuf *i_pak,
12493 struct mbuf *control,
12494 int flags,
12495 struct sctp_sndrcvinfo *srcv
12496 ,
12497 struct thread *p
12498)
12499{
12500 struct epoch_tracker et;
12501 ssize_t sndlen = 0, max_len, local_add_more;
12502 int error;
12503 struct mbuf *top = NULL;
12504 int queue_only = 0, queue_only_for_init = 0;
12505 bool free_cnt_applied = false;
12506 int un_sent;
12507 int now_filled = 0;
12508 unsigned int inqueue_bytes = 0;
12509 struct sctp_block_entry be;
12510 struct sctp_inpcb *inp;
12511 struct sctp_tcb *stcb = NULL;
12512 struct timeval now;
12513 struct sctp_nets *net;
12514 struct sctp_association *asoc;
12515 struct sctp_inpcb *t_inp;
12516 int user_marks_eor;
12517 bool create_lock_applied = false;
12518 int nagle_applies = 0;
12519 int some_on_control = 0;
12520 int got_all_of_the_send = 0;
12521 bool hold_tcblock = false;
12522 int non_blocking = 0;
12523 ssize_t local_soresv = 0;
12524 uint16_t port;
12525 uint16_t sinfo_flags;
12526 sctp_assoc_t sinfo_assoc_id;
12527
12528 error = 0;
12529 net = NULL;
12530 stcb = NULL;
12531 asoc = NULL;
12532
12533 t_inp = inp = (struct sctp_inpcb *)so->so_pcb;
12534 if (inp == NULL) {
12535 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12536 error = EINVAL;
12537 if (i_pak != NULL) {
12538 SCTP_RELEASE_PKT(i_pak);
12539 }
12540 return (error);
12541 }
12542 if ((uio == NULL) && (i_pak == NULL)) {
12543 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12544 return (EINVAL);
12545 }
12546 user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
12547 atomic_add_int(&inp->total_sends, 1);
12548 if (uio != NULL) {
12549 if (uio->uio_resid < 0) {
12550 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12551 return (EINVAL);
12552 }
12553 sndlen = uio->uio_resid;
12554 } else {
12555 top = SCTP_HEADER_TO_CHAIN(i_pak);
12556 sndlen = SCTP_HEADER_LEN(i_pak);
12557 }
12558 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Send called addr:%p send length %zd\n",
12559 (void *)addr,
12560 sndlen);
12561 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
12562 SCTP_IS_LISTENING(inp)) {
12563 /* The listener can NOT send */
12564 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
12565 error = ENOTCONN;
12566 goto out_unlocked;
12567 }
12572 if (addr != NULL) {
12573 union sctp_sockstore *raddr = (union sctp_sockstore *)addr;
12574
12575 switch (raddr->sa.sa_family) {
12576#ifdef INET
12577 case AF_INET:
12578 if (raddr->sin.sin_len != sizeof(struct sockaddr_in)) {
12579 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12580 error = EINVAL;
12581 goto out_unlocked;
12582 }
12583 port = raddr->sin.sin_port;
12584 break;
12585#endif
12586#ifdef INET6
12587 case AF_INET6:
12588 if (raddr->sin6.sin6_len != sizeof(struct sockaddr_in6)) {
12589 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12590 error = EINVAL;
12591 goto out_unlocked;
12592 }
12593 port = raddr->sin6.sin6_port;
12594 break;
12595#endif
12596 default:
12597 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAFNOSUPPORT);
12598 error = EAFNOSUPPORT;
12599 goto out_unlocked;
12600 }
12601 } else {
12602 port = 0;
12603 }
12604
12605 if (srcv != NULL) {
12606 sinfo_flags = srcv->sinfo_flags;
12607 sinfo_assoc_id = srcv->sinfo_assoc_id;
12608 if (INVALID_SINFO_FLAG(sinfo_flags) ||
12609 PR_SCTP_INVALID_POLICY(sinfo_flags)) {
12610 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12611 error = EINVAL;
12612 goto out_unlocked;
12613 }
12614 if (srcv->sinfo_flags != 0)
12615 SCTP_STAT_INCR(sctps_sends_with_flags);
12616 } else {
12617 sinfo_flags = inp->def_send.sinfo_flags;
12618 sinfo_assoc_id = inp->def_send.sinfo_assoc_id;
12619 }
12620 if (flags & MSG_EOR) {
12621 sinfo_flags |= SCTP_EOR;
12622 }
12623 if (flags & MSG_EOF) {
12624 sinfo_flags |= SCTP_EOF;
12625 }
12626 if (sinfo_flags & SCTP_SENDALL) {
12627 /* it's a sendall */
12628 error = sctp_sendall(inp, uio, top, srcv);
12629 top = NULL;
12630 goto out_unlocked;
12631 }
12632 if ((sinfo_flags & SCTP_ADDR_OVER) && (addr == NULL)) {
12633 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12634 error = EINVAL;
12635 goto out_unlocked;
12636 }
12637 /* now we must find the assoc */
12638 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
12639 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
12640 SCTP_INP_RLOCK(inp);
12641 stcb = LIST_FIRST(&inp->sctp_asoc_list);
12642 if (stcb != NULL) {
12643 SCTP_TCB_LOCK(stcb);
12644 hold_tcblock = true;
12646 SCTP_INP_RUNLOCK(inp);
12647 error = ENOTCONN;
12648 goto out_unlocked;
12649 }
12650 }
12651 SCTP_INP_RUNLOCK(inp);
12652 } else if (sinfo_assoc_id > SCTP_ALL_ASSOC) {
12653 stcb = sctp_findassociation_ep_asocid(inp, sinfo_assoc_id, 1);
12654 if (stcb != NULL) {
12655 SCTP_TCB_LOCK_ASSERT(stcb);
12656 hold_tcblock = true;
12657 }
12658 } else if (addr != NULL) {
12659 /*-
12660 * Since we did not use findep we must
12661 * increment it, and if we don't find a tcb
12662 * decrement it.
12663 */
12664 SCTP_INP_WLOCK(inp);
12665 SCTP_INP_INCR_REF(inp);
12666 SCTP_INP_WUNLOCK(inp);
12667 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
12668 if (stcb == NULL) {
12669 SCTP_INP_WLOCK(inp);
12670 SCTP_INP_DECR_REF(inp);
12671 SCTP_INP_WUNLOCK(inp);
12672 } else {
12673 SCTP_TCB_LOCK_ASSERT(stcb);
12674 hold_tcblock = true;
12675 }
12676 }
12677#ifdef INVARIANTS
12678 if (stcb != NULL) {
12679 SCTP_TCB_LOCK_ASSERT(stcb);
12680 KASSERT(hold_tcblock, ("tcb lock hold, hold_tcblock is false"));
12681 } else {
12682 KASSERT(!hold_tcblock, ("hold_tcblock is true, but stcb is NULL"));
12683 }
12684#endif
12685 if ((stcb == NULL) && (addr != NULL)) {
12686 /* Possible implicit send? */
12687 SCTP_ASOC_CREATE_LOCK(inp);
12688 create_lock_applied = true;
12689 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
12690 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
12691 /* Should I really unlock ? */
12692 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12693 error = EINVAL;
12694 goto out_unlocked;
12695 }
12696 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
12697 (addr->sa_family == AF_INET6)) {
12698 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12699 error = EINVAL;
12700 goto out_unlocked;
12701 }
12702 SCTP_INP_WLOCK(inp);
12703 SCTP_INP_INCR_REF(inp);
12704 SCTP_INP_WUNLOCK(inp);
12705 /* With the lock applied look again */
12706 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
12707#if defined(INET) || defined(INET6)
12708 if ((stcb == NULL) && (control != NULL) && (port > 0)) {
12709 stcb = sctp_findassociation_cmsgs(&t_inp, port, control, &net, &error);
12710 }
12711#endif
12712 if (stcb == NULL) {
12713 SCTP_INP_WLOCK(inp);
12714 SCTP_INP_DECR_REF(inp);
12715 SCTP_INP_WUNLOCK(inp);
12716 } else {
12717 SCTP_TCB_LOCK_ASSERT(stcb);
12718 hold_tcblock = true;
12719 SCTP_ASOC_CREATE_UNLOCK(inp);
12720 create_lock_applied = false;
12721 }
12722 if (error) {
12723 goto out_unlocked;
12724 }
12725 if (t_inp != inp) {
12726 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
12727 error = ENOTCONN;
12728 goto out_unlocked;
12729 }
12730 }
12731 if (stcb == NULL) {
12732 if (addr == NULL) {
12733 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
12734 error = ENOENT;
12735 goto out_unlocked;
12736 } else {
12737 /* We must go ahead and start the INIT process */
12738 uint32_t vrf_id;
12739
12740 if ((sinfo_flags & SCTP_ABORT) ||
12741 ((sinfo_flags & SCTP_EOF) && (sndlen == 0))) {
12742 /*-
12743 * User asks to abort a non-existent assoc,
12744 * or EOF a non-existent assoc with no data
12745 */
12746 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
12747 error = ENOENT;
12748 goto out_unlocked;
12749 }
12750 /* get an asoc/stcb struct */
12751 vrf_id = inp->def_vrf_id;
12752 KASSERT(create_lock_applied, ("create_lock_applied is false"));
12753 stcb = sctp_aloc_assoc_connected(inp, addr, &error, 0, 0, vrf_id,
12754 inp->sctp_ep.pre_open_stream_count,
12755 inp->sctp_ep.port,
12756 p,
12757 SCTP_INITIALIZE_AUTH_PARAMS);
12758 if (stcb == NULL) {
12759 /* Error is setup for us in the call */
12760 goto out_unlocked;
12761 }
12762 SCTP_TCB_LOCK_ASSERT(stcb);
12763 hold_tcblock = true;
12764 SCTP_ASOC_CREATE_UNLOCK(inp);
12765 create_lock_applied = false;
12766 /*
12767 * Turn on queue only flag to prevent data from
12768 * being sent
12769 */
12770 queue_only = 1;
12773 if (control != NULL) {
12774 if (sctp_process_cmsgs_for_init(stcb, control, &error)) {
12777 hold_tcblock = false;
12778 stcb = NULL;
12779 goto out_unlocked;
12780 }
12781 }
12782 /* out with the INIT */
12783 queue_only_for_init = 1;
12784 /*-
12785 * we may want to dig in after this call and adjust the MTU
12786 * value. It defaulted to 1500 (constant) but the ro
12787 * structure may now have an update and thus we may need to
12788 * change it BEFORE we append the message.
12789 */
12790 }
12791 }
12792
12793 KASSERT(!create_lock_applied, ("create_lock_applied is true"));
12794 KASSERT(stcb != NULL, ("stcb is NULL"));
12795 KASSERT(hold_tcblock, ("hold_tcblock is false"));
12796 SCTP_TCB_LOCK_ASSERT(stcb);
12797 asoc = &stcb->asoc;
12798 KASSERT((asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0,
12799 ("Association about to be freed"));
12800 /* Keep the stcb from being freed under our feet. */
12801 atomic_add_int(&asoc->refcnt, 1);
12802 free_cnt_applied = true;
12803
12804 if (srcv == NULL) {
12805 srcv = (struct sctp_sndrcvinfo *)&asoc->def_send;
12806 sinfo_flags = srcv->sinfo_flags;
12807 if (flags & MSG_EOR) {
12808 sinfo_flags |= SCTP_EOR;
12809 }
12810 if (flags & MSG_EOF) {
12811 sinfo_flags |= SCTP_EOF;
12812 }
12813 }
12814 if (sinfo_flags & SCTP_ADDR_OVER) {
12815 if (addr != NULL)
12816 net = sctp_findnet(stcb, addr);
12817 else
12818 net = NULL;
12819 if ((net == NULL) ||
12820 ((port != 0) && (port != stcb->rport))) {
12821 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12822 error = EINVAL;
12823 goto out_unlocked;
12824 }
12825 } else {
12826 if (asoc->alternate != NULL) {
12827 net = asoc->alternate;
12828 } else {
12829 net = asoc->primary_destination;
12830 }
12831 }
12832 atomic_add_int(&stcb->total_sends, 1);
12833
12834 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT)) {
12835 if (sndlen > (ssize_t)asoc->smallest_mtu) {
12836 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
12837 error = EMSGSIZE;
12838 goto out_unlocked;
12839 }
12840 }
12841 if (SCTP_SO_IS_NBIO(so)
12842 || (flags & (MSG_NBIO | MSG_DONTWAIT)) != 0
12843 ) {
12844 non_blocking = 1;
12845 }
12846 /* would we block? */
12847 if (non_blocking) {
12848 ssize_t amount;
12849
12850 inqueue_bytes = asoc->total_output_queue_size - (asoc->chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
12851 if (user_marks_eor == 0) {
12852 amount = sndlen;
12853 } else {
12854 amount = 1;
12855 }
12856 if ((SCTP_SB_LIMIT_SND(so) < (amount + inqueue_bytes + asoc->sb_send_resv)) ||
12857 (asoc->chunks_on_out_queue >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
12858 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EWOULDBLOCK);
12859 if (sndlen > (ssize_t)SCTP_SB_LIMIT_SND(so))
12860 error = EMSGSIZE;
12861 else
12862 error = EWOULDBLOCK;
12863 goto out_unlocked;
12864 }
12865 asoc->sb_send_resv += (uint32_t)sndlen;
12866 } else {
12867 atomic_add_int(&asoc->sb_send_resv, (int)sndlen);
12868 }
12869 local_soresv = sndlen;
12870 if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
12871 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12872 error = ECONNRESET;
12873 goto out_unlocked;
12874 }
12875 /* Is the stream no. valid? */
12876 if (srcv->sinfo_stream >= asoc->streamoutcnt) {
12877 /* Invalid stream number */
12878 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12879 error = EINVAL;
12880 goto out_unlocked;
12881 }
12882 if ((asoc->strmout[srcv->sinfo_stream].state != SCTP_STREAM_OPEN) &&
12883 (asoc->strmout[srcv->sinfo_stream].state != SCTP_STREAM_OPENING)) {
12884 /*
12885 * Can't queue any data while stream reset is underway.
12886 */
12887 if (asoc->strmout[srcv->sinfo_stream].state > SCTP_STREAM_OPEN) {
12888 error = EAGAIN;
12889 } else {
12890 error = EINVAL;
12891 }
12892 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, error);
12893 goto out_unlocked;
12894 }
12895 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
12897 queue_only = 1;
12898 }
12899 /* we are now done with all control */
12900 if (control) {
12901 sctp_m_freem(control);
12902 control = NULL;
12903 }
12908 if (sinfo_flags & SCTP_ABORT) {
12909 ;
12910 } else {
12911 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12912 error = ECONNRESET;
12913 goto out_unlocked;
12914 }
12915 }
12916 /* Ok, we will attempt a msgsnd :> */
12917 if (p != NULL) {
12918 p->td_ru.ru_msgsnd++;
12919 }
12920
12921 KASSERT(stcb != NULL, ("stcb is NULL"));
12922 KASSERT(hold_tcblock, ("hold_tcblock is false"));
12924 KASSERT((asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0,
12925 ("Association about to be freed"));
12926
12927 /* Are we aborting? */
12928 if (sinfo_flags & SCTP_ABORT) {
12929 struct mbuf *mm;
12930 struct sctp_paramhdr *ph;
12931 ssize_t tot_demand, tot_out = 0, max_out;
12932
12933 SCTP_STAT_INCR(sctps_sends_with_abort);
12934 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
12936 /* It has to be up before we abort */
12937 /* how big is the user initiated abort? */
12938 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12939 error = EINVAL;
12940 goto out;
12941 }
12942 if (top != NULL) {
12943 struct mbuf *cntm;
12944
12945 if (sndlen != 0) {
12946 for (cntm = top; cntm; cntm = SCTP_BUF_NEXT(cntm)) {
12947 tot_out += SCTP_BUF_LEN(cntm);
12948 }
12949 }
12950 mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_NOWAIT, 1, MT_DATA);
12951 } else {
12952 /* Must fit in an MTU */
12953 tot_out = sndlen;
12954 tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
12955 if (tot_demand > SCTP_DEFAULT_ADD_MORE) {
12956 /* Too big */
12957 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
12958 error = EMSGSIZE;
12959 goto out;
12960 }
12961 mm = sctp_get_mbuf_for_msg((unsigned int)tot_demand, 0, M_NOWAIT, 1, MT_DATA);
12962 }
12963 if (mm == NULL) {
12964 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12965 error = ENOMEM;
12966 goto out;
12967 }
12968 max_out = asoc->smallest_mtu - sizeof(struct sctp_paramhdr);
12969 max_out -= sizeof(struct sctp_abort_msg);
12970 if (tot_out > max_out) {
12971 tot_out = max_out;
12972 }
12973 ph = mtod(mm, struct sctp_paramhdr *);
12975 ph->param_length = htons((uint16_t)(sizeof(struct sctp_paramhdr) + tot_out));
12976 ph++;
12977 SCTP_BUF_LEN(mm) = (int)(tot_out + sizeof(struct sctp_paramhdr));
12978 if (top == NULL) {
12979 SCTP_TCB_UNLOCK(stcb);
12980 hold_tcblock = false;
12981 error = uiomove((caddr_t)ph, (int)tot_out, uio);
12982 SCTP_TCB_LOCK(stcb);
12983 hold_tcblock = true;
12984 if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
12985 sctp_m_freem(mm);
12986 goto out_unlocked;
12987 }
12988 if (error != 0) {
12989 /*-
12990 * If we cannot copy in the user's data we
12991 * still abort; we just don't get to send
12992 * the user's abort note.
12993 */
12994 sctp_m_freem(mm);
12995 mm = NULL;
12996 }
12997 } else {
12998 if (sndlen != 0) {
12999 SCTP_BUF_NEXT(mm) = top;
13000 }
13001 }
13002 atomic_subtract_int(&asoc->refcnt, 1);
13003 free_cnt_applied = false;
13004 /* release this lock, otherwise we hang on ourselves */
13005 NET_EPOCH_ENTER(et);
13006 sctp_abort_an_association(stcb->sctp_ep, stcb, mm, false, SCTP_SO_LOCKED);
13007 NET_EPOCH_EXIT(et);
13008 stcb = NULL;
13009 /*
13010 * In this case top is already chained to mm; avoid a double
13011 * free, since we free top below if top != NULL and the driver
13012 * would free it again after sending the packet out.
13013 */
13014 if (sndlen != 0) {
13015 top = NULL;
13016 }
13017 goto out_unlocked;
13018 }
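/*
 * Editor's note (illustrative, not part of sctp_output.c): the branch
 * above packs an RFC 4960 "User-Initiated Abort" error cause in front of
 * the caller's abort reason, truncated so the ABORT still fits in one MTU:
 *
 *	cause code (12, User-Initiated Abort)  : 16 bits
 *	cause length (header + reason bytes)   : 16 bits
 *	upper-layer abort reason               : variable
 *
 * sctp_abort_an_association() then bundles it into the outgoing ABORT chunk.
 */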
13019
13020 KASSERT(stcb != NULL, ("stcb is NULL"));
13021 KASSERT(hold_tcblock, ("hold_tcblock is false"));
13023 KASSERT((asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0,
13024 ("Association about to be freed"));
13025
13026 /* Calculate the maximum we can send */
13027 inqueue_bytes = asoc->total_output_queue_size - (asoc->chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
13028 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
13029 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
13030 } else {
13031 max_len = 0;
13032 }
13033 /* Unless E_EOR mode is on, we must make a send FIT in one call. */
13034 if ((user_marks_eor == 0) &&
13035 (sndlen > (ssize_t)SCTP_SB_LIMIT_SND(stcb->sctp_socket))) {
13036 /* It will NEVER fit */
13037 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
13038 error = EMSGSIZE;
13039 goto out_unlocked;
13040 }
13041 if ((uio == NULL) && user_marks_eor) {
13042 /*-
13043 * We do not support eeor mode for
13044 * sending with mbuf chains (like sendfile).
13045 */
13046 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13047 error = EINVAL;
13048 goto out_unlocked;
13049 }
13050
13051 if (user_marks_eor) {
13052 local_add_more = (ssize_t)min(SCTP_SB_LIMIT_SND(so), SCTP_BASE_SYSCTL(sctp_add_more_threshold));
13053 } else {
13054 /*-
13055 * For non-eeor the whole message must fit in
13056 * the socket send buffer.
13057 */
13058 local_add_more = sndlen;
13059 }
13060 if (non_blocking) {
13061 goto skip_preblock;
13062 }
13063 if (((max_len <= local_add_more) &&
13064 ((ssize_t)SCTP_SB_LIMIT_SND(so) >= local_add_more)) ||
13065 (max_len == 0) ||
13066 ((asoc->chunks_on_out_queue + asoc->stream_queue_cnt) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
13067 /* No room right now ! */
13068 SOCKBUF_LOCK(&so->so_snd);
13069 inqueue_bytes = asoc->total_output_queue_size - (asoc->chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
13070 while ((SCTP_SB_LIMIT_SND(so) < (inqueue_bytes + local_add_more)) ||
13071 ((asoc->stream_queue_cnt + asoc->chunks_on_out_queue) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
13072 SCTPDBG(SCTP_DEBUG_OUTPUT1, "pre_block limit:%u <(inq:%d + %zd) || (%d+%d > %d)\n",
13073 (unsigned int)SCTP_SB_LIMIT_SND(so),
13074 inqueue_bytes,
13075 local_add_more,
13076 asoc->stream_queue_cnt,
13077 asoc->chunks_on_out_queue,
13078 SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue));
13079 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13081 }
13082 be.error = 0;
13083 stcb->block_entry = &be;
13084 SCTP_TCB_UNLOCK(stcb);
13085 hold_tcblock = false;
13086 error = sbwait(&so->so_snd);
13087 SCTP_TCB_LOCK(stcb);
13088 hold_tcblock = true;
13089 if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13090 SOCKBUF_UNLOCK(&so->so_snd);
13091 goto out_unlocked;
13092 }
13093 stcb->block_entry = NULL;
13094 if (error || so->so_error || be.error) {
13095 if (error == 0) {
13096 if (so->so_error)
13097 error = so->so_error;
13098 if (be.error) {
13099 error = be.error;
13100 }
13101 }
13102 SOCKBUF_UNLOCK(&so->so_snd);
13103 goto out_unlocked;
13104 }
13105 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13107 asoc, asoc->total_output_queue_size);
13108 }
13109 inqueue_bytes = asoc->total_output_queue_size - (asoc->chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
13110 }
13111 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
13112 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
13113 } else {
13114 max_len = 0;
13115 }
13116 SOCKBUF_UNLOCK(&so->so_snd);
13117 }
13118
13119skip_preblock:
13120 KASSERT(stcb != NULL, ("stcb is NULL"));
13121 KASSERT(hold_tcblock, ("hold_tcblock is false"));
13123 KASSERT((asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0,
13124 ("Association about to be freed"));
13125
13126 /*
13127 * sndlen covers the mbuf case; uio_resid covers the non-mbuf
13128 * case. NOTE: uio will be NULL when top (an mbuf chain) is passed.
13129 */
13130 if (sndlen == 0) {
13131 if (sinfo_flags & SCTP_EOF) {
13132 got_all_of_the_send = 1;
13133 goto dataless_eof;
13134 } else {
13135 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13136 error = EINVAL;
13137 goto out;
13138 }
13139 }
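/*
 * Editor's note: the two send paths diverge here. With no mbuf chain
 * (top == NULL) the user data is copied in from the uio below and queued
 * as stream_queue_pending entries; with an mbuf chain it is appended
 * directly via sctp_msg_append() in the else branch.
 */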
13140 if (top == NULL) {
13141 struct sctp_stream_queue_pending *sp;
13142 struct sctp_stream_out *strm;
13143 uint32_t sndout;
13144
13145 /*
13146 * XXX: This will change soon, when the TCP send lock is
13147 * retired.
13148 */
13149 SCTP_TCB_UNLOCK(stcb);
13150 hold_tcblock = false;
13151 SCTP_TCB_SEND_LOCK(stcb);
13152 if ((asoc->stream_locked) &&
13153 (asoc->stream_locked_on != srcv->sinfo_stream)) {
13155 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13156 error = EINVAL;
13157 goto out;
13158 }
13159 strm = &asoc->strmout[srcv->sinfo_stream];
13160 if (strm->last_msg_incomplete == 0) {
13161 do_a_copy_in:
13163 sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error);
13164 if (error) {
13165 goto out;
13166 }
13167 SCTP_TCB_SEND_LOCK(stcb);
13168 /* The out streams might be reallocated. */
13169 strm = &asoc->strmout[srcv->sinfo_stream];
13170 if (sp->msg_is_complete) {
13171 strm->last_msg_incomplete = 0;
13172 asoc->stream_locked = 0;
13173 } else {
13174 /*
13175 * Just got locked to this guy in case of an
13176 * interrupt.
13177 */
13178 strm->last_msg_incomplete = 1;
13179 if (asoc->idata_supported == 0) {
13180 asoc->stream_locked = 1;
13181 asoc->stream_locked_on = srcv->sinfo_stream;
13182 }
13183 sp->sender_all_done = 0;
13184 }
13185 sctp_snd_sb_alloc(stcb, sp->length);
13186 atomic_add_int(&asoc->stream_queue_cnt, 1);
13187 if (sinfo_flags & SCTP_UNORDERED) {
13188 SCTP_STAT_INCR(sctps_sends_with_unord);
13189 }
13190 sp->processing = 1;
13191 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
13192 asoc->ss_functions.sctp_ss_add_to_stream(stcb, asoc, strm, sp);
13193 } else {
13194 sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead);
13195 if (sp == NULL) {
13196 /* ???? Huh ??? last msg is gone */
13197#ifdef INVARIANTS
13198 panic("Warning: Last msg marked incomplete, yet nothing left?");
13199#else
13200 SCTP_PRINTF("Warning: Last msg marked incomplete, yet nothing left?\n");
13201 strm->last_msg_incomplete = 0;
13202#endif
13203 goto do_a_copy_in;
13204 }
13205 if (sp->processing) {
13207 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13208 error = EINVAL;
13209 goto out;
13210 } else {
13211 sp->processing = 1;
13212 }
13213 }
13215 while (uio->uio_resid > 0) {
13216 /* How much room do we have? */
13217 struct mbuf *new_tail, *mm;
13218
13219 inqueue_bytes = asoc->total_output_queue_size - (asoc->chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
13220 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes)
13221 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
13222 else
13223 max_len = 0;
13224
13225 if ((max_len > (ssize_t)SCTP_BASE_SYSCTL(sctp_add_more_threshold)) ||
13226 (max_len && (SCTP_SB_LIMIT_SND(so) < SCTP_BASE_SYSCTL(sctp_add_more_threshold))) ||
13227 (uio->uio_resid && (uio->uio_resid <= max_len))) {
13228 sndout = 0;
13229 new_tail = NULL;
13230 if (hold_tcblock) {
13231 SCTP_TCB_UNLOCK(stcb);
13232 hold_tcblock = false;
13233 }
13234 mm = sctp_copy_resume(uio, (int)max_len, user_marks_eor, &error, &sndout, &new_tail);
13235 if ((mm == NULL) || error) {
13236 if (mm) {
13237 sctp_m_freem(mm);
13238 }
13239 SCTP_TCB_SEND_LOCK(stcb);
13240 if (((asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0) &&
13241 ((asoc->state & SCTP_STATE_WAS_ABORTED) == 0) &&
13242 (sp != NULL)) {
13243 sp->processing = 0;
13244 }
13246 goto out;
13247 }
13248 /* Update the mbuf and count */
13249 SCTP_TCB_SEND_LOCK(stcb);
13250 if ((asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
13251 (asoc->state & SCTP_STATE_WAS_ABORTED)) {
13252 /*
13253 * we need to get out. Peer probably
13254 * aborted.
13255 */
13256 sctp_m_freem(mm);
13257 if (asoc->state & SCTP_STATE_WAS_ABORTED) {
13258 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
13259 error = ECONNRESET;
13260 }
13262 goto out;
13263 }
13264 if (sp->tail_mbuf) {
13265 /* tack it to the end */
13266 SCTP_BUF_NEXT(sp->tail_mbuf) = mm;
13267 sp->tail_mbuf = new_tail;
13268 } else {
13269 /* A stolen mbuf */
13270 sp->data = mm;
13271 sp->tail_mbuf = new_tail;
13272 }
13273 sctp_snd_sb_alloc(stcb, sndout);
13274 atomic_add_int(&sp->length, sndout);
13275 if (sinfo_flags & SCTP_SACK_IMMEDIATELY) {
13277 }
13278
13279 /* Did we reach EOR? */
13280 if ((uio->uio_resid == 0) &&
13281 ((user_marks_eor == 0) ||
13282 (sinfo_flags & SCTP_EOF) ||
13283 (user_marks_eor && (sinfo_flags & SCTP_EOR)))) {
13284 sp->msg_is_complete = 1;
13285 } else {
13286 sp->msg_is_complete = 0;
13287 }
13289 }
13290 if (uio->uio_resid == 0) {
13291 /* got it all? */
13292 continue;
13293 }
13294 /* PR-SCTP? */
13295 if ((asoc->prsctp_supported) && (asoc->sent_queue_cnt_removeable > 0)) {
13296 /*
13297 * This is ugly, but we must ensure the
13298 * locking order.
13299 */
13300 if (!hold_tcblock) {
13301 SCTP_TCB_LOCK(stcb);
13302 hold_tcblock = true;
13303 }
13304 sctp_prune_prsctp(stcb, asoc, srcv, (int)sndlen);
13305 inqueue_bytes = asoc->total_output_queue_size - (asoc->chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
13306 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes)
13307 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
13308 else
13309 max_len = 0;
13310 if (max_len > 0) {
13311 continue;
13312 }
13313 SCTP_TCB_UNLOCK(stcb);
13314 hold_tcblock = false;
13315 }
13316 /* wait for space now */
13317 if (non_blocking) {
13318 /* Non-blocking io in place out */
13319 SCTP_TCB_SEND_LOCK(stcb);
13320 if (sp != NULL) {
13321 sp->processing = 0;
13322 }
13324 if (!hold_tcblock) {
13325 SCTP_TCB_LOCK(stcb);
13326 hold_tcblock = true;
13327 }
13328 goto skip_out_eof;
13329 }
13330 /* What about the INIT, send it maybe */
13331 if (queue_only_for_init) {
13332 if (!hold_tcblock) {
13333 SCTP_TCB_LOCK(stcb);
13334 hold_tcblock = true;
13335 }
13336 if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
13337 /* a collision took us forward? */
13338 queue_only = 0;
13339 } else {
13340 NET_EPOCH_ENTER(et);
13342 NET_EPOCH_EXIT(et);
13344 queue_only = 1;
13345 }
13346 }
13347 if ((net->flight_size > net->cwnd) &&
13348 (asoc->sctp_cmt_on_off == 0)) {
13349 SCTP_STAT_INCR(sctps_send_cwnd_avoid);
13350 queue_only = 1;
13351 } else if (asoc->ifp_had_enobuf) {
13352 SCTP_STAT_INCR(sctps_ifnomemqueued);
13353 if (net->flight_size > (2 * net->mtu)) {
13354 queue_only = 1;
13355 }
13356 asoc->ifp_had_enobuf = 0;
13357 }
13358 un_sent = asoc->total_output_queue_size - asoc->total_flight;
13360 (asoc->total_flight > 0) &&
13362 (un_sent < (int)(asoc->smallest_mtu - SCTP_MIN_OVERHEAD))) {
13363 /*-
13364 * Ok, Nagle is set on and we have data outstanding.
13365 * Don't send anything and let SACKs drive out the
13366 * data unless we have a "full" segment to send.
13367 */
13368 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13370 }
13371 SCTP_STAT_INCR(sctps_naglequeued);
13372 nagle_applies = 1;
13373 } else {
13374 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13377 }
13378 SCTP_STAT_INCR(sctps_naglesent);
13379 nagle_applies = 0;
13380 }
13381 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13382 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
13383 nagle_applies, un_sent);
13385 asoc->total_flight,
13387 }
13388 if (queue_only_for_init)
13389 queue_only_for_init = 0;
13390 if ((queue_only == 0) && (nagle_applies == 0)) {
13391 /*-
13392 * need to start chunk output
13393 * before blocking. Note that if
13394 * the lock is already held, then
13395 * input processing via the net is running
13396 * and I don't need to start output :-D
13397 */
13398 NET_EPOCH_ENTER(et);
13399 if (!hold_tcblock) {
13400 if (SCTP_TCB_TRYLOCK(stcb)) {
13401 hold_tcblock = true;
13403 stcb,
13405 }
13406 } else {
13408 stcb,
13410 }
13411 NET_EPOCH_EXIT(et);
13412 }
13413 if (hold_tcblock) {
13414 SCTP_TCB_UNLOCK(stcb);
13415 hold_tcblock = false;
13416 }
13417 SOCKBUF_LOCK(&so->so_snd);
13418 /*-
13419 * This is a bit strange, but I think it will
13420 * work. The total_output_queue_size is locked and
13421 * protected by the TCB_LOCK, which we just released.
13422 * There is a race between releasing it above and
13423 * acquiring the socket lock, where SACKs can come in
13424 * but we have not yet put SB_WAIT on the so_snd
13425 * buffer to get the wakeup. After the LOCK
13426 * is applied, the sack processing will also need to
13427 * LOCK the so->so_snd to do the actual sowwakeup(). So
13428 * once we have the socket buffer lock, if we recheck the
13429 * size we KNOW we will get to sleep safely with the
13430 * wakeup flag in place.
13431 */
13432 inqueue_bytes = asoc->total_output_queue_size - (asoc->chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
13433 if (SCTP_SB_LIMIT_SND(so) <= (inqueue_bytes +
13434 min(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTP_SB_LIMIT_SND(so)))) {
13435 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13437 asoc, uio->uio_resid);
13438 }
13439 be.error = 0;
13440 stcb->block_entry = &be;
13441 error = sbwait(&so->so_snd);
13442 stcb->block_entry = NULL;
13443
13444 if (error || so->so_error || be.error) {
13445 if (error == 0) {
13446 if (so->so_error)
13447 error = so->so_error;
13448 if (be.error) {
13449 error = be.error;
13450 }
13451 }
13452 SOCKBUF_UNLOCK(&so->so_snd);
13453 SCTP_TCB_SEND_LOCK(stcb);
13454 if (((asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0) &&
13455 ((asoc->state & SCTP_STATE_WAS_ABORTED) == 0) &&
13456 (sp != NULL)) {
13457 sp->processing = 0;
13458 }
13460 goto out_unlocked;
13461 }
13462
13463 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13465 asoc, asoc->total_output_queue_size);
13466 }
13467 }
13468 SOCKBUF_UNLOCK(&so->so_snd);
13469 SCTP_TCB_SEND_LOCK(stcb);
13470 if ((asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
13471 (asoc->state & SCTP_STATE_WAS_ABORTED)) {
13473 goto out_unlocked;
13474 }
13476 }
13477 SCTP_TCB_SEND_LOCK(stcb);
13478 if ((asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
13479 (asoc->state & SCTP_STATE_WAS_ABORTED)) {
13481 goto out_unlocked;
13482 }
13483 if (sp) {
13484 if (sp->msg_is_complete == 0) {
13485 strm->last_msg_incomplete = 1;
13486 if (asoc->idata_supported == 0) {
13487 asoc->stream_locked = 1;
13488 asoc->stream_locked_on = srcv->sinfo_stream;
13489 }
13490 } else {
13491 sp->sender_all_done = 1;
13492 strm->last_msg_incomplete = 0;
13493 asoc->stream_locked = 0;
13494 }
13495 sp->processing = 0;
13496 } else {
13497 SCTP_PRINTF("Huh no sp TSNH?\n");
13498 strm->last_msg_incomplete = 0;
13499 asoc->stream_locked = 0;
13500 }
13502 if (uio->uio_resid == 0) {
13503 got_all_of_the_send = 1;
13504 }
13505 if (!hold_tcblock) {
13506 SCTP_TCB_LOCK(stcb);
13507 hold_tcblock = true;
13508 }
13509 } else {
13510 /* We send in a 1, since we do have the stcb lock. */
13511 error = sctp_msg_append(stcb, net, top, srcv, 1);
13512 top = NULL;
13513 if (sinfo_flags & SCTP_EOF) {
13514 got_all_of_the_send = 1;
13515 }
13516 }
13517 if (error != 0) {
13518 goto out;
13519 }
13520
13521dataless_eof:
13522 KASSERT(stcb != NULL, ("stcb is NULL"));
13523 KASSERT(hold_tcblock, ("hold_tcblock is false"));
13525 KASSERT((asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0,
13526 ("Association about to be freed"));
13527
13528 /* EOF thing ? */
13529 if ((sinfo_flags & SCTP_EOF) &&
13530 (got_all_of_the_send == 1)) {
13531 SCTP_STAT_INCR(sctps_sends_with_eof);
13532 error = 0;
13533 if (TAILQ_EMPTY(&asoc->send_queue) &&
13534 TAILQ_EMPTY(&asoc->sent_queue) &&
13536 if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc)) {
13537 goto abort_anyway;
13538 }
13539 /* there is nothing queued to send, so I'm done... */
13543 struct sctp_nets *netp;
13544
13545 /* only send SHUTDOWN the first time through */
13546 if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
13547 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
13548 }
13551 if (asoc->alternate != NULL) {
13552 netp = asoc->alternate;
13553 } else {
13554 netp = asoc->primary_destination;
13555 }
13556 sctp_send_shutdown(stcb, netp);
13558 netp);
13560 NULL);
13561 }
13562 } else {
13563 /*-
13564 * we still got (or just got) data to send, so set
13565 * SHUTDOWN_PENDING
13566 */
13567 /*-
13568 * XXX sockets draft says that SCTP_EOF should be
13569 * sent with no data. currently, we will allow user
13570 * data to be sent first and move to
13571 * SHUTDOWN-PENDING
13572 */
13576 if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc)) {
13578 }
13580 if (TAILQ_EMPTY(&asoc->send_queue) &&
13581 TAILQ_EMPTY(&asoc->sent_queue) &&
13583 struct mbuf *op_err;
13584 char msg[SCTP_DIAG_INFO_LEN];
13585
13586 abort_anyway:
13587 if (free_cnt_applied) {
13588 atomic_subtract_int(&asoc->refcnt, 1);
13589 free_cnt_applied = false;
13590 }
13591 SCTP_SNPRINTF(msg, sizeof(msg),
13592 "%s:%d at %s", __FILE__, __LINE__, __func__);
13593 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
13594 msg);
13595 NET_EPOCH_ENTER(et);
13597 op_err, false, SCTP_SO_LOCKED);
13598 NET_EPOCH_EXIT(et);
13599 hold_tcblock = false;
13600 stcb = NULL;
13601 goto out;
13602 }
13604 NULL);
13606 }
13607 }
13608 }
13609
13610skip_out_eof:
13611 KASSERT(stcb != NULL, ("stcb is NULL"));
13612 KASSERT(hold_tcblock, ("hold_tcblock is false"));
13614 KASSERT((asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0,
13615 ("Association about to be freed"));
13616
13617 if (!TAILQ_EMPTY(&asoc->control_send_queue)) {
13618 some_on_control = 1;
13619 }
13620 if (queue_only_for_init) {
13621 if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
13622 /* a collision took us forward? */
13623 queue_only = 0;
13624 } else {
13625 NET_EPOCH_ENTER(et);
13627 NET_EPOCH_EXIT(et);
13629 queue_only = 1;
13630 }
13631 }
13632 if ((net->flight_size > net->cwnd) &&
13633 (asoc->sctp_cmt_on_off == 0)) {
13634 SCTP_STAT_INCR(sctps_send_cwnd_avoid);
13635 queue_only = 1;
13636 } else if (asoc->ifp_had_enobuf) {
13637 SCTP_STAT_INCR(sctps_ifnomemqueued);
13638 if (net->flight_size > (2 * net->mtu)) {
13639 queue_only = 1;
13640 }
13641 asoc->ifp_had_enobuf = 0;
13642 }
13643 un_sent = asoc->total_output_queue_size - asoc->total_flight;
13645 (asoc->total_flight > 0) &&
13647 (un_sent < (int)(asoc->smallest_mtu - SCTP_MIN_OVERHEAD))) {
13648 /*-
13649 * Ok, Nagle is set on and we have data outstanding.
13650 * Don't send anything and let SACKs drive out the
13651 * data unless we have a "full" segment to send.
13652 */
13653 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13655 }
13656 SCTP_STAT_INCR(sctps_naglequeued);
13657 nagle_applies = 1;
13658 } else {
13659 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13662 }
13663 SCTP_STAT_INCR(sctps_naglesent);
13664 nagle_applies = 0;
13665 }
13666 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13667 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
13668 nagle_applies, un_sent);
13670 asoc->total_flight,
13672 }
13673 NET_EPOCH_ENTER(et);
13674 if ((queue_only == 0) && (nagle_applies == 0) && (asoc->peers_rwnd && un_sent)) {
13676 } else if ((queue_only == 0) &&
13677 (asoc->peers_rwnd == 0) &&
13678 (asoc->total_flight == 0)) {
13679 /* We get to have a probe outstanding */
13681 } else if (some_on_control) {
13682 int num_out, reason;
13683
13684 /* Here we do control only */
13685 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out,
13686 &reason, 1, 1, &now, &now_filled,
13687 sctp_get_frag_point(stcb),
13689 }
13690 NET_EPOCH_EXIT(et);
13691 SCTPDBG(SCTP_DEBUG_OUTPUT1, "USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d err:%d\n",
13692 queue_only, asoc->peers_rwnd, un_sent,
13693 asoc->total_flight, asoc->chunks_on_out_queue,
13694 asoc->total_output_queue_size, error);
13695
13696 KASSERT(stcb != NULL, ("stcb is NULL"));
13697 KASSERT(hold_tcblock, ("hold_tcblock is false"));
13699
13700out:
13701out_unlocked:
13702 if (create_lock_applied) {
13704 }
13705 if (stcb != NULL) {
13706 if (local_soresv) {
13707 atomic_subtract_int(&asoc->sb_send_resv, (int)sndlen);
13708 }
13709 if (hold_tcblock) {
13710 SCTP_TCB_UNLOCK(stcb);
13711 }
13712 if (free_cnt_applied) {
13713 atomic_subtract_int(&asoc->refcnt, 1);
13714 }
13715#ifdef INVARIANTS
13716 if (mtx_owned(&stcb->tcb_mtx)) {
13717 panic("Leaving with tcb mtx owned?");
13718 }
13719 if (mtx_owned(&stcb->tcb_send_mtx)) {
13720 panic("Leaving with tcb send mtx owned?");
13721 }
13722#endif
13723 }
13724 if (top != NULL) {
13725 sctp_m_freem(top);
13726 }
13727 if (control != NULL) {
13728 sctp_m_freem(control);
13729 }
13730 return (error);
13731}
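Editor's note: sctp_lower_sosend() is the workhorse behind the socket send entry points in this file (sctp_sosend() and friends). From user space, the sinfo_flags it tests (SCTP_UNORDERED, SCTP_EOR, SCTP_EOF, SCTP_ABORT) normally arrive through the RFC 6458 API. A minimal, illustrative userland sketch; the socket setup, addresses, and error handling are assumptions, not taken from this file:

#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

/* Send one unordered message on stream 1, then request a graceful EOF. */
static int
example_send(int fd, const struct sockaddr_in *dst)
{
	const char msg[] = "hello";
	ssize_t n;

	/* SCTP_UNORDERED ends up in sinfo_flags inside sctp_lower_sosend(). */
	n = sctp_sendmsg(fd, msg, sizeof(msg) - 1,
	    (const struct sockaddr *)dst, sizeof(*dst),
	    0 /* ppid */, SCTP_UNORDERED, 1 /* stream */, 0 /* ttl */, 0 /* context */);
	if (n < 0)
		return (-1);

	/* A zero-length send with SCTP_EOF takes the dataless-EOF path above. */
	n = sctp_sendmsg(fd, NULL, 0,
	    (const struct sockaddr *)dst, sizeof(*dst),
	    0, SCTP_EOF, 0, 0, 0);
	return (n < 0 ? -1 : 0);
}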
13732
13733/*
13734 * generate an AUTHentication chunk, if required
13735 */
13736struct mbuf *
13737sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
13738 struct sctp_auth_chunk **auth_ret, uint32_t *offset,
13739 struct sctp_tcb *stcb, uint8_t chunk)
13740{
13741 struct mbuf *m_auth;
13742 struct sctp_auth_chunk *auth;
13743 int chunk_len;
13744 struct mbuf *cn;
13745
13746 if ((m_end == NULL) || (auth_ret == NULL) || (offset == NULL) ||
13747 (stcb == NULL))
13748 return (m);
13749
13750 if (stcb->asoc.auth_supported == 0) {
13751 return (m);
13752 }
13753 /* does the requested chunk require auth? */
13755 return (m);
13756 }
13757 m_auth = sctp_get_mbuf_for_msg(sizeof(*auth), 0, M_NOWAIT, 1, MT_HEADER);
13758 if (m_auth == NULL) {
13759 /* no mbufs */
13760 return (m);
13761 }
13762 /* reserve some space if this will be the first mbuf */
13763 if (m == NULL)
13765 /* fill in the AUTH chunk details */
13766 auth = mtod(m_auth, struct sctp_auth_chunk *);
13767 memset(auth, 0, sizeof(*auth));
13769 auth->ch.chunk_flags = 0;
13770 chunk_len = sizeof(*auth) +
13772 auth->ch.chunk_length = htons(chunk_len);
13773 auth->hmac_id = htons(stcb->asoc.peer_hmac_id);
13774 /* key id and hmac digest will be computed and filled in upon send */
13775
13776 /* save the offset where the auth was inserted into the chain */
13777 *offset = 0;
13778 for (cn = m; cn; cn = SCTP_BUF_NEXT(cn)) {
13779 *offset += SCTP_BUF_LEN(cn);
13780 }
13781
13782 /* update length and return pointer to the auth chunk */
13783 SCTP_BUF_LEN(m_auth) = chunk_len;
13784 m = sctp_copy_mbufchain(m_auth, m, m_end, 1, chunk_len, 0);
13785 if (auth_ret != NULL)
13786 *auth_ret = auth;
13787
13788 return (m);
13789}
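Editor's note: for orientation, the chunk assembled above is the RFC 4895 AUTH chunk; the HMAC itself is computed and filled in at send time, as the comment notes. A standalone wire-layout sketch (the struct name and comments are illustrative, not the kernel's own struct sctp_auth_chunk):

#include <stdint.h>

/* RFC 4895, Section 4.1: AUTH chunk layout (chunk type 0x0F). */
struct auth_chunk_wire {
	uint8_t  chunk_type;	/* 0x0F (AUTHENTICATION) */
	uint8_t  chunk_flags;	/* always 0 */
	uint16_t chunk_length;	/* header plus HMAC length, network order */
	uint16_t shared_key_id;	/* which endpoint-pair shared key to use */
	uint16_t hmac_id;	/* 1 = SHA-1, 3 = SHA-256 */
	/* HMAC of the packet follows, zero-filled while it is computed */
};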
13790
13791#ifdef INET6
13792int
13793sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t *ro)
13794{
13795 struct nd_prefix *pfx = NULL;
13796 struct nd_pfxrouter *pfxrtr = NULL;
13797 struct sockaddr_in6 gw6;
13798
13799 if (ro == NULL || ro->ro_nh == NULL || src6->sin6_family != AF_INET6)
13800 return (0);
13801
13802 /* get prefix entry of address */
13803 ND6_RLOCK();
13804 LIST_FOREACH(pfx, &MODULE_GLOBAL(nd_prefix), ndpr_entry) {
13805 if (pfx->ndpr_stateflags & NDPRF_DETACHED)
13806 continue;
13807 if (IN6_ARE_MASKED_ADDR_EQUAL(&pfx->ndpr_prefix.sin6_addr,
13808 &src6->sin6_addr, &pfx->ndpr_mask))
13809 break;
13810 }
13811 /* no prefix entry in the prefix list */
13812 if (pfx == NULL) {
13813 ND6_RUNLOCK();
13814 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefix entry for ");
13815 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
13816 return (0);
13817 }
13818
13819 SCTPDBG(SCTP_DEBUG_OUTPUT2, "v6src_match_nexthop(), Prefix entry is ");
13820 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
13821
13822 /* search installed gateway from prefix entry */
13823 LIST_FOREACH(pfxrtr, &pfx->ndpr_advrtrs, pfr_entry) {
13824 memset(&gw6, 0, sizeof(struct sockaddr_in6));
13825 gw6.sin6_family = AF_INET6;
13826 gw6.sin6_len = sizeof(struct sockaddr_in6);
13827 memcpy(&gw6.sin6_addr, &pfxrtr->router->rtaddr,
13828 sizeof(struct in6_addr));
13829 SCTPDBG(SCTP_DEBUG_OUTPUT2, "prefix router is ");
13830 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&gw6);
13831 SCTPDBG(SCTP_DEBUG_OUTPUT2, "installed router is ");
13832 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ro->ro_nh->gw_sa);
13833 if (sctp_cmpaddr((struct sockaddr *)&gw6, &ro->ro_nh->gw_sa)) {
13834 ND6_RUNLOCK();
13835 SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is installed\n");
13836 return (1);
13837 }
13838 }
13839 ND6_RUNLOCK();
13840 SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is not installed\n");
13841 return (0);
13842}
13843#endif
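Editor's note: the prefix walk above hinges on a masked address comparison (IN6_ARE_MASKED_ADDR_EQUAL). A minimal standalone equivalent using only standard headers; the helper name is made up for illustration:

#include <stdbool.h>
#include <netinet/in.h>

/* Return true if a and b agree on every bit selected by mask. */
static bool
in6_masked_equal(const struct in6_addr *a, const struct in6_addr *b,
    const struct in6_addr *mask)
{
	for (int i = 0; i < 16; i++) {
		if ((a->s6_addr[i] & mask->s6_addr[i]) !=
		    (b->s6_addr[i] & mask->s6_addr[i]))
			return (false);
	}
	return (true);
}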
13844
13845 int
13846 sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t *ro)
13847 {
13848#ifdef INET
13849 struct sockaddr_in *sin, *mask;
13850 struct ifaddr *ifa;
13851 struct in_addr srcnetaddr, gwnetaddr;
13852
13853 if (ro == NULL || ro->ro_nh == NULL ||
13854 sifa->address.sa.sa_family != AF_INET) {
13855 return (0);
13856 }
13857 ifa = (struct ifaddr *)sifa->ifa;
13858 mask = (struct sockaddr_in *)(ifa->ifa_netmask);
13859 sin = &sifa->address.sin;
13860 srcnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
13861 SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: src address is ");
13863 SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", srcnetaddr.s_addr);
13864
13865 sin = &ro->ro_nh->gw4_sa;
13866 gwnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
13867 SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: nexthop is ");
13868 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ro->ro_nh->gw_sa);
13869 SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", gwnetaddr.s_addr);
13870 if (srcnetaddr.s_addr == gwnetaddr.s_addr) {
13871 return (1);
13872 }
13873#endif
13874 return (0);
13875}
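Editor's note: sctp_v4src_match_nexthop() boils down to one masked comparison: the candidate source address matches the next hop when both fall in the same subnet under the interface netmask. A self-contained illustration using documentation-prefix addresses (all values hypothetical):

#include <stdio.h>
#include <arpa/inet.h>

int
main(void)
{
	struct in_addr src, gw, mask;

	/* Hypothetical addresses: 192.0.2.10/24 with gateway 192.0.2.1. */
	inet_pton(AF_INET, "192.0.2.10", &src);
	inet_pton(AF_INET, "192.0.2.1", &gw);
	inet_pton(AF_INET, "255.255.255.0", &mask);

	/* Same test as the function above: compare the masked network parts. */
	if ((src.s_addr & mask.s_addr) == (gw.s_addr & mask.s_addr))
		printf("source matches next hop's subnet\n");
	else
		printf("no match\n");
	return (0);
}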
#define SCTP_UNUSED
Definition: alias_sctp.h:86
__uint32_t uint32_t
Definition: in.h:62
__uint16_t uint16_t
Definition: in.h:57
__uint8_t uint8_t
Definition: in.h:52
#define INADDR_BROADCAST
Definition: in.h:49
#define INADDR_ANY
Definition: in.h:48
#define IPPROTO_UDP
Definition: in.h:46
__sa_family_t sa_family_t
Definition: in.h:77
u_short in_pseudo(u_int32_t a, u_int32_t b, u_int32_t c)
Definition: in_cksum.c:197
const struct encaptab * cookie
Definition: in_gif.c:396
int prison_check_ip4(const struct ucred *cred, const struct in_addr *ia)
Definition: in_jail.c:322
#define IPV6_FLOWLABEL_MASK
Definition: ip6.h:100
#define IPVERSION
Definition: ip.h:46
#define IP_DF
Definition: ip.h:13
ipfw_dyn_rule * next
Definition: ip_fw.h:0
void ip_fillid(struct ip *ip)
Definition: ip_id.c:243
#define SCTP_PCB_FLAGS_SND_ITERATOR_UP
Definition: sctp.h:508
#define SCTP_COOKIE_ECHO
Definition: sctp.h:441
#define SCTP_DATA_FIRST_FRAG
Definition: sctp.h:488
#define SCTP_ASCONF_ACK
Definition: sctp.h:454
#define SCTP_INITIATION_ACK
Definition: sctp.h:433
#define SCTP_ECN_CWR
Definition: sctp.h:444
#define SCTP_FLIGHT_LOGGING_ENABLE
Definition: sctp.h:608
#define SCTP_HEARTBEAT_ACK
Definition: sctp.h:436
#define SCTP_ASCONF
Definition: sctp.h:466
#define SCTP_PCB_FLAGS_BOUND_V6
Definition: sctp.h:519
#define SCTP_LOG_AT_SEND_2_OUTQ
Definition: sctp.h:630
#define SCTP_PCB_FLAGS_TCPTYPE
Definition: sctp.h:504
#define SCTP_BADCRC
Definition: sctp.h:474
#define SCTP_PCB_FLAGS_SOCKET_ALLGONE
Definition: sctp.h:522
#define SCTP_FORWARD_CUM_TSN
Definition: sctp.h:464
#define SCTP_OPERATION_ERROR
Definition: sctp.h:440
#define SCTP_DATA_UNORDERED
Definition: sctp.h:490
#define SCTP_COOKIE_ACK
Definition: sctp.h:442
#define SCTP_NAGLE_LOGGING_ENABLE
Definition: sctp.h:614
#define SCTP_PACKET_DROPPED
Definition: sctp.h:456
#define SCTP_LOG_TRY_ADVANCE
Definition: sctp.h:631
#define SCTP_SELECTIVE_ACK
Definition: sctp.h:434
#define SCTP_PCB_FLAGS_NODELAY
Definition: sctp.h:541
#define SCTP_LOG_MAXBURST_ENABLE
Definition: sctp.h:622
#define SCTP_BLK_LOGGING_ENABLE
Definition: sctp.h:605
#define SCTP_MOBILITY_BASE
Definition: sctp.h:571
#define SCTP_PCB_FLAGS_IN_TCPPOOL
Definition: sctp.h:515
#define SCTP_CAUSE_UNRESOLVABLE_ADDR
Definition: sctp.h:347
#define SCTP_PCB_FLAGS_SOCKET_GONE
Definition: sctp.h:521
#define SCTP_DATA_NOT_FRAG
Definition: sctp.h:489
#define SCTP_CAUSE_PROTOCOL_VIOLATION
Definition: sctp.h:355
#define SCTP_PCB_FLAGS_EXPLICIT_EOR
Definition: sctp.h:554
#define SCTP_HAD_NO_TCB
Definition: sctp.h:470
#define SCTP_STREAM_RESET
Definition: sctp.h:458
#define SCTP_DATA
Definition: sctp.h:431
#define SCTP_DATA_LAST_FRAG
Definition: sctp.h:487
#define SCTP_LOG_RWND_ENABLE
Definition: sctp.h:623
#define SCTP_PCB_FLAGS_CONNECTED
Definition: sctp.h:514
#define SCTP_AUTHENTICATION
Definition: sctp.h:447
#define SCTP_IDATA
Definition: sctp.h:451
#define SCTP_DATA_SACK_IMMEDIATELY
Definition: sctp.h:491
#define SCTP_SHUTDOWN_ACK
Definition: sctp.h:439
#define SCTP_SHUTDOWN
Definition: sctp.h:438
#define SCTP_SHUTDOWN_COMPLETE
Definition: sctp.h:445
#define SCTP_CAUSE_USER_INITIATED_ABT
Definition: sctp.h:354
#define SCTP_MBUF_LOGGING_ENABLE
Definition: sctp.h:613
#define SCTP_CWND_LOGGING_ENABLE
Definition: sctp.h:607
#define SCTP_CWR_REDUCE_OVERRIDE
Definition: sctp.h:478
#define SCTP_ECN_ECHO
Definition: sctp.h:443
#define SCTP_ABORT_ASSOCIATION
Definition: sctp.h:437
#define SCTP_PCB_FLAGS_NO_FRAGMENT
Definition: sctp.h:553
#define SCTP_HEARTBEAT_REQUEST
Definition: sctp.h:435
#define SCTP_NR_SELECTIVE_ACK
Definition: sctp.h:449
#define SCTP_PACKET_TRUNCATED
Definition: sctp.h:475
#define SCTP_LAST_PACKET_TRACING
Definition: sctp.h:627
#define SCTP_PCB_FLAGS_MULTIPLE_ASCONFS
Definition: sctp.h:556
#define SCTP_PCB_FLAGS_BOUNDALL
Definition: sctp.h:505
#define SCTP_IFORWARD_CUM_TSN
Definition: sctp.h:467
#define SCTP_INITIATION
Definition: sctp.h:432
int sctp_is_addr_pending(struct sctp_tcb *stcb, struct sctp_ifa *sctp_ifa)
Definition: sctp_asconf.c:2328
struct mbuf * sctp_compose_asconf(struct sctp_tcb *stcb, int *retlen, int addr_locked)
Definition: sctp_asconf.c:2554
int sctp_serialize_auth_chunks(const sctp_auth_chklist_t *list, uint8_t *ptr)
Definition: sctp_auth.c:160
uint32_t sctp_get_hmac_digest_len(uint16_t hmac_algo)
Definition: sctp_auth.c:823
size_t sctp_auth_get_chklist_size(const sctp_auth_chklist_t *list)
Definition: sctp_auth.c:147
int sctp_serialize_hmaclist(sctp_hmaclist_t *list, uint8_t *ptr)
Definition: sctp_auth.c:749
void sctp_auth_key_acquire(struct sctp_tcb *stcb, uint16_t key_id)
Definition: sctp_auth.c:547
void sctp_fill_hmac_digest_m(struct mbuf *m, uint32_t auth_offset, struct sctp_auth_chunk *auth, struct sctp_tcb *stcb, uint16_t keyid)
Definition: sctp_auth.c:1502
uint32_t sctp_get_auth_chunk_len(uint16_t hmac_algo)
Definition: sctp_auth.c:814
uint32_t sctp_hmac_m(uint16_t hmac_algo, uint8_t *key, uint32_t keylen, struct mbuf *m, uint32_t m_offset, uint8_t *digest, uint32_t trailer)
Definition: sctp_auth.c:975
#define SCTP_AUTH_RANDOM_SIZE_DEFAULT
Definition: sctp_auth.h:49
#define sctp_auth_is_required_chunk(chunk, list)
Definition: sctp_auth.h:101
struct mbuf * sctp_get_mbuf_for_msg(unsigned int space_needed, int want_header, int how, int allonebuf, int type)
#define SCTP_STR_RESET_OUT_REQUEST
#define IN4_ISPRIVATE_ADDRESS(a)
#define SCTP_BLOCK_LOG_INTO_BLK
#define SCTP_DIAG_INFO_LEN
#define SCTP_DEBUG_OUTPUT1
#define SCTP_SUPPORTED_CHUNK_EXT
#define SCTP_CHUNK_LIST
#define SCTP_STATE_INUSE
#define SCTP_SECRET_SIZE
#define SCTP_NAGLE_APPLIED
#define SCTP_COUNT_LIMIT
#define SCTP_FIRST_MBUF_RESV
#define SCTP_STATE_COOKIE_ECHOED
#define SCTP_LOC_1
#define SCTP_ADD_IP_ADDRESS
#define SCTP_DATAGRAM_NR_ACKED
#define SCTP_SIZE32(x)
#define SCTP_HAS_NAT_SUPPORT
#define SCTP_STATE_WAS_ABORTED
#define SCTP_PRSCTP_SUPPORTED
#define SCTP_BLOCK_LOG_INTO_BLKA
#define SCTP_NORMAL_PROC
#define SCTP_ULP_ADAPTATION
#define SCTP_ADD_SUBSTATE(_stcb, _substate)
#define SCTP_TSN_GT(a, b)
#define SCTP_MAX_BURST_APPLIED
#define SCTP_OUTPUT_FROM_T3
#define SCTP_HOSTNAME_ADDRESS
#define SCTP_VERSION_STRING
#define SCTP_DATAGRAM_RESEND
#define SCTP_DEBUG_OUTPUT4
#define SCTP_FWD_TSN_CHECK
#define SCTP_SUCCESS_REPORT
#define SCTP_SO_NOT_LOCKED
#define SCTP_CWNDLOG_PRESEND
#define SCTP_OUTPUT_FROM_USR_SEND
#define SCTP_OUTPUT_FROM_COOKIE_ACK
#define SCTP_DEL_IP_ADDRESS
#define SCTP_STATE_COOKIE
#define SCTP_DEFAULT_ADD_MORE
#define SCTP_SIGNATURE_SIZE
#define SCTP_ADDR_PF
#define SCTP_TIMER_TYPE_INIT
#define SCTP_STR_RESET_IN_REQUEST
#define SCTP_ECN_CAPABLE
#define SCTP_CWND_LOG_FROM_SEND
#define SCTP_SEND_NOW_COMPLETES
#define SCTP_MAX_DATA_BUNDLING
#define SCTP_ADDR_NO_PMTUD
#define IN4_ISLOOPBACK_ADDRESS(a)
#define SCTP_STATE_SHUTDOWN_PENDING
#define SCTP_NAGLE_SKIPPED
#define SCTP_ADDR_UNCONFIRMED
#define SCTP_FLIGHT_LOG_UP
#define SCTP_TIMER_TYPE_STRRESET
#define SCTP_DECREASE_PEER_RWND
#define SCTP_TIMER_TYPE_RECV
#define SCTP_DONOT_SETSCOPE
#define SCTP_LOC_6
#define SCTP_FROM_SCTP_OUTPUT
#define SCTP_RECV_BUFFER_SPLITTING
#define SCTP_LOC_5
#define SCTP_LOC_4
#define SCTP_STATE_SHUTDOWN_ACK_SENT
#define PR_SCTP_UNORDERED_FLAG
#define SCTP_TIMER_TYPE_SHUTDOWNGUARD
#define SCTP_GET_STATE(_stcb)
#define SCTP_ADDR_IS_CONFIRMED
#define SCTP_GETTIME_TIMEVAL(x)
#define MAX_TSN
#define SCTP_LOC_2
#define SCTP_LOC_3
#define SCTP_STR_RESET_ADD_IN_STREAMS
#define SCTP_DATAGRAM_UNSENT
#define SCTP_RETRAN_DONE
#define SCTP_RETRAN_EXIT
#define SCTP_OUTPUT_FROM_HB_TMR
#define SCTP_DEBUG_OUTPUT3
#define SCTP_MBUF_ICOPY
#define SCTP_COOKIE_PRESERVE
#define SCTP_MAX_CHUNK_LENGTH
#define SCTP_IPV4_ADDRESS
#define SCTP_STATE_COOKIE_WAIT
#define SCTP_DATAGRAM_SENT
#define SCTP_TIMER_TYPE_SHUTDOWN
#define SCTP_ADDR_OUT_OF_SCOPE
#define SCTP_STATE_SHUTDOWN_SENT
#define SCTP_STR_RESET_TSN_REQUEST
#define SCTP_STATE_SHUTDOWN_RECEIVED
#define SCTP_SET_PRIM_ADDR
#define SCTP_CWND_LOG_FILL_OUTQ_CALLED
#define SCTP_STR_RESET_ADD_OUT_STREAMS
#define SCTP_BLOCK_LOG_OUTOF_BLK
#define SCTP_STATE_ABOUT_TO_BE_FREED
#define SCTP_IPV6_ADDRESS
#define SCTP_ADDR_REACHABLE
#define SCTP_NOTIFY_INTERFACE_DOWN
#define SCTP_ADDRESS_LIMIT
#define SCTP_STATE_OPEN
#define SCTP_CWND_LOG_FROM_RESEND
#define SCTP_SET_STATE(_stcb, _state)
#define SCTP_DEBUG_USRREQ1
#define SCTP_SO_LOCKED
#define SCTP_MAX_BURST_ERROR_STOP
#define SCTP_TIMER_TYPE_ASCONF
#define SCTP_STR_RESET_RESPONSE
#define SCTP_ERROR_CAUSE_IND
#define SCTP_FORWARD_TSN_SKIP
#define SCTP_ECT0_BIT
#define SCTP_SUPPORTED_ADDRTYPE
#define SCTP_TIMER_TYPE_SEND
#define SCTP_HEARTBEAT_INFO
#define SCTP_FLIGHT_LOG_UP_RSND
#define SCTP_SEND_BUFFER_SPLITTING
#define SCTP_HMAC_LIST
#define SCTP_MINIMAL_RWND
#define SCTP_RANDOM
#define SCTP_UNRECOG_PARAM
#define SCTP_TIMER_TYPE_COOKIE
#define SCTP_HMAC
#define SCTP_STATE_PARTIAL_MSG_LEFT
#define SCTP_STRMOUT_LOG_SEND
#define SCTP_DEBUG_OUTPUT2
#define IPPROTO_SCTP
uint32_t sctp_calculate_cksum(struct mbuf *m, int32_t offset)
Definition: sctp_crc32.c:93
#define SCTP_MED_OVERHEAD
Definition: sctp_header.h:552
#define SCTP_MIN_OVERHEAD
Definition: sctp_header.h:556
#define SCTP_RESERVE_SPACE
Definition: sctp_header.h:183
#define SCTP_MAX_ADDR_PARAMS_SIZE
Definition: sctp_header.h:79
#define SCTP_MAX_OVERHEAD
Definition: sctp_header.h:546
#define SCTP_RANDOM_MAX_SIZE
Definition: sctp_header.h:499
#define sctp_init_ack_chunk
Definition: sctp_header.h:235
#define SCTP_MAX_SUPPORTED_EXT
Definition: sctp_header.h:128
#define SCTP_MIN_V4_OVERHEAD
Definition: sctp_header.h:566
void sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
Definition: sctp_indata.c:72
void sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
Definition: sctp_indata.c:2395
int sctp_is_there_unsent_data(struct sctp_tcb *stcb, int so_locked)
Definition: sctp_input.c:175
#define SCTP_PROBE5(probe, arg0, arg1, arg2, arg3, arg4)
Definition: sctp_kdtrace.h:51
#define SCTP_TCB_LOCK(_tcb)
#define SCTP_IPI_ADDR_RLOCK()
#define SCTP_INP_RLOCK(_inp)
#define SCTP_INP_INFO_RLOCK()
Definition: sctp_lock_bsd.h:94
#define SCTP_ASOC_CREATE_UNLOCK(_inp)
#define SCTP_IPI_ADDR_RUNLOCK()
#define SCTP_INP_WUNLOCK(_inp)
#define SCTP_TCB_TRYLOCK(_tcb)
#define SCTP_TCB_SEND_UNLOCK(_tcb)
#define SCTP_INP_DECR_REF(_inp)
#define SCTP_INP_INFO_RUNLOCK()
#define SCTP_INP_WLOCK(_inp)
#define SCTP_TCB_LOCK_ASSERT(_tcb)
#define SCTP_INP_INCR_REF(_inp)
#define SCTP_TCB_UNLOCK(_tcb)
#define SCTP_ASOC_CREATE_LOCK(_inp)
#define SCTP_TCB_SEND_LOCK(_tcb)
#define SCTP_INP_RUNLOCK(_inp)
#define SCTP_LTRACE_ERR_RET(inp, stcb, net, file, err)
Definition: sctp_os_bsd.h:191
#define SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, file, err)
Definition: sctp_os_bsd.h:190
#define SCTP_IFN_IS_IFT_LOOP(ifn)
Definition: sctp_os_bsd.h:205
#define SCTP_BUF_AT(m, size)
Definition: sctp_os_bsd.h:294
#define SCTP_GET_IFN_VOID_FROM_ROUTE(ro)
Definition: sctp_os_bsd.h:212
#define SCTP_GET_HEADER_FOR_OUTPUT(o_pak)
Definition: sctp_os_bsd.h:339
#define MODULE_GLOBAL(__SYMBOL)
Definition: sctp_os_bsd.h:142
#define SCTP_HEADER_TO_CHAIN(m)
Definition: sctp_os_bsd.h:336
#define SCTP_SNPRINTF(...)
Definition: sctp_os_bsd.h:303
#define SCTP_ENABLE_UDP_CSUM(m)
Definition: sctp_os_bsd.h:342
#define SCTP_OS_TIMER_PENDING
Definition: sctp_os_bsd.h:278
struct route sctp_route_t
Definition: sctp_os_bsd.h:396
#define SCTP_BUF_RESV_UF(m, size)
Definition: sctp_os_bsd.h:293
#define SCTP_BUF_PREPEND
Definition: sctp_os_bsd.h:299
#define SCTP_GET_IF_INDEX_FROM_ROUTE(ro)
Definition: sctp_os_bsd.h:213
#define SCTP_READ_RANDOM(buf, len)
Definition: sctp_os_bsd.h:446
#define SCTP_MALLOC(var, type, size, name)
Definition: sctp_os_bsd.h:219
#define SCTP_IPV6_V6ONLY(sctp_inpcb)
Definition: sctp_os_bsd.h:368
#define SCTP_RELEASE_PKT(m)
Definition: sctp_os_bsd.h:341
#define SCTP_PRINTF(params...)
Definition: sctp_os_bsd.h:151
#define SCTP_ATTACH_CHAIN(pak, m, packet_length)
Definition: sctp_os_bsd.h:350
#define SCTP_RTALLOC(ro, vrf_id, fibnum)
Definition: sctp_os_bsd.h:398
#define SCTP_ALIGN_TO_END(m, len)
Definition: sctp_os_bsd.h:301
#define SCTP_ROUTE_IS_REAL_LOOP(ro)
Definition: sctp_os_bsd.h:206
#define SCTP_GATHER_MTU_FROM_ROUTE(sctp_ifa, sa, nh)
Definition: sctp_os_bsd.h:317
#define SCTP_GET_HLIM(inp, ro)
Definition: sctp_os_bsd.h:365
#define SCTP_FREE(var, type)
Definition: sctp_os_bsd.h:224
#define SCTP_BUF_LEN(m)
Definition: sctp_os_bsd.h:290
#define SCTP_BUF_IS_EXTENDED(m)
Definition: sctp_os_bsd.h:295
#define SCTP_HEADER_LEN(m)
Definition: sctp_os_bsd.h:338
#define SCTP_M_COPYM
Definition: sctp_os_bsd.h:261
#define SCTPDBG(level, params...)
Definition: sctp_os_bsd.h:170
#define SCTP_SO_IS_NBIO(so)
Definition: sctp_os_bsd.h:370
#define SCTP_IP_OUTPUT(result, o_pak, ro, _inp, vrf_id)
Definition: sctp_os_bsd.h:413
#define SCTP_IS_LISTENING(inp)
Definition: sctp_os_bsd.h:480
#define SCTP_BASE_SYSCTL(__m)
Definition: sctp_os_bsd.h:148
#define SCTPDBG_ADDR(level, addr)
Definition: sctp_os_bsd.h:171
#define SCTP_BUF_NEXT(m)
Definition: sctp_os_bsd.h:291
#define SCTP_SB_LIMIT_RCV(so)
Definition: sctp_os_bsd.h:390
#define SCTP_BUF_NEXT_PKT(m)
Definition: sctp_os_bsd.h:292
#define SCTP_IP6_OUTPUT(result, o_pak, ro, ifp, _inp, vrf_id)
Definition: sctp_os_bsd.h:425
#define SCTP_SB_LIMIT_SND(so)
Definition: sctp_os_bsd.h:391
static int sctp_find_cmsg(int c_type, void *data, struct mbuf *control, size_t cpsize)
Definition: sctp_output.c:3457
static void sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk, int number_entries, uint16_t *list, uint32_t seq)
static uint32_t sctp_move_to_outqueue(struct sctp_tcb *stcb, struct sctp_nets *net, struct sctp_stream_out *strq, uint32_t space_left, uint32_t frag_point, int *giveup, int eeor_mode, int *bail, int so_locked)
Definition: sctp_output.c:7149
int sctp_is_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
Definition: sctp_output.c:2407
void sctp_send_shutdown_complete(struct sctp_tcb *stcb, struct sctp_nets *net, int reflect_vtag)
int sctp_lower_sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, struct mbuf *i_pak, struct mbuf *control, int flags, struct sctp_sndrcvinfo *srcv, struct thread *p)
static int sctp_chunk_retransmission(struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_association *asoc, int *cnt_out, struct timeval *now, int *now_filled, int *fr_done, int so_locked)
Definition: sctp_output.c:9413
void sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net)
Definition: sctp_output.c:9163
static int sctp_msg_append(struct sctp_tcb *stcb, struct sctp_nets *net, struct mbuf *m, struct sctp_sndrcvinfo *srcv, int hold_stcb_lock)
Definition: sctp_output.c:6318
int sctp_send_cookie_echo(struct mbuf *m, int offset, int limit, struct sctp_tcb *stcb, struct sctp_nets *net)
Definition: sctp_output.c:8973
static struct mbuf * sctp_copy_out_all(struct uio *uio, ssize_t len)
Definition: sctp_output.c:6791
void sctp_send_cookie_ack(struct sctp_tcb *stcb)
Definition: sctp_output.c:9116
static struct sctp_ifa * sctp_choose_boundspecific_inp(struct sctp_inpcb *inp, sctp_route_t *ro, uint32_t vrf_id, int non_asoc_addr_ok, uint8_t dest_is_priv, uint8_t dest_is_loop, sa_family_t fam)
Definition: sctp_output.c:2450
static struct mbuf * sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa, uint16_t *len)
Definition: sctp_output.c:1940
static int sctp_med_chunk_output(struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_association *asoc, int *num_out, int *reason_code, int control_only, int from_where, struct timeval *now, int *now_filled, uint32_t frag_point, int so_locked)
Definition: sctp_output.c:7797
static int sctp_lowlevel_chunk_output(struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net, struct sockaddr *to, struct mbuf *m, uint32_t auth_offset, struct sctp_auth_chunk *auth, uint16_t auth_keyid, int nofragment_flag, int ecn_ok, int out_of_asoc_ok, uint16_t src_port, uint16_t dest_port, uint32_t v_tag, uint16_t port, union sctp_sockstore *over_addr, uint8_t mflowtype, uint32_t mflowid, int so_locked)
Definition: sctp_output.c:3956
int sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t *ro)
void sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn, uint8_t override)
uint32_t sctp_get_frag_point(struct sctp_tcb *stcb)
Definition: sctp_output.c:6219
static void sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk, uint32_t seq)
struct mbuf * sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end, struct sctp_auth_chunk **auth_ret, uint32_t *offset, struct sctp_tcb *stcb, uint8_t chunk)
static struct sctp_ifa * sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn, struct sctp_inpcb *inp, struct sctp_tcb *stcb, int non_asoc_addr_ok, uint8_t dest_is_loop, uint8_t dest_is_priv, int addr_wanted, sa_family_t fam, sctp_route_t *ro)
Definition: sctp_output.c:2769
static void sctp_set_prsctp_policy(struct sctp_stream_queue_pending *sp)
Definition: sctp_output.c:6264
int sctp_output(struct sctp_inpcb *inp, struct mbuf *m, struct sockaddr *addr, struct mbuf *control, struct thread *p, int flags)
void send_forward_tsn(struct sctp_tcb *stcb, struct sctp_association *asoc)
static struct mbuf * sctp_copy_mbufchain(struct mbuf *clonechain, struct mbuf *outchain, struct mbuf **endofchain, int can_take_mbuf, int sizeofcpy, uint8_t copy_by_ref)
Definition: sctp_output.c:6419
struct mbuf * sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_scoping *scope, struct mbuf *m_at, int cnt_inits_to, uint16_t *padding_len, uint16_t *chunk_len)
Definition: sctp_output.c:2027
struct sctp_ifa * sctp_source_address_selection(struct sctp_inpcb *inp, struct sctp_tcb *stcb, sctp_route_t *ro, struct sctp_nets *net, int non_asoc_addr_ok, uint32_t vrf_id)
Definition: sctp_output.c:3297
const struct sack_track sack_array[256]
Definition: sctp_output.c:71
static struct sctp_ifa * sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp, struct sctp_tcb *stcb, sctp_route_t *ro, uint32_t vrf_id, uint8_t dest_is_priv, uint8_t dest_is_loop, int non_asoc_addr_ok, sa_family_t fam)
Definition: sctp_output.c:2582
void sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk, uint32_t resp_seq, uint32_t result, uint32_t send_una, uint32_t recv_next)
static void sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr, uint32_t val SCTP_UNUSED)
Definition: sctp_output.c:6582
static void sctp_prune_prsctp(struct sctp_tcb *stcb, struct sctp_association *asoc, struct sctp_sndrcvinfo *srcv, int dataout)
Definition: sctp_output.c:6138
static struct sctp_ifa * sctp_is_ifa_addr_acceptable(struct sctp_ifa *ifa, uint8_t dest_is_loop, uint8_t dest_is_priv, sa_family_t fam)
Definition: sctp_output.c:2321
void sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc)
Definition: sctp_output.c:6912
void sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
Definition: sctp_output.c:8909
struct mbuf * sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt, int param_offset, int *abort_processing, struct sctp_chunkhdr *cp, int *nat_friendly, int *cookie_found)
Definition: sctp_output.c:4901
static void sctp_fill_outqueue(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t frag_point, int eeor_mode, int *quit_now, int so_locked)
Definition: sctp_output.c:7682
static int sctp_copy_one(struct sctp_stream_queue_pending *sp, struct uio *uio, int resv_upfront)
static int sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m, struct sctp_sndrcvinfo *srcv)
Definition: sctp_output.c:6833
void sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net, struct mbuf *m, int len, int iphlen, int bad_crc)
int sctp_send_str_reset_req(struct sctp_tcb *stcb, uint16_t number_entries, uint16_t *list, uint8_t send_in_req, uint8_t send_tsn_req, uint8_t add_stream, uint16_t adding_o, uint16_t adding_i, uint8_t peer_asked)
static int sctp_add_stream_reset_out(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk, uint32_t seq, uint32_t resp_seq, uint32_t last_sent)
static struct mbuf * sctp_copy_resume(struct uio *uio, int max_send_len, int user_marks_eor, int *error, uint32_t *sndout, struct mbuf **new_tail)
int sctp_is_address_in_scope(struct sctp_ifa *ifa, struct sctp_scoping *scope, int do_update)
Definition: sctp_output.c:1867
static struct sctp_ifa * sctp_is_ifa_addr_preferred(struct sctp_ifa *ifa, uint8_t dest_is_loop, uint8_t dest_is_priv, sa_family_t fam)
Definition: sctp_output.c:2221
void sctp_send_hb(struct sctp_tcb *stcb, struct sctp_nets *net, int so_locked)
void sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *src_net, struct mbuf *init_pkt, int iphlen, int offset, struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh, struct sctp_init_chunk *init_chk, uint8_t mflowtype, uint32_t mflowid, uint32_t vrf_id, uint16_t port)
Definition: sctp_output.c:5457
static struct sctp_stream_queue_pending * sctp_copy_it_in(struct sctp_tcb *stcb, struct sctp_association *asoc, struct sctp_sndrcvinfo *srcv, struct uio *uio, struct sctp_nets *net, ssize_t max_send_len, int user_marks_eor, int *error)
void sctp_fix_ecn_echo(struct sctp_association *asoc)
Definition: sctp_output.c:7757
void sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked)
Definition: sctp_output.c:4615
void sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net, int addr_locked)
Definition: sctp_output.c:9271
void sctp_toss_old_asconf(struct sctp_tcb *stcb)
Definition: sctp_output.c:6930
static uint32_t sctp_can_we_split_this(struct sctp_tcb *stcb, uint32_t length, uint32_t space_left, uint32_t frag_point, int eeor_on)
Definition: sctp_output.c:7090
void sctp_send_deferred_reset_response(struct sctp_tcb *stcb, struct sctp_stream_reset_list *ent, int response)
void sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk, uint32_t resp_seq, uint32_t result)
__FBSDID("$FreeBSD$")
void sctp_move_chunks_from_net(struct sctp_tcb *stcb, struct sctp_nets *net)
Definition: sctp_output.c:7769
static void sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc, int so_locked)
Definition: sctp_output.c:7051
static void sctp_clean_up_datalist(struct sctp_tcb *stcb, struct sctp_association *asoc, struct sctp_tmit_chunk **data_list, int bundle_at, struct sctp_nets *net)
Definition: sctp_output.c:6959
int sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa)
Definition: sctp_output.c:2430
void sctp_send_sack(struct sctp_tcb *stcb, int so_locked)
void sctp_send_operr_to(struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh, uint32_t vtag, struct mbuf *cause, uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum, uint32_t vrf_id, uint16_t port)
void sctp_send_heartbeat_ack(struct sctp_tcb *stcb, struct mbuf *m, int offset, int chk_length, struct sctp_nets *net)
Definition: sctp_output.c:9061
static int sctp_count_num_preferred_boundall(struct sctp_ifn *ifn, struct sctp_inpcb *inp, struct sctp_tcb *stcb, int non_asoc_addr_ok, uint8_t dest_is_loop, uint8_t dest_is_priv, sa_family_t fam)
Definition: sctp_output.c:2887
static int sctp_process_cmsgs_for_init(struct sctp_tcb *stcb, struct mbuf *control, int *error)
Definition: sctp_output.c:3558
static uint8_t sctp_get_ect(struct sctp_tcb *stcb)
Definition: sctp_output.c:3900
#define SCTP_MAX_GAPS_INARRAY
Definition: sctp_output.c:62
int sctp_sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, struct mbuf *top, struct mbuf *control, int flags, struct thread *p)
static void sctp_sendall_completes(void *ptr, uint32_t val SCTP_UNUSED)
Definition: sctp_output.c:6765
void sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked)
void sctp_send_asconf_ack(struct sctp_tcb *stcb)
Definition: sctp_output.c:9321
void sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net)
Definition: sctp_output.c:9206
static void sctp_add_an_in_stream(struct sctp_tmit_chunk *chk, uint32_t seq, uint16_t adding)
void sctp_send_shutdown_complete2(struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh, uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum, uint32_t vrf_id, uint16_t port)
static bool sctp_are_there_new_addresses(struct sctp_association *asoc, struct mbuf *in_initpkt, int offset, int limit, struct sockaddr *src, struct mbuf **op_err)
Definition: sctp_output.c:5242
static void sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh, uint32_t vtag, uint8_t type, struct mbuf *cause, uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum, uint32_t vrf_id, uint16_t port)
void sctp_send_abort(struct mbuf *m, int iphlen, struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh, uint32_t vtag, struct mbuf *cause, uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum, uint32_t vrf_id, uint16_t port)
static struct sctp_ifa * sctp_choose_boundall(struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net, sctp_route_t *ro, uint32_t vrf_id, uint8_t dest_is_priv, uint8_t dest_is_loop, int non_asoc_addr_ok, sa_family_t fam)
Definition: sctp_output.c:2945
void sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn)
static void sctp_add_an_out_stream(struct sctp_tmit_chunk *chk, uint32_t seq, uint16_t adding)
static struct mbuf * sctp_add_cookie(struct mbuf *init, int init_offset, struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in, uint8_t **signature)
Definition: sctp_output.c:3815
int sctp_send_stream_reset_out_if_possible(struct sctp_tcb *stcb, int so_locked)
static void sctp_timer_validation(struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_association *asoc)
Definition: sctp_output.c:9935
void sctp_chunk_output(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_where, int so_locked)
Definition: sctp_output.c:9960
int sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t *ro)
#define SCTP_DATA_CHUNK_OVERHEAD(stcb)
Definition: sctp_output.h:130
int sctp_is_address_on_local_host(struct sockaddr *addr, uint32_t vrf_id)
Definition: sctp_pcb.c:3703
struct sctp_tcb * sctp_aloc_assoc_connected(struct sctp_inpcb *inp, struct sockaddr *firstaddr, int *error, uint32_t override_tag, uint32_t initial_tsn, uint32_t vrf_id, uint16_t o_streams, uint16_t port, struct thread *p, int initialize_auth_params)
Definition: sctp_pcb.c:4418
void sctp_free_ifa(struct sctp_ifa *sctp_ifap)
Definition: sctp_pcb.c:270
struct sctp_tcb * sctp_findassociation_ep_asocid(struct sctp_inpcb *inp, sctp_assoc_t asoc_id, int want_lock)
Definition: sctp_pcb.c:1583
int sctp_add_remote_addr(struct sctp_tcb *stcb, struct sockaddr *newaddr, struct sctp_nets **netp, uint16_t port, int set_scope, int from)
Definition: sctp_pcb.c:3721
struct sctp_ifn * sctp_find_ifn(void *ifn, uint32_t ifn_index)
Definition: sctp_pcb.c:193
int sctp_initiate_iterator(inp_func inpf, asoc_func af, inp_func inpe, uint32_t pcb_state, uint32_t pcb_features, uint32_t asoc_state, void *argp, uint32_t argi, end_func ef, struct sctp_inpcb *s_inp, uint8_t chunk_output_off)
Definition: sctp_pcb.c:7007
struct sctp_nets * sctp_findnet(struct sctp_tcb *stcb, struct sockaddr *addr)
Definition: sctp_pcb.c:3690
int sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfree, int from_location)
Definition: sctp_pcb.c:4689
struct sctp_tcb * sctp_findassociation_ep_addr(struct sctp_inpcb **inp_p, struct sockaddr *remote, struct sctp_nets **netp, struct sockaddr *local, struct sctp_tcb *locked_tcb)
Definition: sctp_pcb.c:1259
void sctp_add_local_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
Definition: sctp_pcb.c:5477
int sctp_set_primary_addr(struct sctp_tcb *stcb, struct sockaddr *sa, struct sctp_nets *net)
Definition: sctp_pcb.c:6683
struct sctp_vrf * sctp_find_vrf(uint32_t vrf_id)
Definition: sctp_pcb.c:216
#define SCTP_INITIALIZE_AUTH_PARAMS
Definition: sctp_pcb.h:568
#define SCTP_ADDR_DEFER_USE
Definition: sctp_pcb.h:95
#define SCTP_ADDR_IFA_UNUSEABLE
Definition: sctp_pcb.h:96
#define SCTP_BEING_DELETED
Definition: sctp_pcb.h:94
#define CHUNK_FLAGS_FRAGMENT_OK
Definition: sctp_structs.h:415
#define SCTP_PCB_ANY_FLAGS
Definition: sctp_structs.h:101
#define SCTP_MAX_STREAMS_AT_ONCE_RESET
Definition: sctp_structs.h:633
#define SCTP_STREAM_OPEN
Definition: sctp_structs.h:605
#define SCTP_STREAM_OPENING
Definition: sctp_structs.h:604
#define SCTP_ASOC_ANY_STATE
Definition: sctp_structs.h:103
#define SCTP_STREAM_RESET_IN_FLIGHT
Definition: sctp_structs.h:607
#define SCTP_STREAM_RESET_PENDING
Definition: sctp_structs.h:606
#define SCTP_STREAM_CLOSED
Definition: sctp_structs.h:603
#define CHUNK_FLAGS_PR_SCTP_TTL
Definition: sctp_structs.h:410
#define CHUNK_FLAGS_PR_SCTP_BUF
Definition: sctp_structs.h:411
#define SCTP_PCB_ANY_FEATURES
Definition: sctp_structs.h:102
#define CHUNK_FLAGS_PR_SCTP_RTX
Definition: sctp_structs.h:412
#define SCTP_TSN_LOG_SIZE
Definition: sctp_structs.h:654
struct sctp_nets * sctp_find_alternate_net(struct sctp_tcb *stcb, struct sctp_nets *net, int mode)
Definition: sctp_timer.c:171
#define PR_SCTP_ENABLED(x)
Definition: sctp_uio.h:268
#define SCTP_UNORDERED
Definition: sctp_uio.h:246
#define SCTP_STAT_INCR_BY(_x, _d)
Definition: sctp_uio.h:1131
#define SCTP_AUTHINFO
Definition: sctp_uio.h:86
#define SCTP_ABORT
Definition: sctp_uio.h:245
#define SCTP_ADDR_CONFIRMED
Definition: sctp_uio.h:347
#define SCTP_SNDINFO
Definition: sctp_uio.h:82
#define SCTP_SENDALL
Definition: sctp_uio.h:248
#define PR_SCTP_INVALID_POLICY(x)
Definition: sctp_uio.h:273
#define PR_SCTP_BUF_ENABLED(x)
Definition: sctp_uio.h:271
#define SCTP_DSTADDRV6
Definition: sctp_uio.h:88
#define SCTP_EOF
Definition: sctp_uio.h:244
typedef uint32_t sctp_assoc_t
Definition: sctp_uio.h:48
#define SCTP_INIT
Definition: sctp_uio.h:79
#define INVALID_SINFO_FLAG(x)
Definition: sctp_uio.h:252
#define SCTP_STAT_INCR_COUNTER64(_x)
Definition: sctp_uio.h:1136
#define SCTP_PR_SCTP_MAX
Definition: sctp_uio.h:264
#define SCTP_PR_SCTP_TTL
Definition: sctp_uio.h:260
#define SCTP_SNDRCV
Definition: sctp_uio.h:80
#define SCTP_PR_SCTP_NONE
Definition: sctp_uio.h:259
#define SCTP_SACK_IMMEDIATELY
Definition: sctp_uio.h:250
#define PR_SCTP_POLICY(x)
Definition: sctp_uio.h:267
#define SCTP_EOR
Definition: sctp_uio.h:249
#define SCTP_STAT_INCR(_x)
Definition: sctp_uio.h:1125
#define SCTP_PRINFO
Definition: sctp_uio.h:85
#define SCTP_ALL_ASSOC
Definition: sctp_uio.h:52
#define SCTP_DSTADDRV4
Definition: sctp_uio.h:87
#define SCTP_STAT_DECR_GAUGE32(_x)
Definition: sctp_uio.h:1140
#define SCTP_ADDR_OVER
Definition: sctp_uio.h:247
void sctp_pathmtu_adjustment(struct sctp_tcb *stcb, uint32_t mtu, bool resend)
Definition: sctp_usrreq.c:109
#define sctp_free_a_chunk(_stcb, _chk, _so_locked)
Definition: sctp_var.h:140
#define sctp_free_a_strmoq(_stcb, _strmoq, _so_locked)
Definition: sctp_var.h:122
#define sctp_is_feature_on(inp, feature)
Definition: sctp_var.h:49
#define sctp_is_feature_off(inp, feature)
Definition: sctp_var.h:50
#define sctp_feature_off(inp, feature)
Definition: sctp_var.h:48
#define sctp_total_flight_increase(stcb, tp1)
Definition: sctp_var.h:308
#define sctp_is_mobility_feature_on(inp, feature)
Definition: sctp_var.h:81
#define sctp_free_remote_addr(__net)
Definition: sctp_var.h:184
#define sctp_ucount_decr(val)
Definition: sctp_var.h:228
#define sctp_alloc_a_chunk(_stcb, _chk)
Definition: sctp_var.h:166
#define sctp_alloc_a_strmoq(_stcb, _strmoq)
Definition: sctp_var.h:131
#define sctp_sbspace_sub(a, b)
Definition: sctp_var.h:90
#define sctp_flight_size_increase(tp1)
Definition: sctp_var.h:254
void sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
Definition: sctputil.c:489
struct mbuf * sctp_add_pad_tombuf(struct mbuf *m, int padlen)
Definition: sctputil.c:3084
uint32_t sctp_calculate_len(struct mbuf *m)
Definition: sctputil.c:2896
void sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t from)
Definition: sctputil.c:2615
void sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net)
Definition: sctputil.c:2157
void sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
Definition: sctputil.c:398
struct sctp_paramhdr * sctp_get_next_param(struct mbuf *m, int offset, struct sctp_paramhdr *pull, int pull_limit)
Definition: sctputil.c:3073
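sctp_get_next_param() above is the helper this file uses to walk the TLV-encoded parameter list of received chunks such as INIT and INIT-ACK. A minimal user-space sketch of the same walk over a flat buffer follows (the kernel iterates an mbuf chain instead; walk_params and struct param_hdr are illustrative names, not taken from the source):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

/* Mirrors struct sctp_paramhdr (sctp.h): 16-bit type and 16-bit length,
 * both network byte order; the length includes this 4-byte header. */
struct param_hdr {
	uint16_t param_type;
	uint16_t param_length;
};

/* Walk a TLV parameter list, advancing by the length rounded up to a
 * 4-byte boundary (the same rounding the kernel's SCTP_SIZE32() does). */
static void
walk_params(const uint8_t *buf, size_t len)
{
	size_t offset = 0;

	while (offset + sizeof(struct param_hdr) <= len) {
		struct param_hdr ph;

		memcpy(&ph, buf + offset, sizeof(ph));
		uint16_t plen = ntohs(ph.param_length);

		if (plen < sizeof(ph) || offset + plen > len)
			break;	/* malformed parameter, stop */
		printf("param type 0x%04x, length %u\n",
		    ntohs(ph.param_type), plen);
		offset += (plen + 3) & ~(size_t)3;	/* pad to 32 bits */
	}
}

int
main(void)
{
	/* Two parameters: type 0xc006 (length 8) and type 0x8008 (length 6,
	 * padded to 8 on the wire). */
	const uint8_t pkt[] = {
		0xc0, 0x06, 0x00, 0x08, 0x00, 0x00, 0x00, 0x01,
		0x80, 0x08, 0x00, 0x06, 0x82, 0xc0, 0x00, 0x00,
	};

	walk_params(pkt, sizeof(pkt));
	return (0);
}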
void sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
Definition: sctputil.c:427
struct mbuf * sctp_generate_cause(uint16_t code, char *info)
Definition: sctputil.c:4951
void sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb, uint32_t error, void *data, int so_locked)
Definition: sctputil.c:4042
uint32_t sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
Definition: sctputil.c:1049
void sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct mbuf *op_err, bool timedout, int so_locked)
Definition: sctputil.c:4465
void sctp_log_block(uint8_t from, struct sctp_association *asoc, ssize_t sendlen)
Definition: sctputil.c:548
uint32_t sctp_select_initial_TSN(struct sctp_pcb *inp)
Definition: sctputil.c:1011
void sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
Definition: sctputil.c:315
int sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
Definition: sctputil.c:4676
caddr_t sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr)
Definition: sctputil.c:3033
void sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
Definition: sctputil.c:163
struct mbuf * sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
Definition: sctputil.c:3115
int sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1, uint8_t sent, int so_locked)
Definition: sctputil.c:5031
void sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
Definition: sctputil.c:853
int sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtag)
Definition: sctputil.c:4578
#define sctp_snd_sb_alloc(stcb, sz)
Definition: sctputil.h:289
#define sctp_m_freem
Definition: sctputil.h:55
#define sctp_m_free
Definition: sctputil.h:54
uint8_t id
Definition: sctp_structs.h:418
uint8_t can_take_data
Definition: sctp_structs.h:419
struct in_addr
Definition: in.h:83
in_addr_t s_addr
Definition: in.h:84
struct inpcb
Definition: in_pcb.h:217
struct ucred * inp_cred
Definition: in_pcb.h:258
u_char inp_ip_ttl
Definition: in_pcb.h:261
u_char inp_ip_tos
Definition: in_pcb.h:278
struct ip6_hdr
Definition: ip6.h:74
struct in6_addr ip6_dst
Definition: ip6.h:85
struct in6_addr ip6_src
Definition: ip6.h:84
struct ip
Definition: ip.h:51
u_char ip_p
Definition: ip.h:69
struct in_addr ip_src, ip_dst
Definition: ip.h:71
u_char ip_tos
Definition: ip.h:60
u_char ip_hl
Definition: ip.h:53
u_short ip_sum
Definition: ip.h:70
u_short ip_len
Definition: ip.h:61
u_char ip_v
Definition: ip.h:54
u_char ip_ttl
Definition: ip.h:68
u_short ip_off
Definition: ip.h:63
uint8_t left_edge
Definition: sctp_output.c:65
uint8_t right_edge
Definition: sctp_output.c:64
struct sctp_gap_ack_block gaps[SCTP_MAX_GAPS_INARRAY]
Definition: sctp_output.c:68
uint8_t num_entries
Definition: sctp_output.c:66
uint8_t spare
Definition: sctp_output.c:67
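The five members above make up one entry of the 256-entry sack_array lookup table consulted when building SACK chunks: each byte of the TSN mapping array indexes an entry whose gaps[] give the start/end offsets of the received runs inside that byte (bit 0 is the lowest TSN of the byte), with right_edge/left_edge marking runs that may merge with the neighbouring byte. A small sketch of the bit-scan the table precomputes (gaps_in_byte and struct gap are illustrative names):

#include <stdint.h>
#include <stdio.h>

struct gap { uint8_t start, end; };

/*
 * Compute the runs of set bits inside one 8-bit slice of a mapping array,
 * where bit i set means "TSN base + i was received".  The kernel avoids
 * this loop by indexing a table of precomputed results instead.
 */
static int
gaps_in_byte(uint8_t bits, struct gap out[4])
{
	int n = 0, i = 0;

	while (i < 8) {
		if (bits & (1 << i)) {
			int start = i;

			while (i < 8 && (bits & (1 << i)))
				i++;
			out[n].start = start;
			out[n].end = i - 1;
			n++;
		} else {
			i++;
		}
	}
	return (n);
}

int
main(void)
{
	struct gap g[4];
	int n = gaps_in_byte(0x05, g);	/* bits 0 and 2 set */

	for (int i = 0; i < n; i++)
		printf("run %d: offsets %u-%u\n", i, g[i].start, g[i].end);
	return (0);
}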
struct sctp_chunkhdr ch
Definition: sctp_header.h:290
struct sctp_nets * last_sent_to
Definition: sctp_structs.h:774
struct mbuf * data
Definition: sctp_structs.h:775
uint32_t serial_number
Definition: sctp_header.h:361
uint16_t peer_hmac_id
uint32_t cookie_preserve_req
Definition: sctp_structs.h:891
uint32_t sending_seq
Definition: sctp_structs.h:930
unsigned int numnets
uint16_t strm_realoutsize
struct timeval time_entered
Definition: sctp_structs.h:792
uint32_t total_output_queue_size
unsigned int sent_queue_retran_cnt
unsigned int sent_queue_cnt_removeable
uint16_t stream_locked_on
uint8_t peer_supports_nat
uint32_t sctp_frag_point
sctp_auth_chklist_t * local_auth_chunks
uint32_t highest_tsn_inside_map
Definition: sctp_structs.h:953
struct sctp_nets * alternate
Definition: sctp_structs.h:861
uint32_t str_reset_seq_out
Definition: sctp_structs.h:899
uint32_t my_last_reported_rwnd
uint16_t max_init_times
struct sctpchunk_listhead send_queue
Definition: sctp_structs.h:833
uint8_t ifp_had_enobuf
struct sctp_stream_out * strmout
Definition: sctp_structs.h:857
uint32_t sb_send_resv
uint32_t peer_vtag_nonce
Definition: sctp_structs.h:910
struct sctpladdr sctp_restricted_addrs
Definition: sctp_structs.h:807
unsigned int total_flight_count
struct sctpnetlisthead nets
Definition: sctp_structs.h:814
uint8_t reconfig_supported
uint8_t auth_supported
struct sctp_nets * last_control_chunk_from
Definition: sctp_structs.h:867
uint8_t burst_limit_applied
sctp_authinfo_t authinfo
uint32_t pr_sctp_cnt
uint8_t idata_supported
uint32_t last_reset_action[SCTP_MAX_RESET_PARAMS]
Definition: sctp_structs.h:974
struct sctp_laddr * last_used_address
Definition: sctp_structs.h:853
uint32_t asconf_seq_out_acked
Definition: sctp_structs.h:894
struct sctp_timer dack_timer
Definition: sctp_structs.h:799
struct sctp_asconf_ackhead asconf_ack_sent
Definition: sctp_structs.h:842
uint32_t cumulative_tsn
Definition: sctp_structs.h:943
uint32_t mapping_array_base_tsn
Definition: sctp_structs.h:948
uint8_t cmt_dac_pkts_rcvd
uint32_t str_reset_seq_in
Definition: sctp_structs.h:901
unsigned int total_flight
unsigned int stream_queue_cnt
uint8_t sctp_cmt_on_off
uint32_t cookie_life
struct sctpchunk_listhead asconf_send_queue
Definition: sctp_structs.h:823
struct sctp_nets * last_net_cmt_send_started
Definition: sctp_structs.h:863
unsigned int fwd_tsn_cnt
unsigned int delayed_ack
struct sctp_ss_functions ss_functions
Definition: sctp_structs.h:886
uint8_t * nr_mapping_array
Definition: sctp_structs.h:956
uint32_t my_rwnd_control_len
uint16_t streamoutcnt
struct sctpchunk_listhead control_send_queue
Definition: sctp_structs.h:820
unsigned int size_on_all_streams
uint8_t asconf_supported
uint32_t highest_tsn_inside_nr_map
Definition: sctp_structs.h:957
uint32_t smallest_mtu
Definition: sctp_structs.h:915
unsigned int max_inbound_streams
uint32_t last_acked_seq
Definition: sctp_structs.h:927
sctp_hmaclist_t * local_hmacs
unsigned int numduptsns
unsigned int data_pkts_seen
uint8_t nrsack_supported
struct sctp_scoping scope
uint8_t pktdrop_supported
uint8_t * mapping_array
Definition: sctp_structs.h:858
struct sctp_tmit_chunk * str_reset
Definition: sctp_structs.h:848
uint32_t chunks_on_out_queue
uint8_t prsctp_supported
unsigned int send_queue_cnt
int dup_tsns[SCTP_MAX_DUP_TSNS]
struct timeval time_last_sent
Definition: sctp_structs.h:794
struct sctp_nonpad_sndrcvinfo def_send
Definition: sctp_structs.h:796
uint16_t ecn_echo_cnt_onq
struct sctp_nets * primary_destination
Definition: sctp_structs.h:860
struct sctp_cc_functions cc_functions
Definition: sctp_structs.h:879
struct sctp_nets * last_data_chunk_from
Definition: sctp_structs.h:865
uint32_t init_seq_number
Definition: sctp_structs.h:933
struct sctpchunk_listhead sent_queue
Definition: sctp_structs.h:832
unsigned int pre_open_streams
sctp_auth_chklist_t * peer_auth_chunks
unsigned int size_on_reasm_queue
uint8_t stream_reset_outstanding
uint32_t initial_init_rto_max
uint32_t fr_max_burst
uint32_t my_vtag_nonce
Definition: sctp_structs.h:909
uint16_t strm_pending_add_size
uint32_t advanced_peer_ack_point
Definition: sctp_structs.h:937
uint32_t peer_vtag
Definition: sctp_structs.h:907
unsigned int sent_queue_cnt
struct sctp_paramhdr ph
Definition: sctp_header.h:506
uint16_t hmac_id
Definition: sctp_header.h:518
struct sctp_chunkhdr ch
Definition: sctp_header.h:516
uint16_t hmac_ids[]
Definition: sctp_header.h:512
struct sctp_paramhdr ph
Definition: sctp_header.h:511
struct sctp_paramhdr ph
Definition: sctp_header.h:501
uint8_t random_data[]
Definition: sctp_header.h:502
sctp_key_t * random
Definition: sctp_auth.h:88
uint16_t active_keyid
Definition: sctp_auth.h:93
uint32_t random_len
Definition: sctp_auth.h:89
struct sctp_block_entry
Definition: sctp_pcb.h:125
int error
Definition: sctp_pcb.h:126
void(* sctp_cwnd_update_packet_transmitted)(struct sctp_tcb *stcb, struct sctp_nets *net)
Definition: sctp_structs.h:720
void(* sctp_cwnd_new_transmission_begins)(struct sctp_tcb *stcb, struct sctp_nets *net)
Definition: sctp_structs.h:724
void(* sctp_cwnd_update_after_output)(struct sctp_tcb *stcb, struct sctp_nets *net, int burst_limit)
Definition: sctp_structs.h:718
uint8_t chunk_type
Definition: sctp.h:60
uint8_t chunk_flags
Definition: sctp.h:61
uint16_t chunk_length
Definition: sctp.h:62
struct sctp_sndrcvinfo sndrcv
Definition: sctp_structs.h:162
ssize_t sndlen
Definition: sctp_structs.h:163
struct mbuf * m
Definition: sctp_structs.h:161
struct sctp_inpcb * inp
Definition: sctp_structs.h:160
uint32_t tsn
Definition: sctp_header.h:342
struct sctp_chunkhdr ch
Definition: sctp_header.h:341
struct sctp_data dp
Definition: sctp_header.h:148
struct sctp_chunkhdr ch
Definition: sctp_header.h:147
struct timeval timetodrop
Definition: sctp_structs.h:397
uint32_t fast_retran_tsn
Definition: sctp_structs.h:396
uint8_t doing_fast_retransmit
Definition: sctp_structs.h:399
uint8_t chunk_was_revoked
Definition: sctp_structs.h:403
uint16_t sid
Definition: sctp_header.h:140
uint16_t ssn
Definition: sctp_header.h:141
uint32_t tsn
Definition: sctp_header.h:139
uint32_t ppid
Definition: sctp_header.h:142
uint32_t num_pkts_since_cwr
Definition: sctp_header.h:336
struct sctp_chunkhdr ch
Definition: sctp_header.h:334
struct sctp_chunkhdr ch
Definition: sctp_header.h:376
uint16_t length
Definition: sctp.h:376
uint16_t code
Definition: sctp.h:375
struct sctp_heartbeat heartbeat
Definition: sctp_header.h:281
struct sctp_chunkhdr ch
Definition: sctp_header.h:280
char address[SCTP_ADDRMAX]
Definition: sctp_header.h:97
struct sctp_paramhdr ph
Definition: sctp_header.h:88
struct sctp_heartbeat_info_param hb_info
Definition: sctp_header.h:276
uint16_t num_algo
Definition: sctp_auth.h:82
struct sctp_idata dp
Definition: sctp_header.h:165
struct sctp_chunkhdr ch
Definition: sctp_header.h:164
uint32_t ppid
Definition: sctp_header.h:157
uint32_t mid
Definition: sctp_header.h:155
uint16_t sid
Definition: sctp_header.h:153
union sctp_idata::@32 ppid_fsn
uint16_t reserved
Definition: sctp_header.h:154
uint32_t fsn
Definition: sctp_header.h:158
uint32_t tsn
Definition: sctp_header.h:152
uint8_t src_is_loop
Definition: sctp_pcb.h:110
uint32_t localifa_flags
Definition: sctp_pcb.h:108
union sctp_sockstore address
Definition: sctp_pcb.h:105
struct sctp_ifn * ifn_p
Definition: sctp_pcb.h:101
uint32_t refcount
Definition: sctp_pcb.h:106
uint32_t vrf_id
Definition: sctp_pcb.h:109
void * ifa
Definition: sctp_pcb.h:102
uint8_t src_is_priv
Definition: sctp_pcb.h:111
struct sctp_ifalist ifalist
Definition: sctp_pcb.h:75
char ifn_name[SCTP_IFNAMSIZ]
Definition: sctp_pcb.h:89
struct sctp_vrf * vrf
Definition: sctp_pcb.h:76
struct sctp_chunkhdr ch
Definition: sctp_header.h:224
struct sctp_init init
Definition: sctp_header.h:225
uint32_t initial_tsn
Definition: sctp_header.h:178
uint16_t num_inbound_streams
Definition: sctp_header.h:177
uint16_t num_outbound_streams
Definition: sctp_header.h:176
uint32_t a_rwnd
Definition: sctp_header.h:175
uint32_t initiate_tag
Definition: sctp_header.h:174
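initiate_tag, a_rwnd, the two stream counts, and initial_tsn above are the fixed fields of the INIT chunk filled in when an association is started. From user space the requested stream counts are normally supplied through the SCTP_INITMSG socket option (RFC 6458); a hedged sketch, with the numeric values chosen only for illustration:

#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);

	if (fd < 0) {
		perror("socket");
		return (1);
	}

	/* Ask for up to 10 outbound streams, accept at most 5 inbound ones. */
	struct sctp_initmsg im;

	memset(&im, 0, sizeof(im));
	im.sinit_num_ostreams = 10;
	im.sinit_max_instreams = 5;
	im.sinit_max_attempts = 4;	/* cap on INIT retransmissions */

	if (setsockopt(fd, IPPROTO_SCTP, SCTP_INITMSG, &im, sizeof(im)) < 0)
		perror("setsockopt(SCTP_INITMSG)");
	return (0);
}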
uint16_t fibnum
Definition: sctp_pcb.h:422
uint8_t auth_supported
Definition: sctp_pcb.h:400
struct inpcb inp
Definition: sctp_pcb.h:356
uint8_t prsctp_supported
Definition: sctp_pcb.h:399
uint32_t sctp_flags
Definition: sctp_pcb.h:381
uint8_t ecn_supported
Definition: sctp_pcb.h:398
uint8_t asconf_supported
Definition: sctp_pcb.h:402
uint8_t idata_supported
Definition: sctp_pcb.h:401
struct socket * sctp_socket
Definition: sctp_pcb.h:379
union sctp_inpcb::@33 ip_inp
uint8_t nrsack_supported
Definition: sctp_pcb.h:404
struct sctpasochead sctp_asoc_list
Definition: sctp_pcb.h:388
uint8_t reconfig_supported
Definition: sctp_pcb.h:403
struct sctp_nonpad_sndrcvinfo def_send
Definition: sctp_pcb.h:406
struct sctpladdr sctp_addr_list
Definition: sctp_pcb.h:371
struct sctp_laddr * next_addr_touse
Definition: sctp_pcb.h:376
uint32_t def_vrf_id
Definition: sctp_pcb.h:421
uint8_t pktdrop_supported
Definition: sctp_pcb.h:405
struct sctp_pcb sctp_ep
Definition: sctp_pcb.h:383
uint8_t addr[SCTP_V6_ADDR_BYTES]
Definition: sctp_header.h:59
uint8_t key[]
Definition: sctp_auth.h:60
uint32_t action
Definition: sctp_pcb.h:119
struct sctp_ifa * ifa
Definition: sctp_pcb.h:118
union sctp_sockstore _l_addr
Definition: sctp_structs.h:193
struct sctp_ifa * _s_addr
Definition: sctp_structs.h:194
uint32_t heartbeat_random1
Definition: sctp_structs.h:320
uint8_t addr_is_local
Definition: sctp_structs.h:352
uint32_t flight_size
Definition: sctp_structs.h:289
uint8_t indx_of_eligible_next_to_use
Definition: sctp_structs.h:351
uint32_t heartbeat_random2
Definition: sctp_structs.h:321
uint8_t fast_retran_ip
Definition: sctp_structs.h:347
uint8_t dscp
Definition: sctp_structs.h:325
uint16_t port
Definition: sctp_structs.h:342
struct timeval last_sent_time
Definition: sctp_structs.h:277
uint8_t hb_responded
Definition: sctp_structs.h:348
uint32_t mtu
Definition: sctp_structs.h:261
uint8_t flowtype
Definition: sctp_structs.h:382
uint16_t dest_state
Definition: sctp_structs.h:334
struct sctp_net_route ro
Definition: sctp_structs.h:258
uint8_t rto_needed
Definition: sctp_structs.h:380
uint32_t flowid
Definition: sctp_structs.h:381
uint8_t window_probe
Definition: sctp_structs.h:376
struct sctp_timer rxt_timer
Definition: sctp_structs.h:274
uint8_t src_addr_selected
Definition: sctp_structs.h:350
uint32_t cwnd
Definition: sctp_structs.h:290
sctp_assoc_t sinfo_assoc_id
Definition: sctp_structs.h:693
struct sctp_nr_sack nr_sack
Definition: sctp_header.h:271
uint32_t cum_tsn_ack
Definition: sctp_header.h:259
uint32_t a_rwnd
Definition: sctp_header.h:260
uint16_t num_nr_gap_ack_blks
Definition: sctp_header.h:262
uint16_t reserved
Definition: sctp_header.h:264
uint16_t num_dup_tsns
Definition: sctp_header.h:263
uint16_t num_gap_ack_blks
Definition: sctp_header.h:261
uint16_t param_length
Definition: sctp.h:71
uint16_t param_type
Definition: sctp.h:70
uint32_t secret_key[SCTP_HOW_MANY_SECRETS][SCTP_NUMBER_OF_SECRETS]
Definition: sctp_pcb.h:265
uint16_t port
Definition: sctp_pcb.h:328
uint8_t default_dscp
Definition: sctp_pcb.h:325
char current_secret_number
Definition: sctp_pcb.h:326
uint32_t sctp_sws_sender
Definition: sctp_pcb.h:275
uint32_t def_cookie_life
Definition: sctp_pcb.h:313
sctp_auth_chklist_t * local_auth_chunks
Definition: sctp_pcb.h:282
uint16_t pre_open_stream_count
Definition: sctp_pcb.h:299
uint32_t adaptation_layer_indicator
Definition: sctp_pcb.h:317
sctp_hmaclist_t * local_hmacs
Definition: sctp_pcb.h:283
uint16_t max_open_streams_intome
Definition: sctp_pcb.h:300
uint8_t adaptation_layer_indicator_provided
Definition: sctp_pcb.h:318
struct sctp_chunkhdr ch
Definition: sctp_header.h:408
uint32_t current_onq
Definition: sctp_header.h:410
struct sctp_sack sack
Definition: sctp_header.h:255
struct sctp_chunkhdr ch
Definition: sctp_header.h:254
uint16_t num_dup_tsns
Definition: sctp_header.h:248
uint32_t a_rwnd
Definition: sctp_header.h:246
uint16_t num_gap_ack_blks
Definition: sctp_header.h:247
uint32_t cum_tsn_ack
Definition: sctp_header.h:245
uint8_t site_scope
Definition: sctp_structs.h:651
uint8_t ipv6_addr_legal
Definition: sctp_structs.h:647
uint8_t ipv4_addr_legal
Definition: sctp_structs.h:646
uint8_t ipv4_local_scope
Definition: sctp_structs.h:649
uint8_t loopback_scope
Definition: sctp_structs.h:648
uint8_t local_scope
Definition: sctp_structs.h:650
struct sctp_chunkhdr ch
Definition: sctp_header.h:307
struct sctp_chunkhdr ch
Definition: sctp_header.h:301
uint32_t cumulative_tsn_ack
Definition: sctp_header.h:302
struct sctp_chunkhdr ch
Definition: sctp_header.h:347
sctp_assoc_t sinfo_assoc_id
Definition: sctp_uio.h:125
uint16_t sinfo_keynumber_valid
Definition: sctp_uio.h:127
uint16_t sinfo_flags
Definition: sctp_uio.h:119
uint16_t sinfo_stream
Definition: sctp_uio.h:117
uint32_t sinfo_timetolive
Definition: sctp_uio.h:122
uint32_t sinfo_context
Definition: sctp_uio.h:121
uint16_t sinfo_keynumber
Definition: sctp_uio.h:126
uint32_t sinfo_ppid
Definition: sctp_uio.h:120
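The sinfo_* fields above carry per-message send metadata: destination stream, payload protocol identifier, flags, a lifetime used by PR-SCTP, and an opaque context. A hedged user-space sketch of passing the same values through the libc helper sctp_sendmsg(3), which accepts them as separate arguments (address, port, PPID, and stream number are placeholders):

#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>
#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	int fd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);

	if (fd < 0) {
		perror("socket");
		return (1);
	}

	struct sockaddr_in to;

	memset(&to, 0, sizeof(to));
	to.sin_family = AF_INET;
	to.sin_len = sizeof(to);			/* BSD length field */
	to.sin_port = htons(5001);			/* placeholder port */
	to.sin_addr.s_addr = inet_addr("192.0.2.1");	/* placeholder address */

	const char msg[] = "hello";

	/* Stream 3, unordered delivery, PPID 42, no lifetime, context 0. */
	if (sctp_sendmsg(fd, msg, sizeof(msg) - 1,
	    (struct sockaddr *)&to, sizeof(to),
	    htonl(42), SCTP_UNORDERED, 3, 0, 0) < 0)
		perror("sctp_sendmsg");

	close(fd);
	return (0);
}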
void(* sctp_ss_packet_done)(struct sctp_tcb *stcb, struct sctp_nets *net, struct sctp_association *asoc)
Definition: sctp_structs.h:750
void(* sctp_ss_clear)(struct sctp_tcb *stcb, struct sctp_association *asoc, bool clear_values)
Definition: sctp_structs.h:738
bool(* sctp_ss_is_empty)(struct sctp_tcb *stcb, struct sctp_association *asoc)
Definition: sctp_structs.h:743
void(* sctp_ss_remove_from_stream)(struct sctp_tcb *stcb, struct sctp_association *asoc, struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp)
Definition: sctp_structs.h:744
bool(* sctp_ss_is_user_msgs_incomplete)(struct sctp_tcb *stcb, struct sctp_association *asoc)
Definition: sctp_structs.h:756
void(* sctp_ss_scheduled)(struct sctp_tcb *stcb, struct sctp_nets *net, struct sctp_association *asoc, struct sctp_stream_out *strq, int moved_how_much)
Definition: sctp_structs.h:748
void(* sctp_ss_add_to_stream)(struct sctp_tcb *stcb, struct sctp_association *asoc, struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp)
Definition: sctp_structs.h:741
struct sctp_stream_out *(* sctp_ss_select_stream)(struct sctp_tcb *stcb, struct sctp_nets *net, struct sctp_association *asoc)
Definition: sctp_structs.h:746
void(* sctp_ss_init)(struct sctp_tcb *stcb, struct sctp_association *asoc)
Definition: sctp_structs.h:737
void(* sctp_ss_init_stream)(struct sctp_tcb *stcb, struct sctp_stream_out *strq, struct sctp_stream_out *with_strq)
Definition: sctp_structs.h:740
uint8_t last_msg_incomplete
Definition: sctp_structs.h:629
uint32_t chunks_on_queues
Definition: sctp_structs.h:613
uint32_t next_mid_ordered
Definition: sctp_structs.h:626
uint32_t abandoned_unsent[1]
Definition: sctp_structs.h:619
uint32_t next_mid_unordered
Definition: sctp_structs.h:627
struct sctp_streamhead outqueue
Definition: sctp_structs.h:611
uint32_t abandoned_sent[1]
Definition: sctp_structs.h:620
struct sctp_nets * net
Definition: sctp_structs.h:514
struct sctp_paramhdr ph
Definition: sctp_header.h:457
struct sctp_paramhdr ph
Definition: sctp_header.h:432
struct sctp_paramhdr ph
Definition: sctp_header.h:424
struct sctp_paramhdr ph
Definition: sctp_header.h:449
struct sctp_paramhdr ph
Definition: sctp_header.h:443
struct sctp_paramhdr ph
Definition: sctp_header.h:438
uint16_t flags
Definition: sctp_header.h:388
uint16_t ssn
Definition: sctp_header.h:383
uint16_t sid
Definition: sctp_header.h:382
struct sctp_paramhdr ph
Definition: sctp_header.h:82
struct socket * sctp_socket
Definition: sctp_pcb.h:438
struct mtx tcb_send_mtx
Definition: sctp_pcb.h:462
struct mtx tcb_mtx
Definition: sctp_pcb.h:461
uint32_t freed_by_sorcv_sincelast
Definition: sctp_pcb.h:455
uint32_t total_sends
Definition: sctp_pcb.h:456
struct sctp_association asoc
Definition: sctp_pcb.h:449
uint16_t rport
Definition: sctp_pcb.h:459
struct sctp_block_entry * block_entry
Definition: sctp_pcb.h:447
struct sctp_inpcb * sctp_ep
Definition: sctp_pcb.h:439
sctp_os_timer_t timer
Definition: sctp_structs.h:46
union sctp_tmit_chunk::@34 rec
struct sctp_data_chunkrec data
Definition: sctp_structs.h:424
uint8_t window_probe
Definition: sctp_structs.h:446
uint8_t book_size_scale
Definition: sctp_structs.h:443
struct timeval sent_rcv_time
Definition: sctp_structs.h:428
uint8_t copy_by_ref
Definition: sctp_structs.h:445
uint16_t book_size
Definition: sctp_structs.h:437
uint16_t snd_count
Definition: sctp_structs.h:434
struct mbuf * last_mbuf
Definition: sctp_structs.h:430
uint8_t pad_inplace
Definition: sctp_structs.h:441
struct sctp_nets * whoTo
Definition: sctp_structs.h:431
struct chk_id chunk_id
Definition: sctp_structs.h:425
uint8_t holds_key_ref
Definition: sctp_structs.h:440
uint16_t send_size
Definition: sctp_structs.h:436
struct sctp_association * asoc
Definition: sctp_structs.h:427
uint8_t no_fr_allowed
Definition: sctp_structs.h:444
uint16_t auth_keyid
Definition: sctp_structs.h:439
struct sctp_ifnlist ifnlist
Definition: sctp_pcb.h:65
uint32_t total_ifa_count
Definition: sctp_pcb.h:69
uint32_t vrf_id
Definition: sctp_pcb.h:66
struct sctphdr
Definition: sctp.h:48
uint16_t src_port
Definition: sctp.h:49
uint32_t v_tag
Definition: sctp.h:51
uint32_t checksum
Definition: sctp.h:52
uint16_t dest_port
Definition: sctp.h:50
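src_port, dest_port, v_tag, and checksum above form the 12-byte SCTP common header prepended to every outgoing packet. A minimal user-space sketch of filling a mirror of it (struct sctp_common_hdr is an illustrative name); per RFC 4960 the ports and verification tag are sent in network byte order and the checksum field is zeroed before the CRC32c over the whole packet is computed and inserted:

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>
#include <stdio.h>

/* User-space mirror of struct sctphdr (sctp.h). */
struct sctp_common_hdr {
	uint16_t src_port;	/* network byte order */
	uint16_t dest_port;	/* network byte order */
	uint32_t v_tag;		/* peer's verification tag */
	uint32_t checksum;	/* CRC32c, filled in last */
};

int
main(void)
{
	uint8_t pkt[sizeof(struct sctp_common_hdr)];
	struct sctp_common_hdr hdr;

	hdr.src_port = htons(5001);		/* placeholder ports */
	hdr.dest_port = htons(5002);
	hdr.v_tag = htonl(0x12345678);		/* placeholder tag */
	hdr.checksum = 0;			/* zero while computing CRC32c */
	memcpy(pkt, &hdr, sizeof(hdr));

	/* A real sender would now run CRC32c over the complete packet
	 * (header plus chunks) and store the result in the checksum field. */
	printf("common header is %zu bytes\n", sizeof(hdr));
	return (0);
}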
struct sockaddr_in
Definition: in.h:97
struct in_addr sin_addr
Definition: in.h:101
uint8_t sin_len
Definition: in.h:98
sa_family_t sin_family
Definition: in.h:99
in_port_t sin_port
Definition: in.h:100
struct udphdr
Definition: udp.h:45
u_short uh_ulen
Definition: udp.h:48
u_short uh_sport
Definition: udp.h:46
u_short uh_sum
Definition: udp.h:49
u_short uh_dport
Definition: udp.h:47
#define V_udp_cksum
Definition: udp_var.h:153
#define UDPSTAT_INC(name)
Definition: udp_var.h:118
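The struct udphdr fields and UDP counters appear in this file because outgoing SCTP packets can be encapsulated in UDP (RFC 6951) when an encapsulation port is configured; 9899 is the IANA-assigned sctp-tunneling port. A rough user-space sketch of that framing (encapsulate and struct udp_hdr are illustrative names; the kernel builds the header inside the mbuf chain and decides separately whether to compute the UDP checksum):

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>
#include <stdio.h>

/* User-space mirror of struct udphdr (udp.h). */
struct udp_hdr {
	uint16_t uh_sport;
	uint16_t uh_dport;
	uint16_t uh_ulen;	/* header + payload, network byte order */
	uint16_t uh_sum;
};

/* Prepend a UDP header in front of an already-built SCTP packet. */
static size_t
encapsulate(uint8_t *out, const uint8_t *sctp_pkt, uint16_t sctp_len,
    uint16_t sport, uint16_t dport)
{
	struct udp_hdr uh;

	uh.uh_sport = htons(sport);
	uh.uh_dport = htons(dport);
	uh.uh_ulen = htons(sizeof(uh) + sctp_len);
	uh.uh_sum = 0;		/* a real stack may fill in the UDP checksum */
	memcpy(out, &uh, sizeof(uh));
	memcpy(out + sizeof(uh), sctp_pkt, sctp_len);
	return (sizeof(uh) + sctp_len);
}

int
main(void)
{
	uint8_t sctp_pkt[32] = { 0 };	/* stand-in for a finished SCTP packet */
	uint8_t frame[sizeof(struct udp_hdr) + sizeof(sctp_pkt)];
	size_t n = encapsulate(frame, sctp_pkt, sizeof(sctp_pkt), 9899, 9899);

	printf("encapsulated frame: %zu bytes\n", n);
	return (0);
}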
struct sockaddr_in6 sin6
Definition: sctp_uio.h:629
struct sockaddr sa
Definition: sctp_uio.h:630
struct sockaddr_in sin
Definition: sctp_uio.h:628