77#include <sys/socket.h>
78#include <sys/protosw.h>
79#include <sys/sysctl.h>
80#include <sys/kernel.h>
81#include <sys/callout.h>
82#include <sys/malloc.h>
83#include <sys/module.h>
87#include <net/if_var.h>
91#include <netinet/in.h>
92#include <netinet/in_var.h>
94#include <netinet/ip6.h>
97#include <netinet/icmp6.h>
101#include <security/mac/mac_framework.h>
/* Emit this file's CTR() debug traces under the INET6 KTR event class. */
77#define KTR_MLD KTR_INET6
119static char * mld_rec_type_to_str(
const int);
139 struct in6_multi *,
const int,
const int,
const int,
142 struct mbuf *,
struct mldv2_query *,
const int,
const int);
146 struct mbufq *,
struct mbufq *,
203#define MLD_EMBEDSCOPE(pin6, zoneid) \
204 if (IN6_IS_SCOPE_LINKLOCAL(pin6) || \
205 IN6_IS_ADDR_MC_INTFACELOCAL(pin6)) \
206 (pin6)->s6_addr16[1] = htons((zoneid) & 0xFFFF) \
/*
 * Per-VNET accessors (VNET() indirection) for the MLD globals:
 * the GSR rate-limit timeval, the per-interface softc list head,
 * and the three fast-timeout "timers running" flags.
 */
217#define V_mld_gsrdelay VNET(mld_gsrdelay)
218#define V_mli_head VNET(mli_head)
219#define V_interface_timers_running6 VNET(interface_timers_running6)
220#define V_state_change_timers_running6 VNET(state_change_timers_running6)
221#define V_current_state_timers_running6 VNET(current_state_timers_running6)
/* sysctl subtree net.inet6.mld: parent node for the MLD tunables below. */
225SYSCTL_NODE(_net_inet6, OID_AUTO, mld, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
226    "IPv6 Multicast Listener Discovery");
232 CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
234 "Rate limit for MLDv2 Group-and-Source queries in seconds");
241 "Per-interface MLDv2 state");
252SYSCTL_INT(_net_inet6_mld, OID_AUTO, use_allow, CTLFLAG_RWTUN,
253 &
mld_use_allow, 0,
"Use ALLOW/BLOCK for RFC 4604 SSM joins/leaves");
261 struct ip6_opt_router
ra;
269 .pad = { .ip6o_type = IP6OPT_PADN, 0 },
271 .ip6or_type = IP6OPT_ROUTER_ALERT,
272 .ip6or_len = IP6OPT_RTALERT_LEN - 2,
273 .ip6or_value[0] = ((IP6OPT_RTALERT_MLD >> 8) & 0xFF),
274 .ip6or_value[1] = (IP6OPT_RTALERT_MLD & 0xFF)
284 m->m_pkthdr.PH_loc.ptr = ifp->if_vnet;
286 m->m_pkthdr.rcvif = ifp;
287 m->m_pkthdr.flowid = ifp->if_index;
294 m->m_pkthdr.PH_loc.ptr = NULL;
295 m->m_pkthdr.flowid = 0;
305static __inline uint32_t
309#if defined(VIMAGE) && defined(INVARIANTS)
310 KASSERT(curvnet == m->m_pkthdr.PH_loc.ptr,
311 (
"%s: called when curvnet was not restored: cuvnet %p m ptr %p",
312 __func__, curvnet, m->m_pkthdr.PH_loc.ptr));
314 return (m->m_pkthdr.flowid);
329 error = sysctl_wire_old_buffer(req,
sizeof(
int));
337 error = sysctl_handle_int(oidp, &i, 0, req);
338 if (error || !req->newptr)
341 if (i < -1 || i >= 60) {
346 CTR2(
KTR_MLD,
"change mld_gsrdelay from %d to %d",
365 struct epoch_tracker et;
375 if (req->newptr != NULL)
381 error = sysctl_wire_old_buffer(req,
sizeof(
struct mld_ifinfo));
391 ifp = ifnet_byindex(name[0]);
407 error = SYSCTL_OUT(req, &info,
sizeof(info));
429 while ((m = mbufq_dequeue(mq)) != NULL) {
430 CTR3(
KTR_MLD,
"%s: dispatch %p from %p", __func__, mq, m);
478 CTR3(
KTR_MLD,
"%s: called for ifp %p(%s)",
479 __func__, ifp, if_name(ifp));
484 if (!(ifp->if_flags & IFF_MULTICAST))
504 mli = malloc(
sizeof(
struct mld_ifsoftc), M_MLD, M_NOWAIT|M_ZERO);
519 CTR2(
KTR_MLD,
"allocate mld_ifsoftc for ifp %p(%s)",
540 struct epoch_tracker et;
542 struct ifmultiaddr *ifma;
545 CTR3(
KTR_MLD,
"%s: called for ifp %p(%s)", __func__, ifp,
558 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
578 IF_ADDR_WUNLOCK(ifp);
592 CTR3(
KTR_MLD,
"%s: called for ifp %p(%s)",
593 __func__, ifp, if_name(ifp));
605 CTR3(
KTR_MLD,
"%s: freeing mld_ifsoftc for ifp %p(%s)",
606 __func__, ifp, if_name(ifp));
610 LIST_FOREACH_SAFE(mli, &
V_mli_head, mli_link, tmli) {
615 mbufq_drain(&mli->
mli_gq);
617 LIST_REMOVE(mli, mli_link);
636 struct ifmultiaddr *ifma;
639 int is_general_query;
647 is_general_query = 0;
650 CTR3(
KTR_MLD,
"ignore v1 query %s on ifp %p(%s)",
661 CTR3(
KTR_MLD,
"ignore v1 query src %s on ifp %p(%s)",
682 is_general_query = 1;
698 KASSERT(mli != NULL, (
"%s: no mld_ifsoftc for ifp %p", __func__, ifp));
705 if (is_general_query) {
710 CTR2(
KTR_MLD,
"process v1 general query on ifp %p(%s)",
712 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
726 CTR3(
KTR_MLD,
"process v1 query %s on ifp %p(%s)",
763 CTR4(
KTR_MLD,
"%s: %s/%s timer=%d", __func__,
776 CTR1(
KTR_MLD,
"%s: REPORTING and timer running, "
777 "skipping.", __func__);
786 CTR1(
KTR_MLD,
"%s: ->REPORTING", __func__);
792 CTR1(
KTR_MLD,
"%s: ->AWAKENING", __func__);
811 struct mbuf *m,
struct mldv2_query *mld,
const int off,
const int icmp6len)
815 uint32_t maxdelay, nsrc, qqi;
816 int is_general_query;
826 CTR3(
KTR_MLD,
"ignore v2 query src %s on ifp %p(%s)",
837 CTR3(
KTR_MLD,
"ignore v1 query src %s on ifp %p(%s)",
843 is_general_query = 0;
845 CTR2(
KTR_MLD,
"input v2 query on ifp %p(%s)", ifp, if_name(ifp));
847 maxdelay = ntohs(mld->mld_maxdelay);
848 if (maxdelay >= 32768) {
858 CTR3(
KTR_MLD,
"%s: clamping qrv %d to %d", __func__,
887 is_general_query = 1;
901 KASSERT(mli != NULL, (
"%s: no mld_ifsoftc for ifp %p", __func__, ifp));
916 CTR4(
KTR_MLD,
"%s: qrv %d qi %d maxdelay %d", __func__, qrv, qqi,
919 if (is_general_query) {
931 CTR2(
KTR_MLD,
"process v2 general query on ifp %p(%s)",
952 CTR1(
KTR_MLD,
"%s: GS query throttled.",
957 CTR2(
KTR_MLD,
"process v2 group query on ifp %p(%s)",
987 int timer,
struct mbuf *m0,
struct mldv2_query *mld,
const int off)
1016 KASSERT((m0->m_flags & M_PKTHDR) == 0 ||
1017 m0->m_pkthdr.len >= off +
sizeof(
struct mldv2_query) +
1019 (
"mldv2 packet is too short: (%d bytes < %zd bytes, m=%p)",
1020 m0->m_pkthdr.len, off +
sizeof(
struct mldv2_query) +
1021 nsrc *
sizeof(
struct in6_addr), m0));
1070 for (i = 0; i < nsrc; i++) {
1071 m_copydata(m0, soff,
sizeof(
struct in6_addr),
1076 nrecorded += retval;
1079 if (nrecorded > 0) {
1081 "%s: schedule response to SG query", __func__);
1100 struct mld_hdr *mld)
1112 CTR3(
KTR_MLD,
"ignore v1 report %s on ifp %p(%s)",
1118 if (ifp->if_flags & IFF_LOOPBACK)
1128 CTR3(
KTR_MLD,
"ignore v1 query src %s on ifp %p(%s)",
1142 CTR3(
KTR_MLD,
"ignore v1 query dst %s on ifp %p(%s)",
1168 CTR3(
KTR_MLD,
"process v1 report %s on ifp %p(%s)",
1169 ip6_sprintf(ip6tbuf, &mld->mld_addr), ifp, if_name(ifp));
1192 KASSERT(mli != NULL,
1193 (
"%s: no mli for ifp %p", __func__, ifp));
1213 "report suppressed for %s on ifp %p(%s)",
1251 struct ip6_hdr *ip6;
1253 struct mld_hdr *mld;
1257 CTR3(
KTR_MLD,
"%s: called w/mbuf (%p,%d)", __func__, m, off);
1259 ifp = m->m_pkthdr.rcvif;
1262 if (m->m_len < off +
sizeof(*mld)) {
1263 m = m_pullup(m, off +
sizeof(*mld));
1265 ICMP6STAT_INC(icp6s_badlen);
1266 return (IPPROTO_DONE);
1269 mld = (
struct mld_hdr *)(mtod(m, uint8_t *) + off);
1270 if (mld->mld_type == MLD_LISTENER_QUERY &&
1274 mldlen =
sizeof(
struct mld_hdr);
1276 if (m->m_len < off + mldlen) {
1277 m = m_pullup(m, off + mldlen);
1279 ICMP6STAT_INC(icp6s_badlen);
1280 return (IPPROTO_DONE);
1284 ip6 = mtod(m,
struct ip6_hdr *);
1285 mld = (
struct mld_hdr *)(mtod(m, uint8_t *) + off);
1291 switch (mld->mld_type) {
1292 case MLD_LISTENER_QUERY:
1293 icmp6_ifstat_inc(ifp, ifs6_in_mldquery);
1294 if (icmp6len ==
sizeof(
struct mld_hdr)) {
1297 }
else if (icmp6len >=
sizeof(
struct mldv2_query)) {
1303 case MLD_LISTENER_REPORT:
1304 icmp6_ifstat_inc(ifp, ifs6_in_mldreport);
1308 case MLDV2_LISTENER_REPORT:
1309 icmp6_ifstat_inc(ifp, ifs6_in_mldreport);
1311 case MLD_LISTENER_DONE:
1312 icmp6_ifstat_inc(ifp, ifs6_in_mlddone);
1328 struct in6_multi_head inmh;
1329 VNET_ITERATOR_DECL(vnet_iter);
1333 VNET_LIST_RLOCK_NOSLEEP();
1334 VNET_FOREACH(vnet_iter) {
1335 CURVNET_SET(vnet_iter);
1339 VNET_LIST_RUNLOCK_NOSLEEP();
1351 struct epoch_tracker et;
1356 struct ifmultiaddr *ifma;
1379 CTR1(
KTR_MLD,
"%s: interface timers running", __func__);
1400 CTR1(
KTR_MLD,
"%s: state change timers running", __func__);
1416 NET_EPOCH_ENTER(et);
1418 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1428 &scq, inm, uri_fasthz);
1432 IF_ADDR_WUNLOCK(ifp);
1444 while ((inm = SLIST_FIRST(inmh)) != NULL) {
1445 SLIST_REMOVE_HEAD(inmh, in6m_defer);
1447 MLD_LISTENER_REPORT);
1470 int report_timer_expired;
1476 report_timer_expired = 0;
1478 report_timer_expired = 1;
1493 if (report_timer_expired) {
1495 SLIST_INSERT_HEAD(inmh, inm, in6m_defer);
1512 struct mbufq *qrq,
struct mbufq *scq,
1513 struct in6_multi *inm,
const int uri_fasthz)
1515 int query_response_timer_expired;
1516 int state_change_retransmit_timer_expired;
1524 query_response_timer_expired = 0;
1525 state_change_retransmit_timer_expired = 0;
1534 query_response_timer_expired = 0;
1536 query_response_timer_expired = 1;
1542 state_change_retransmit_timer_expired = 0;
1544 state_change_retransmit_timer_expired = 1;
1550 if (!state_change_retransmit_timer_expired &&
1551 !query_response_timer_expired)
1570 if (query_response_timer_expired) {
1576 CTR2(
KTR_MLD,
"%s: enqueue record = %d",
1584 if (state_change_retransmit_timer_expired) {
1605 CTR3(
KTR_MLD,
"%s: T1 -> T0 for %s/%s", __func__,
1634 int old_version_timer;
1638 CTR4(
KTR_MLD,
"%s: switching to v%d on ifp %p(%s)", __func__,
1647 old_version_timer *= PR_SLOWHZ;
1664 struct epoch_tracker et;
1665 struct in6_multi_head inmh;
1666 struct ifmultiaddr *ifma;
1670 CTR3(
KTR_MLD,
"%s: cancel v2 timers on ifp %p(%s)", __func__,
1691 NET_EPOCH_ENTER(et);
1692 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1730 IF_ADDR_WUNLOCK(ifp);
1741 VNET_ITERATOR_DECL(vnet_iter);
1743 VNET_LIST_RLOCK_NOSLEEP();
1744 VNET_FOREACH(vnet_iter) {
1745 CURVNET_SET(vnet_iter);
1749 VNET_LIST_RUNLOCK_NOSLEEP();
1784 "%s: transition from v%d -> v%d on %p(%s)",
1799 struct ip6_hdr *ip6;
1800 struct mbuf *mh, *md;
1801 struct mld_hdr *mld;
1814 mh = m_gethdr(M_NOWAIT, MT_DATA);
1820 md = m_get(M_NOWAIT, MT_DATA);
1834 M_ALIGN(mh,
sizeof(
struct ip6_hdr));
1835 mh->m_pkthdr.len =
sizeof(
struct ip6_hdr) + sizeof(struct mld_hdr);
1836 mh->m_len =
sizeof(
struct ip6_hdr);
1838 ip6 = mtod(mh,
struct ip6_hdr *);
1840 ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
1841 ip6->ip6_vfc |= IPV6_VERSION;
1842 ip6->ip6_nxt = IPPROTO_ICMPV6;
1846 md->m_len =
sizeof(
struct mld_hdr);
1847 mld = mtod(md,
struct mld_hdr *);
1848 mld->mld_type = type;
1851 mld->mld_maxdelay = 0;
1852 mld->mld_reserved = 0;
1855 mld->mld_cksum =
in6_cksum(mh, IPPROTO_ICMPV6,
1856 sizeof(
struct ip6_hdr),
sizeof(
struct mld_hdr));
1906 CTR1(
KTR_MLD,
"%s: inm is disconnected", __func__);
1914 KASSERT(inm->
in6m_ifma != NULL, (
"%s: no ifma", __func__));
1922 KASSERT(inm->
in6m_ifp == ifp, (
"%s: bad ifp", __func__));
1926 KASSERT(mli != NULL, (
"%s: no mld_ifsoftc for ifp %p", __func__, ifp));
1934 CTR3(
KTR_MLD,
"%s: inm transition %d -> %d", __func__,
1937 CTR1(
KTR_MLD,
"%s: initial join", __func__);
1941 CTR1(
KTR_MLD,
"%s: final leave", __func__);
1946 CTR1(
KTR_MLD,
"%s: filter set change", __func__);
1972 struct epoch_tracker et;
1975 int error, retval, syncstates;
1981 CTR4(
KTR_MLD,
"%s: initial join %s on ifp %p(%s)",
1993 KASSERT(mli && mli->
mli_ifp == ifp, (
"%s: inconsistent ifp", __func__));
2004 if ((ifp->if_flags & IFF_LOOPBACK) ||
2008"%s: not kicking state machine for silent group", __func__);
2040 NET_EPOCH_ENTER(et);
2042 MLD_LISTENER_REPORT);
2068 CTR2(
KTR_MLD,
"%s: enqueue record = %d",
2071 error = retval * -1;
2085 (
"%s: invalid robustness %d", __func__,
2110 CTR3(
KTR_MLD,
"%s: T1 -> T0 for %s/%s", __func__,
2130 CTR4(
KTR_MLD,
"%s: state change for %s on ifp %p(%s)",
2139 KASSERT(mli && mli->
mli_ifp == ifp,
2140 (
"%s: inconsistent ifp", __func__));
2142 if ((ifp->if_flags & IFF_LOOPBACK) ||
2148"%s: not kicking state machine for silent group", __func__);
2150 CTR1(
KTR_MLD,
"%s: nothing to do", __func__);
2152 CTR3(
KTR_MLD,
"%s: T1 -> T0 for %s/%s", __func__,
2162 CTR2(
KTR_MLD,
"%s: enqueue record = %d", __func__, retval);
2188 struct epoch_tracker et;
2196 CTR4(
KTR_MLD,
"%s: final leave %s on ifp %p(%s)",
2209"%s: not kicking state machine for silent group", __func__);
2219 panic(
"%s: MLDv2 state reached, not MLDv2 mode",
2222 NET_EPOCH_ENTER(et);
2237 CTR4(
KTR_MLD,
"%s: Leaving %s/%s with %d "
2238 "pending retransmissions.", __func__,
2252 KASSERT(retval != 0,
2253 (
"%s: enqueue record = %d", __func__,
2273 CTR3(
KTR_MLD,
"%s: T1 -> T0 for %s/%s", __func__,
2277 CTR3(
KTR_MLD,
"%s: T1 now MCAST_UNDEFINED for %p/%s",
2311 const int is_state_change,
const int is_group_query,
2312 const int is_source_query,
const int use_block_allow)
2318 struct mbuf *m0, *m, *md;
2319 int is_filter_list_change;
2320 int minrec0len, m0srcs, msrcs, nbytes, off;
2321 int record_has_sources;
2332 is_filter_list_change = 0;
2339 record_has_sources = 1;
2351 record_has_sources = 0;
2353 if (is_state_change) {
2371 if (mode == MCAST_EXCLUDE) {
2372 CTR1(
KTR_MLD,
"%s: change to EXCLUDE",
2376 CTR1(
KTR_MLD,
"%s: change to INCLUDE",
2378 if (use_block_allow) {
2388 if (mode == MCAST_UNDEFINED) {
2395 if (mode == MCAST_UNDEFINED)
2396 record_has_sources = 0;
2400 if (record_has_sources) {
2401 is_filter_list_change = 1;
2410 if (mode == MCAST_EXCLUDE) {
2412 }
else if (mode == MCAST_INCLUDE) {
2415 (
"%s: inm %p is INCLUDE but ASM count is %d",
2423 if (is_filter_list_change)
2427 CTR3(
KTR_MLD,
"%s: nothing to do for %s/%s",
2439 if (record_has_sources)
2440 minrec0len +=
sizeof(
struct in6_addr);
2442 CTR4(
KTR_MLD,
"%s: queueing %s for %s/%s", __func__,
2443 mld_rec_type_to_str(type),
2455 m0 = mbufq_last(mq);
2456 if (!is_group_query &&
2459 (m0->m_pkthdr.len + minrec0len) <
2461 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
2465 CTR1(
KTR_MLD,
"%s: use existing packet", __func__);
2467 if (mbufq_full(mq)) {
2468 CTR1(
KTR_MLD,
"%s: outbound queue full", __func__);
2474 if (!is_state_change && !is_group_query)
2475 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2477 m = m_gethdr(M_NOWAIT, MT_DATA);
2483 CTR1(
KTR_MLD,
"%s: allocated first packet", __func__);
2495 if (!m_append(m,
sizeof(
struct mldv2_record), (
void *)&mr)) {
2498 CTR1(
KTR_MLD,
"%s: m_append() failed.", __func__);
2519 if (record_has_sources) {
2523 md->m_len - nbytes);
2525 md = m_getptr(m, 0, &off);
2530 RB_FOREACH_SAFE(ims, ip6_msource_tree, &inm->
in6m_srcs,
2532 CTR2(
KTR_MLD,
"%s: visit node %s", __func__,
2535 CTR2(
KTR_MLD,
"%s: node is %d", __func__, now);
2536 if ((now != mode) ||
2538 (!use_block_allow && mode == MCAST_UNDEFINED))) {
2539 CTR1(
KTR_MLD,
"%s: skip node", __func__);
2542 if (is_source_query && ims->
im6s_stp == 0) {
2543 CTR1(
KTR_MLD,
"%s: skip unrecorded node",
2547 CTR1(
KTR_MLD,
"%s: append node", __func__);
2548 if (!m_append(m,
sizeof(
struct in6_addr),
2552 CTR1(
KTR_MLD,
"%s: m_append() failed.",
2558 if (msrcs == m0srcs)
2561 CTR2(
KTR_MLD,
"%s: msrcs is %d this packet", __func__,
2564 nbytes += (msrcs *
sizeof(
struct in6_addr));
2567 if (is_source_query && msrcs == 0) {
2568 CTR1(
KTR_MLD,
"%s: no recorded sources to report", __func__);
2578 CTR1(
KTR_MLD,
"%s: enqueueing first packet", __func__);
2579 m->m_pkthdr.PH_vt.vt_nrecs = 1;
2580 mbufq_enqueue(mq, m);
2582 m->m_pkthdr.PH_vt.vt_nrecs++;
2587 if (!record_has_sources)
2595 while (nims != NULL) {
2596 if (mbufq_full(mq)) {
2597 CTR1(
KTR_MLD,
"%s: outbound queue full", __func__);
2600 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2602 m = m_gethdr(M_NOWAIT, MT_DATA);
2606 md = m_getptr(m, 0, &off);
2607 pmr = (
struct mldv2_record *)(mtod(md, uint8_t *) + off);
2608 CTR1(
KTR_MLD,
"%s: allocated next packet", __func__);
2610 if (!m_append(m,
sizeof(
struct mldv2_record), (
void *)&mr)) {
2613 CTR1(
KTR_MLD,
"%s: m_append() failed.", __func__);
2616 m->m_pkthdr.PH_vt.vt_nrecs = 1;
2623 RB_FOREACH_FROM(ims, ip6_msource_tree, nims) {
2624 CTR2(
KTR_MLD,
"%s: visit node %s",
2627 if ((now != mode) ||
2629 (!use_block_allow && mode == MCAST_UNDEFINED))) {
2630 CTR1(
KTR_MLD,
"%s: skip node", __func__);
2633 if (is_source_query && ims->
im6s_stp == 0) {
2634 CTR1(
KTR_MLD,
"%s: skip unrecorded node",
2638 CTR1(
KTR_MLD,
"%s: append node", __func__);
2639 if (!m_append(m,
sizeof(
struct in6_addr),
2643 CTR1(
KTR_MLD,
"%s: m_append() failed.",
2648 if (msrcs == m0srcs)
2652 nbytes += (msrcs *
sizeof(
struct in6_addr));
2654 CTR1(
KTR_MLD,
"%s: enqueueing next packet", __func__);
2655 mbufq_enqueue(mq, m);
2697 static const int MINRECLEN =
2703 struct mbuf *m, *m0, *md;
2704 int m0srcs, nbytes, npbytes, off, rsrcs, schanged;
2706 uint8_t mode, now, then;
2741 m0 = mbufq_last(mq);
2743 (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <=
2745 (m0->m_pkthdr.len + MINRECLEN) <
2748 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
2752 "%s: use previous packet", __func__);
2754 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2756 m = m_gethdr(M_NOWAIT, MT_DATA);
2759 "%s: m_get*() failed", __func__);
2762 m->m_pkthdr.PH_vt.vt_nrecs = 0;
2769 "%s: allocated new packet", __func__);
2778 memset(&mr, 0,
sizeof(mr));
2781 if (!m_append(m,
sizeof(mr), (
void *)&mr)) {
2785 "%s: m_append() failed", __func__);
2791 md = m_getptr(m, npbytes -
2799 uint8_t *) + md->m_len -
2813 nims = RB_MIN(ip6_msource_tree,
2816 RB_FOREACH_FROM(ims, ip6_msource_tree, nims) {
2817 CTR2(
KTR_MLD,
"%s: visit node %s", __func__,
2821 CTR3(
KTR_MLD,
"%s: mode: t0 %d, t1 %d",
2822 __func__, then, now);
2825 "%s: skip unchanged", __func__);
2828 if (mode == MCAST_EXCLUDE &&
2829 now == MCAST_INCLUDE) {
2831 "%s: skip IN src on EX group",
2838 if (schanged++ == 0) {
2840 }
else if (crt != nrt)
2842 if (!m_append(m,
sizeof(
struct in6_addr),
2847 "%s: m_append() failed", __func__);
2852 if (++rsrcs == m0srcs)
2863 "%s: m_free(m)", __func__);
2867 "%s: m_adj(m, -mr)", __func__);
2868 m_adj(m, -((
int)
sizeof(
2873 npbytes += (rsrcs *
sizeof(
struct in6_addr));
2883 m->m_pkthdr.PH_vt.vt_nrecs++;
2885 mbufq_enqueue(mq, m);
2887 }
while (nims != NULL);
2892 CTR3(
KTR_MLD,
"%s: queued %d ALLOW_NEW, %d BLOCK_OLD", __func__,
2905 int docopy, domerge;
2924 if (mbufq_first(gq) == NULL) {
2925 CTR2(
KTR_MLD,
"%s: WARNING: queue for inm %p is empty",
2930 m = mbufq_first(gq);
2941 mt = mbufq_last(scq);
2943 recslen = m_length(m, NULL);
2945 if ((mt->m_pkthdr.PH_vt.vt_nrecs +
2946 m->m_pkthdr.PH_vt.vt_nrecs <=
2948 (mt->m_pkthdr.len + recslen <=
2953 if (!domerge && mbufq_full(gq)) {
2955 "%s: outbound queue full, skipping whole packet %p",
2965 CTR2(
KTR_MLD,
"%s: dequeueing %p", __func__, m);
2966 m0 = mbufq_dequeue(gq);
2969 CTR2(
KTR_MLD,
"%s: copying %p", __func__, m);
2970 m0 = m_dup(m, M_NOWAIT);
2973 m0->m_nextpkt = NULL;
2978 CTR3(
KTR_MLD,
"%s: queueing %p to scq %p)",
2980 mbufq_enqueue(scq, m0);
2984 CTR3(
KTR_MLD,
"%s: merging %p with ifscq tail %p)",
2988 m0->m_flags &= ~M_PKTHDR;
2989 mt->m_pkthdr.len += recslen;
2990 mt->m_pkthdr.PH_vt.vt_nrecs +=
2991 m0->m_pkthdr.PH_vt.vt_nrecs;
3006 struct ifmultiaddr *ifma;
3016 (
"%s: called when version %d", __func__, mli->
mli_version));
3024 if (mbufq_len(&mli->
mli_gq) != 0)
3029 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3034 (
"%s: inconsistent ifp", __func__));
3048 CTR2(
KTR_MLD,
"%s: enqueue record = %d",
3064 if (mbufq_first(&mli->
mli_gq) != NULL) {
3081 struct ip6_moptions im6o;
3086 struct ip6_hdr *ip6;
3087 struct mld_hdr *mld;
3093 CTR2(
KTR_MLD,
"%s: transmit %p", __func__, m);
3109 ifp = ifnet_byindex(ifindex);
3111 CTR3(
KTR_MLD,
"%s: dropped %p as ifindex %u went away.",
3112 __func__, m, ifindex);
3118 im6o.im6o_multicast_hlim = 1;
3120 im6o.im6o_multicast_ifp = ifp;
3127 CTR2(
KTR_MLD,
"%s: dropped %p", __func__, m);
3135 m0->m_pkthdr.rcvif = V_loif;
3137 ip6 = mtod(m0,
struct ip6_hdr *);
3153 md = m_getptr(m0,
sizeof(
struct ip6_hdr), &off);
3154 mld = (
struct mld_hdr *)(mtod(md, uint8_t *) + off);
3155 type = mld->mld_type;
3161 CTR3(
KTR_MLD,
"%s: ip6_output(%p) = %d", __func__, m0, error);
3164 ICMP6STAT_INC(icp6s_outhist[type]);
3166 icmp6_ifstat_inc(oifp, ifs6_out_msg);
3168 case MLD_LISTENER_REPORT:
3169 case MLDV2_LISTENER_REPORT:
3170 icmp6_ifstat_inc(oifp, ifs6_out_mldreport);
3172 case MLD_LISTENER_DONE:
3173 icmp6_ifstat_inc(oifp, ifs6_out_mlddone);
3195 struct ip6_hdr *ip6;
3199 KASSERT(ifp != NULL, (
"%s: null ifp", __func__));
3200 KASSERT((m->m_flags & M_PKTHDR),
3201 (
"%s: mbuf chain %p is !M_PKTHDR", __func__, m));
3209 CTR1(
KTR_MLD,
"%s: warning: ia is NULL", __func__);
3211 mh = m_gethdr(M_NOWAIT, MT_DATA);
3218 M_ALIGN(mh,
sizeof(
struct ip6_hdr) +
sizeof(
struct mldv2_report));
3220 mldreclen = m_length(m, NULL);
3221 CTR2(
KTR_MLD,
"%s: mldreclen is %d", __func__, mldreclen);
3223 mh->m_len =
sizeof(
struct ip6_hdr) + sizeof(struct
mldv2_report);
3224 mh->m_pkthdr.len =
sizeof(
struct ip6_hdr) +
3227 ip6 = mtod(mh,
struct ip6_hdr *);
3229 ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
3230 ip6->ip6_vfc |= IPV6_VERSION;
3231 ip6->ip6_nxt = IPPROTO_ICMPV6;
3239 mld->mld_type = MLDV2_LISTENER_REPORT;
3242 mld->mld_v2_reserved = 0;
3243 mld->mld_v2_numrecs = htons(m->m_pkthdr.PH_vt.vt_nrecs);
3244 m->m_pkthdr.PH_vt.vt_nrecs = 0;
3247 mld->mld_cksum =
in6_cksum(mh, IPPROTO_ICMPV6,
3248 sizeof(
struct ip6_hdr),
sizeof(
struct mldv2_report) + mldreclen);
3254mld_rec_type_to_str(
const int type)
3287 CTR1(
KTR_MLD,
"%s: initializing", __func__);
3302 CTR1(
KTR_MLD,
"%s: tearing down", __func__);
3311 CTR1(
KTR_MLD,
"%s: initializing", __func__);
3323 CTR1(
KTR_MLD,
"%s: tearing down", __func__);
3337 return (EOPNOTSUPP);
struct in6_ifaddr * in6ifa_ifpforlinklocal(struct ifnet *ifp, int ignoreflags)
const struct in6_addr in6addr_linklocal_allv2routers
char * ip6_sprintf(char *ip6buf, const struct in6_addr *addr)
const struct in6_addr in6addr_linklocal_allnodes
const struct in6_addr in6addr_any
#define IPV6_ADDR_MC_SCOPE(a)
#define IN6_IS_ADDR_UNSPECIFIED(a)
#define IN6_ARE_ADDR_EQUAL(a, b)
int in6_cksum(struct mbuf *, u_int8_t, u_int32_t, u_int32_t)
#define IN6_IS_ADDR_MULTICAST(a)
#define IPV6_ADDR_SCOPE_LINKLOCAL
#define IPV6_ADDR_SCOPE_NODELOCAL
#define IN6_IS_SCOPE_LINKLOCAL(a)
void in6m_clear_recorded(struct in6_multi *inm)
void in6m_disconnect_locked(struct in6_multi_head *inmh, struct in6_multi *inm)
int in6m_record_source(struct in6_multi *inm, const struct in6_addr *addr)
void in6m_release_list_deferred(struct in6_multi_head *inmh)
void in6m_commit(struct in6_multi *inm)
static __inline void in6m_acquire_locked(struct in6_multi *inm)
static __inline void in6m_rele_locked(struct in6_multi_head *inmh, struct in6_multi *inm)
#define IN6_MULTI_LIST_LOCK()
static __inline struct in6_multi * in6m_ifmultiaddr_get_inm(struct ifmultiaddr *ifma)
static __inline struct in6_multi * in6m_lookup_locked(struct ifnet *ifp, const struct in6_addr *mcaddr)
#define IN6_MULTI_UNLOCK()
#define IN6_MULTI_LIST_UNLOCK()
#define IN6_MULTI_LIST_LOCK_ASSERT()
static __inline uint8_t im6s_get_mode(const struct in6_multi *inm, const struct ip6_msource *ims, uint8_t t)
void ip6_initpktopts(struct ip6_pktopts *opt)
int ip6_output(struct mbuf *m0, struct ip6_pktopts *opt, struct route_in6 *ro, int flags, struct ip6_moptions *im6o, struct ifnet **ifpp, struct inpcb *inp)
#define IP6STAT_INC(name)
#define IP6PO_TEMPADDR_NOTPREFER
static void mld_final_leave(struct in6_multi *, struct mld_ifsoftc *)
static void vnet_mld_init(const void *unused __unused)
static int mld_v2_merge_state_changes(struct in6_multi *, struct mbufq *)
DECLARE_MODULE(mld, mld_mod, SI_SUB_PROTO_MC, SI_ORDER_ANY)
VNET_DEFINE_STATIC(struct timeval, mld_gsrdelay)
int mld_change_state(struct in6_multi *inm, const int delay)
SYSCTL_PROC(_net_inet6_mld, OID_AUTO, gsrdelay, CTLFLAG_VNET|CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_MPSAFE, &VNET_NAME(mld_gsrdelay.tv_sec), 0, sysctl_mld_gsr, "I", "Rate limit for MLDv2 Group-and-Source queries in seconds")
#define MLD_EMBEDSCOPE(pin6, zoneid)
static void mld_v2_dispatch_general_query(struct mld_ifsoftc *)
#define V_current_state_timers_running6
static __inline int mld_is_addr_reported(const struct in6_addr *addr)
static struct mld_raopt mld_ra
static void mli_delete_locked(const struct ifnet *)
static struct mbuf * mld_v2_encap_report(struct ifnet *, struct mbuf *)
struct mld_raopt __packed
static moduledata_t mld_mod
static void mld_v1_update_group(struct in6_multi *, const int)
#define V_interface_timers_running6
static void mld_fasttimo_vnet(struct in6_multi_head *inmh)
static int mld_v1_input_query(struct ifnet *, const struct ip6_hdr *, struct mld_hdr *)
VNET_SYSINIT(vnet_mld_init, SI_SUB_PROTO_MC, SI_ORDER_ANY, vnet_mld_init, NULL)
#define V_state_change_timers_running6
static __inline void mld_save_context(struct mbuf *m, struct ifnet *ifp)
static int mld_v1_transmit_report(struct in6_multi *, const int)
static int mld_v2_enqueue_group_record(struct mbufq *, struct in6_multi *, const int, const int, const int, const int)
static void mld_set_version(struct mld_ifsoftc *, const int)
static int mld_v2_input_query(struct ifnet *, const struct ip6_hdr *, struct mbuf *, struct mldv2_query *, const int, const int)
struct mld_ifsoftc * mld_domifattach(struct ifnet *ifp)
SYSCTL_INT(_net_inet6_mld, OID_AUTO, v1enable, CTLFLAG_RWTUN, &mld_v1enable, 0, "Enable fallback to MLDv1")
static MALLOC_DEFINE(M_MLD, "mld", "mld state")
static struct ip6_pktopts mld_po
static int sysctl_mld_gsr(SYSCTL_HANDLER_ARGS)
static int mld_v2_process_group_query(struct in6_multi *, struct mld_ifsoftc *mli, int, struct mbuf *, struct mldv2_query *, const int)
static __inline void mld_scrub_context(struct mbuf *m)
static void mld_slowtimo_vnet(void)
static int mld_v1_input_report(struct ifnet *, const struct ip6_hdr *, struct mld_hdr *)
static int mld_v2_enqueue_filter_change(struct mbufq *, struct in6_multi *)
static void mld_v1_process_querier_timers(struct mld_ifsoftc *)
VNET_SYSUNINIT(vnet_mld_uninit, SI_SUB_PROTO_MC, SI_ORDER_ANY, vnet_mld_uninit, NULL)
static __inline uint32_t mld_restore_context(struct mbuf *m)
static void mld_dispatch_packet(struct mbuf *)
SYSUNINIT(mld_uninit, SI_SUB_PROTO_MC, SI_ORDER_MIDDLE, mld_uninit, NULL)
int mld_input(struct mbuf **mp, int off, int icmp6len)
static struct mld_ifsoftc * mli_alloc_locked(struct ifnet *)
static void mld_v2_cancel_link_timers(struct mld_ifsoftc *)
static int mld_handle_state_change(struct in6_multi *, struct mld_ifsoftc *)
SYSINIT(mld_init, SI_SUB_PROTO_MC, SI_ORDER_MIDDLE, mld_init, NULL)
void mld_domifdetach(struct ifnet *ifp)
SYSCTL_NODE(_net_inet6, OID_AUTO, mld, CTLFLAG_RW|CTLFLAG_MPSAFE, 0, "IPv6 Multicast Listener Discovery")
static void mld_uninit(void *unused __unused)
static void mld_dispatch_queue(struct mbufq *, int)
static struct mtx mld_mtx
static void vnet_mld_uninit(const void *unused __unused)
static void mld_v2_process_group_timers(struct in6_multi_head *, struct mbufq *, struct mbufq *, struct in6_multi *, const int)
static int mld_modevent(module_t mod, int type, void *unused __unused)
static int mld_initial_join(struct in6_multi *, struct mld_ifsoftc *, const int)
static void mld_v1_process_group_timer(struct in6_multi_head *, struct in6_multi *)
static void mld_init(void *unused __unused)
void mld_ifdetach(struct ifnet *ifp, struct in6_multi_head *inmh)
static int sysctl_mld_ifinfo(SYSCTL_HANDLER_ARGS)
#define MLD_CHANGE_TO_INCLUDE_MODE
#define MLD_V2_REPORT_MAXRECS
#define MLD_MODE_IS_EXCLUDE
#define MLD_MODE_IS_INCLUDE
#define MLD_CHANGE_TO_EXCLUDE_MODE
#define MLD_BLOCK_OLD_SOURCES
#define MLD_ALLOW_NEW_SOURCES
#define MLD_RESPONSE_BURST_INTERVAL
#define MLD_SLEEPING_MEMBER
#define MLD_SG_QUERY_PENDING_MEMBER
#define MLD_G_QUERY_PENDING_MEMBER
#define MLD_AWAKENING_MEMBER
#define MLD_MAX_RESPONSE_BURST
#define MLD_MAX_RESPONSE_PACKETS
#define MLD_MAX_STATE_CHANGE_PACKETS
#define MLD_RANDOM_DELAY(X)
#define MLD_LOCK_DESTROY()
#define MLD_LEAVING_MEMBER
#define MLD_LOCK_ASSERT()
#define MLD_MAX_G_GS_PACKETS
#define MLD_SILENT_MEMBER
#define MLD_REPORTING_MEMBER
#define MLD_MAX_GS_SOURCES
LIST_HEAD(nd_prhead, nd_prefix)
int in6_setscope(struct in6_addr *in6, struct ifnet *ifp, u_int32_t *ret_id)
int in6_clearscope(struct in6_addr *in6)
struct sockaddr_in6 ia_addr
struct mld_ifsoftc * in6m_mli
struct in6_addr in6m_addr
struct ifmultiaddr * in6m_ifma
struct ip6_msource_tree in6m_srcs
struct timeval in6m_lastgsrtv
struct in6_multi::in6m_st in6m_st[2]
struct in6_addr im6s_addr
struct ip6_hbh * ip6po_hbh
int ip6po_prefer_tempaddr
struct in6_addr sin6_addr