FreeBSD kernel IPv6 code
mld6.c
1/*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 2009 Bruce Simpson.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. The name of the author may not be used to endorse or promote
15 * products derived from this software without specific prior written
16 * permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 * $KAME: mld6.c,v 1.27 2001/04/04 05:17:30 itojun Exp $
31 */
32
33/*-
34 * Copyright (c) 1988 Stephen Deering.
35 * Copyright (c) 1992, 1993
36 * The Regents of the University of California. All rights reserved.
37 *
38 * This code is derived from software contributed to Berkeley by
39 * Stephen Deering of Stanford University.
40 *
41 * Redistribution and use in source and binary forms, with or without
42 * modification, are permitted provided that the following conditions
43 * are met:
44 * 1. Redistributions of source code must retain the above copyright
45 * notice, this list of conditions and the following disclaimer.
46 * 2. Redistributions in binary form must reproduce the above copyright
47 * notice, this list of conditions and the following disclaimer in the
48 * documentation and/or other materials provided with the distribution.
49 * 3. Neither the name of the University nor the names of its contributors
50 * may be used to endorse or promote products derived from this software
51 * without specific prior written permission.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63 * SUCH DAMAGE.
64 *
65 * @(#)igmp.c 8.1 (Berkeley) 7/19/93
66 */
67
68#include <sys/cdefs.h>
69__FBSDID("$FreeBSD$");
70
71#include "opt_inet.h"
72#include "opt_inet6.h"
73
74#include <sys/param.h>
75#include <sys/systm.h>
76#include <sys/mbuf.h>
77#include <sys/socket.h>
78#include <sys/protosw.h>
79#include <sys/sysctl.h>
80#include <sys/kernel.h>
81#include <sys/callout.h>
82#include <sys/malloc.h>
83#include <sys/module.h>
84#include <sys/ktr.h>
85
86#include <net/if.h>
87#include <net/if_var.h>
88#include <net/route.h>
89#include <net/vnet.h>
90
91#include <netinet/in.h>
92#include <netinet/in_var.h>
93#include <netinet6/in6_var.h>
94#include <netinet/ip6.h>
95#include <netinet6/ip6_var.h>
96#include <netinet6/scope6_var.h>
97#include <netinet/icmp6.h>
98#include <netinet6/mld6.h>
99#include <netinet6/mld6_var.h>
100
101#include <security/mac/mac_framework.h>
102
103#ifndef KTR_MLD
104#define KTR_MLD KTR_INET6
105#endif
106
107static struct mld_ifsoftc *
108 mli_alloc_locked(struct ifnet *);
109static void mli_delete_locked(const struct ifnet *);
110static void mld_dispatch_packet(struct mbuf *);
111static void mld_dispatch_queue(struct mbufq *, int);
112static void mld_final_leave(struct in6_multi *, struct mld_ifsoftc *);
113static void mld_fasttimo_vnet(struct in6_multi_head *inmh);
114static int mld_handle_state_change(struct in6_multi *,
115 struct mld_ifsoftc *);
116static int mld_initial_join(struct in6_multi *, struct mld_ifsoftc *,
117 const int);
118#ifdef KTR
119static char * mld_rec_type_to_str(const int);
120#endif
121static void mld_set_version(struct mld_ifsoftc *, const int);
122static void mld_slowtimo_vnet(void);
123static int mld_v1_input_query(struct ifnet *, const struct ip6_hdr *,
124 /*const*/ struct mld_hdr *);
125static int mld_v1_input_report(struct ifnet *, const struct ip6_hdr *,
126 /*const*/ struct mld_hdr *);
127static void mld_v1_process_group_timer(struct in6_multi_head *,
128 struct in6_multi *);
129static void mld_v1_process_querier_timers(struct mld_ifsoftc *);
130static int mld_v1_transmit_report(struct in6_multi *, const int);
131static void mld_v1_update_group(struct in6_multi *, const int);
132static void mld_v2_cancel_link_timers(struct mld_ifsoftc *);
133static void mld_v2_dispatch_general_query(struct mld_ifsoftc *);
134static struct mbuf *
135 mld_v2_encap_report(struct ifnet *, struct mbuf *);
136static int mld_v2_enqueue_filter_change(struct mbufq *,
137 struct in6_multi *);
138static int mld_v2_enqueue_group_record(struct mbufq *,
139 struct in6_multi *, const int, const int, const int,
140 const int);
141static int mld_v2_input_query(struct ifnet *, const struct ip6_hdr *,
142 struct mbuf *, struct mldv2_query *, const int, const int);
143static int mld_v2_merge_state_changes(struct in6_multi *,
144 struct mbufq *);
145static void mld_v2_process_group_timers(struct in6_multi_head *,
146 struct mbufq *, struct mbufq *,
147 struct in6_multi *, const int);
148static int mld_v2_process_group_query(struct in6_multi *,
149 struct mld_ifsoftc *mli, int, struct mbuf *,
150 struct mldv2_query *, const int);
151static int sysctl_mld_gsr(SYSCTL_HANDLER_ARGS);
152static int sysctl_mld_ifinfo(SYSCTL_HANDLER_ARGS);
153
154/*
155 * Normative references: RFC 2710, RFC 3590, RFC 3810.
156 *
157 * Locking:
158 * * The MLD subsystem lock ends up being system-wide for the moment,
159 * but could be per-VIMAGE later on.
160 * * The permitted lock order is: IN6_MULTI_LOCK, MLD_LOCK, IF_ADDR_LOCK.
161 * Any may be taken independently; if any are held at the same
162 * time, the above lock order must be followed.
163 * * IN6_MULTI_LOCK covers in_multi.
164 * * MLD_LOCK covers per-link state and any global variables in this file.
165 * * IF_ADDR_LOCK covers if_multiaddrs, which is used for a variety of
166 * per-link state iterators.
167 *
168 * XXX LOR PREVENTION
169 * A special case for IPv6 is the in6_setscope() routine. ip6_output()
170 * will not accept an ifp; it wants an embedded scope ID, unlike
171 * ip_output(), which happily takes the ifp given to it. The embedded
172 * scope ID is only used by MLD to select the outgoing interface.
173 *
174 * During interface attach and detach, MLD will take MLD_LOCK *after*
175 * the IF_AFDATA_LOCK.
176 * As in6_setscope() takes IF_AFDATA_LOCK then SCOPE_LOCK, we can't call
177 * it with MLD_LOCK held without triggering an LOR. A netisr with indirect
178 * dispatch could work around this, but we'd rather not do that, as it
179 * can introduce other races.
180 *
181 * As such, we exploit the fact that the scope ID is just the interface
182 * index, and embed it in the IPv6 destination address accordingly.
183 * This is potentially NOT VALID for MLDv1 reports, as they
184 * are always sent to the multicast group itself; as MLDv2
185 * reports are always sent to ff02::16, this is not an issue
186 * when MLDv2 is in use.
187 *
188 * This does not however eliminate the LOR when ip6_output() itself
189 * calls in6_setscope() internally whilst MLD_LOCK is held. This will
190 * trigger a LOR warning in WITNESS when the ifnet is detached.
191 *
192 * The right answer is probably to make IF_AFDATA_LOCK an rwlock, given
193 * how it's used across the network stack. Here we're simply exploiting
194 * the fact that MLD runs at a similar layer in the stack to scope6.c.
195 *
196 * VIMAGE:
197 * * Each in6_multi corresponds to an ifp, and each ifp corresponds
198 * to a vnet in ifp->if_vnet.
199 */
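/*
 * Illustrative sketch (not part of the original file): the lock order
 * documented above, expressed with the locking macros used elsewhere in
 * this file.  The function name is hypothetical.
 */
#if 0	/* example only */
static void
mld_lock_order_example(struct ifnet *ifp)
{

	IN6_MULTI_LOCK();	/* 1: in6_multi membership state */
	MLD_LOCK();		/* 2: per-link MLD state and file-scope globals */
	IF_ADDR_WLOCK(ifp);	/* 3: if_multiaddrs iteration */
	/* ... per-link, per-group work ... */
	IF_ADDR_WUNLOCK(ifp);
	MLD_UNLOCK();
	IN6_MULTI_UNLOCK();
}
#endif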
200static struct mtx mld_mtx;
201static MALLOC_DEFINE(M_MLD, "mld", "mld state");
202
203#define MLD_EMBEDSCOPE(pin6, zoneid) \
204 if (IN6_IS_SCOPE_LINKLOCAL(pin6) || \
205 IN6_IS_ADDR_MC_INTFACELOCAL(pin6)) \
206 (pin6)->s6_addr16[1] = htons((zoneid) & 0xFFFF) \
207
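/*
 * Illustrative example: with the KAME convention above, ff02::1 received on
 * the interface with index 3 is carried internally as ff02:3::1; the embedded
 * index is stripped again with in6_clearscope() before the address is sent on
 * the wire or exposed to userland.
 */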
208/*
209 * VIMAGE-wide globals.
210 */
211VNET_DEFINE_STATIC(struct timeval, mld_gsrdelay) = {10, 0};
212VNET_DEFINE_STATIC(LIST_HEAD(, mld_ifsoftc), mli_head);
213VNET_DEFINE_STATIC(int, interface_timers_running6);
214VNET_DEFINE_STATIC(int, state_change_timers_running6);
215VNET_DEFINE_STATIC(int, current_state_timers_running6);
216
217#define V_mld_gsrdelay VNET(mld_gsrdelay)
218#define V_mli_head VNET(mli_head)
219#define V_interface_timers_running6 VNET(interface_timers_running6)
220#define V_state_change_timers_running6 VNET(state_change_timers_running6)
221#define V_current_state_timers_running6 VNET(current_state_timers_running6)
222
223SYSCTL_DECL(_net_inet6); /* Note: Not in any common header. */
224
225SYSCTL_NODE(_net_inet6, OID_AUTO, mld, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
226 "IPv6 Multicast Listener Discovery");
227
228/*
229 * Virtualized sysctls.
230 */
231SYSCTL_PROC(_net_inet6_mld, OID_AUTO, gsrdelay,
232 CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
233 &VNET_NAME(mld_gsrdelay.tv_sec), 0, sysctl_mld_gsr, "I",
234 "Rate limit for MLDv2 Group-and-Source queries in seconds");
235
236/*
237 * Non-virtualized sysctls.
238 */
239static SYSCTL_NODE(_net_inet6_mld, OID_AUTO, ifinfo,
240 CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_mld_ifinfo,
241 "Per-interface MLDv2 state");
242
243static int mld_v1enable = 1;
244SYSCTL_INT(_net_inet6_mld, OID_AUTO, v1enable, CTLFLAG_RWTUN,
245 &mld_v1enable, 0, "Enable fallback to MLDv1");
246
247static int mld_v2enable = 1;
248SYSCTL_INT(_net_inet6_mld, OID_AUTO, v2enable, CTLFLAG_RWTUN,
249 &mld_v2enable, 0, "Enable MLDv2");
250
251static int mld_use_allow = 1;
252SYSCTL_INT(_net_inet6_mld, OID_AUTO, use_allow, CTLFLAG_RWTUN,
253 &mld_use_allow, 0, "Use ALLOW/BLOCK for RFC 4604 SSM joins/leaves");
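/*
 * Illustrative note: the knobs above surface as net.inet6.mld.v1enable,
 * net.inet6.mld.v2enable and net.inet6.mld.use_allow; since they are
 * CTLFLAG_RWTUN they may also be preset as loader tunables of the same names.
 */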
254
255/*
256 * Packed Router Alert option structure declaration.
257 */
258struct mld_raopt {
259 struct ip6_hbh hbh;
260 struct ip6_opt pad;
261 struct ip6_opt_router ra;
262} __packed;
263
264/*
265 * Router Alert hop-by-hop option header.
266 */
267static struct mld_raopt mld_ra = {
268 .hbh = { 0, 0 },
269 .pad = { .ip6o_type = IP6OPT_PADN, 0 },
270 .ra = {
271 .ip6or_type = IP6OPT_ROUTER_ALERT,
272 .ip6or_len = IP6OPT_RTALERT_LEN - 2,
273 .ip6or_value[0] = ((IP6OPT_RTALERT_MLD >> 8) & 0xFF),
274 .ip6or_value[1] = (IP6OPT_RTALERT_MLD & 0xFF)
275 }
276};
277static struct ip6_pktopts mld_po;
278
279static __inline void
280mld_save_context(struct mbuf *m, struct ifnet *ifp)
281{
282
283#ifdef VIMAGE
284 m->m_pkthdr.PH_loc.ptr = ifp->if_vnet;
285#endif /* VIMAGE */
286 m->m_pkthdr.rcvif = ifp;
287 m->m_pkthdr.flowid = ifp->if_index;
288}
289
290static __inline void
291mld_scrub_context(struct mbuf *m)
292{
293
294 m->m_pkthdr.PH_loc.ptr = NULL;
295 m->m_pkthdr.flowid = 0;
296}
297
298/*
299 * Restore context from a queued output chain.
300 * Return saved ifindex.
301 *
302 * VIMAGE: The assertion is there to make sure that we
303 * actually called CURVNET_SET() with what's in the mbuf chain.
304 */
305static __inline uint32_t
306mld_restore_context(struct mbuf *m)
307{
308
309#if defined(VIMAGE) && defined(INVARIANTS)
310 KASSERT(curvnet == m->m_pkthdr.PH_loc.ptr,
311 ("%s: called when curvnet was not restored: cuvnet %p m ptr %p",
312 __func__, curvnet, m->m_pkthdr.PH_loc.ptr));
313#endif
314 return (m->m_pkthdr.flowid);
315}
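/*
 * Illustrative sketch (not part of the original file): how the context
 * helpers above pair up around the deferred-transmit queues.  The function
 * name is hypothetical; the real consumers are the enqueue paths and
 * mld_dispatch_packet() later in this file.
 */
#if 0	/* example only */
static void
mld_context_example(struct mbufq *mq, struct ifnet *ifp, struct mbuf *m)
{
	uint32_t ifindex;

	mld_save_context(m, ifp);		/* stash vnet + ifindex */
	(void)mbufq_enqueue(mq, m);
	/* ... later, from the timeout path, with curvnet already set ... */
	m = mbufq_dequeue(mq);
	ifindex = mld_restore_context(m);	/* recover the saved ifindex */
	mld_scrub_context(m);			/* clear before handing off */
	(void)ifindex;
}
#endif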
316
317/*
318 * Retrieve or set threshold between group-source queries in seconds.
319 *
320 * VIMAGE: Assume curvnet set by caller.
321 * SMPng: NOTE: Serialized by MLD lock.
322 */
323static int
324sysctl_mld_gsr(SYSCTL_HANDLER_ARGS)
325{
326 int error;
327 int i;
328
329 error = sysctl_wire_old_buffer(req, sizeof(int));
330 if (error)
331 return (error);
332
333 MLD_LOCK();
334
335 i = V_mld_gsrdelay.tv_sec;
336
337 error = sysctl_handle_int(oidp, &i, 0, req);
338 if (error || !req->newptr)
339 goto out_locked;
340
341 if (i < -1 || i >= 60) {
342 error = EINVAL;
343 goto out_locked;
344 }
345
346 CTR2(KTR_MLD, "change mld_gsrdelay from %d to %d",
347 V_mld_gsrdelay.tv_sec, i);
348 V_mld_gsrdelay.tv_sec = i;
349
350out_locked:
351 MLD_UNLOCK();
352 return (error);
353}
354
355/*
356 * Expose struct mld_ifsoftc to userland, keyed by ifindex.
357 * For use by ifmcstat(8).
358 *
359 * VIMAGE: Assume curvnet set by caller. The node handler itself
360 * is not directly virtualized.
361 */
362static int
363sysctl_mld_ifinfo(SYSCTL_HANDLER_ARGS)
364{
365 struct epoch_tracker et;
366 int *name;
367 int error;
368 u_int namelen;
369 struct ifnet *ifp;
370 struct mld_ifsoftc *mli;
371
372 name = (int *)arg1;
373 namelen = arg2;
374
375 if (req->newptr != NULL)
376 return (EPERM);
377
378 if (namelen != 1)
379 return (EINVAL);
380
381 error = sysctl_wire_old_buffer(req, sizeof(struct mld_ifinfo));
382 if (error)
383 return (error);
384
387 MLD_LOCK();
388 NET_EPOCH_ENTER(et);
389
390 error = ENOENT;
391 ifp = ifnet_byindex(name[0]);
392 if (ifp == NULL)
393 goto out_locked;
394
395 LIST_FOREACH(mli, &V_mli_head, mli_link) {
396 if (ifp == mli->mli_ifp) {
397 struct mld_ifinfo info;
398
399 info.mli_version = mli->mli_version;
400 info.mli_v1_timer = mli->mli_v1_timer;
401 info.mli_v2_timer = mli->mli_v2_timer;
402 info.mli_flags = mli->mli_flags;
403 info.mli_rv = mli->mli_rv;
404 info.mli_qi = mli->mli_qi;
405 info.mli_qri = mli->mli_qri;
406 info.mli_uri = mli->mli_uri;
407 error = SYSCTL_OUT(req, &info, sizeof(info));
408 break;
409 }
410 }
411
412out_locked:
413 NET_EPOCH_EXIT(et);
414 MLD_UNLOCK();
417 return (error);
418}
419
420/*
421 * Dispatch an entire queue of pending packet chains.
422 * VIMAGE: Assumes the vnet pointer has been set.
423 */
424static void
425mld_dispatch_queue(struct mbufq *mq, int limit)
426{
427 struct mbuf *m;
428
429 while ((m = mbufq_dequeue(mq)) != NULL) {
430 CTR3(KTR_MLD, "%s: dispatch %p from %p", __func__, mq, m);
432 if (--limit == 0)
433 break;
434 }
435}
436
437/*
438 * Filter outgoing MLD report state by group.
439 *
440 * Reports are ALWAYS suppressed for ALL-HOSTS (ff02::1)
441 * and node-local addresses. However, kernel and socket consumers
442 * always embed the KAME scope ID in the address provided, so strip it
443 * when performing comparison.
444 * Note: This is not the same as the *multicast* scope.
445 *
446 * Return zero if the given group is one for which MLD reports
447 * should be suppressed, or non-zero if reports should be issued.
448 */
449static __inline int
450mld_is_addr_reported(const struct in6_addr *addr)
451{
452
453 KASSERT(IN6_IS_ADDR_MULTICAST(addr), ("%s: not multicast", __func__));
454
456 return (0);
457
459 struct in6_addr tmp = *addr;
460 in6_clearscope(&tmp);
462 return (0);
463 }
464
465 return (1);
466}
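/*
 * Illustrative examples for the filter above: ff02::1 (all-nodes) and
 * interface-local groups such as ff01::1 are never reported, whereas an
 * ordinary link-local group such as a solicited-node address
 * (ff02::1:ffxx:xxxx) is.
 */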
467
468/*
469 * Attach MLD when PF_INET6 is attached to an interface.
470 *
471 * SMPng: Normally called with IF_AFDATA_LOCK held.
472 */
473struct mld_ifsoftc *
474mld_domifattach(struct ifnet *ifp)
475{
476 struct mld_ifsoftc *mli;
477
478 CTR3(KTR_MLD, "%s: called for ifp %p(%s)",
479 __func__, ifp, if_name(ifp));
480
481 MLD_LOCK();
482
483 mli = mli_alloc_locked(ifp);
484 if (!(ifp->if_flags & IFF_MULTICAST))
485 mli->mli_flags |= MLIF_SILENT;
486 if (mld_use_allow)
487 mli->mli_flags |= MLIF_USEALLOW;
488
489 MLD_UNLOCK();
490
491 return (mli);
492}
493
494/*
495 * VIMAGE: assume curvnet set by caller.
496 */
497static struct mld_ifsoftc *
498mli_alloc_locked(/*const*/ struct ifnet *ifp)
499{
500 struct mld_ifsoftc *mli;
501
503
504 mli = malloc(sizeof(struct mld_ifsoftc), M_MLD, M_NOWAIT|M_ZERO);
505 if (mli == NULL)
506 goto out;
507
508 mli->mli_ifp = ifp;
510 mli->mli_flags = 0;
511 mli->mli_rv = MLD_RV_INIT;
512 mli->mli_qi = MLD_QI_INIT;
513 mli->mli_qri = MLD_QRI_INIT;
514 mli->mli_uri = MLD_URI_INIT;
515 mbufq_init(&mli->mli_gq, MLD_MAX_RESPONSE_PACKETS);
516
517 LIST_INSERT_HEAD(&V_mli_head, mli, mli_link);
518
519 CTR2(KTR_MLD, "allocate mld_ifsoftc for ifp %p(%s)",
520 ifp, if_name(ifp));
521
522out:
523 return (mli);
524}
525
526/*
527 * Hook for ifdetach.
528 *
529 * NOTE: Some finalization tasks need to run before the protocol domain
530 * is detached, but also before the link layer does its cleanup.
531 * Run before link-layer cleanup; cleanup groups, but do not free MLD state.
532 *
533 * SMPng: Caller must hold IN6_MULTI_LOCK().
534 * Must take IF_ADDR_LOCK() to cover if_multiaddrs iterator.
535 * XXX This routine is also bitten by unlocked ifma_protospec access.
536 */
537void
538mld_ifdetach(struct ifnet *ifp, struct in6_multi_head *inmh)
539{
540 struct epoch_tracker et;
541 struct mld_ifsoftc *mli;
542 struct ifmultiaddr *ifma;
543 struct in6_multi *inm;
544
545 CTR3(KTR_MLD, "%s: called for ifp %p(%s)", __func__, ifp,
546 if_name(ifp));
547
549 MLD_LOCK();
550
551 mli = MLD_IFINFO(ifp);
552 IF_ADDR_WLOCK(ifp);
553 /*
554 * Extract list of in6_multi associated with the detaching ifp
555 * which the PF_INET6 layer is about to release.
556 */
557 NET_EPOCH_ENTER(et);
558 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
559 inm = in6m_ifmultiaddr_get_inm(ifma);
560 if (inm == NULL)
561 continue;
562 in6m_disconnect_locked(inmh, inm);
563
564 if (mli->mli_version == MLD_VERSION_2) {
566
567 /*
568 * We need to release the final reference held
569 * for issuing the INCLUDE {}.
570 */
571 if (inm->in6m_state == MLD_LEAVING_MEMBER) {
573 in6m_rele_locked(inmh, inm);
574 }
575 }
576 }
577 NET_EPOCH_EXIT(et);
578 IF_ADDR_WUNLOCK(ifp);
579 MLD_UNLOCK();
580}
581
582/*
583 * Hook for domifdetach.
584 * Runs after link-layer cleanup; free MLD state.
585 *
586 * SMPng: Normally called with IF_AFDATA_LOCK held.
587 */
588void
589mld_domifdetach(struct ifnet *ifp)
590{
591
592 CTR3(KTR_MLD, "%s: called for ifp %p(%s)",
593 __func__, ifp, if_name(ifp));
594
595 MLD_LOCK();
597 MLD_UNLOCK();
598}
599
600static void
601mli_delete_locked(const struct ifnet *ifp)
602{
603 struct mld_ifsoftc *mli, *tmli;
604
605 CTR3(KTR_MLD, "%s: freeing mld_ifsoftc for ifp %p(%s)",
606 __func__, ifp, if_name(ifp));
607
609
610 LIST_FOREACH_SAFE(mli, &V_mli_head, mli_link, tmli) {
611 if (mli->mli_ifp == ifp) {
612 /*
613 * Free deferred General Query responses.
614 */
615 mbufq_drain(&mli->mli_gq);
616
617 LIST_REMOVE(mli, mli_link);
618
619 free(mli, M_MLD);
620 return;
621 }
622 }
623}
624
625/*
626 * Process a received MLDv1 general or address-specific query.
627 * Assumes that the query header has been pulled up to sizeof(mld_hdr).
628 *
629 * NOTE: Can't be fully const correct as we temporarily embed scope ID in
630 * mld_addr. This is OK as we own the mbuf chain.
631 */
632static int
633mld_v1_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6,
634 /*const*/ struct mld_hdr *mld)
635{
636 struct ifmultiaddr *ifma;
637 struct mld_ifsoftc *mli;
638 struct in6_multi *inm;
639 int is_general_query;
640 uint16_t timer;
641#ifdef KTR
642 char ip6tbuf[INET6_ADDRSTRLEN];
643#endif
644
645 NET_EPOCH_ASSERT();
646
647 is_general_query = 0;
648
649 if (!mld_v1enable) {
650 CTR3(KTR_MLD, "ignore v1 query %s on ifp %p(%s)",
651 ip6_sprintf(ip6tbuf, &mld->mld_addr),
652 ifp, if_name(ifp));
653 return (0);
654 }
655
656 /*
657 * RFC3810 Section 6.2: MLD queries must originate from
658 * a router's link-local address.
659 */
660 if (!IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
661 CTR3(KTR_MLD, "ignore v1 query src %s on ifp %p(%s)",
662 ip6_sprintf(ip6tbuf, &ip6->ip6_src),
663 ifp, if_name(ifp));
664 return (0);
665 }
666
667 /*
668 * Do address field validation upfront before we accept
669 * the query.
670 */
671 if (IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) {
672 /*
673 * MLDv1 General Query.
674 * If this was not sent to the all-nodes group, ignore it.
675 */
676 struct in6_addr dst;
677
678 dst = ip6->ip6_dst;
679 in6_clearscope(&dst);
681 return (EINVAL);
682 is_general_query = 1;
683 } else {
684 /*
685 * Embed scope ID of receiving interface in MLD query for
686 * lookup whilst we don't hold other locks.
687 */
688 in6_setscope(&mld->mld_addr, ifp, NULL);
689 }
690
692 MLD_LOCK();
693
694 /*
695 * Switch to MLDv1 host compatibility mode.
696 */
697 mli = MLD_IFINFO(ifp);
698 KASSERT(mli != NULL, ("%s: no mld_ifsoftc for ifp %p", __func__, ifp));
700
701 timer = (ntohs(mld->mld_maxdelay) * PR_FASTHZ) / MLD_TIMER_SCALE;
702 if (timer == 0)
703 timer = 1;
704
705 if (is_general_query) {
706 /*
707 * For each reporting group joined on this
708 * interface, kick the report timer.
709 */
710 CTR2(KTR_MLD, "process v1 general query on ifp %p(%s)",
711 ifp, if_name(ifp));
712 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
713 inm = in6m_ifmultiaddr_get_inm(ifma);
714 if (inm == NULL)
715 continue;
716 mld_v1_update_group(inm, timer);
717 }
718 } else {
719 /*
720 * MLDv1 Group-Specific Query.
721 * If this is a group-specific MLDv1 query, we need only
722 * look up the single group to process it.
723 */
724 inm = in6m_lookup_locked(ifp, &mld->mld_addr);
725 if (inm != NULL) {
726 CTR3(KTR_MLD, "process v1 query %s on ifp %p(%s)",
727 ip6_sprintf(ip6tbuf, &mld->mld_addr),
728 ifp, if_name(ifp));
729 mld_v1_update_group(inm, timer);
730 }
731 /* XXX Clear embedded scope ID as userland won't expect it. */
732 in6_clearscope(&mld->mld_addr);
733 }
734
735 MLD_UNLOCK();
737
738 return (0);
739}
740
741/*
742 * Update the report timer on a group in response to an MLDv1 query.
743 *
744 * If we are becoming the reporting member for this group, start the timer.
745 * If we already are the reporting member for this group, and timer is
746 * below the threshold, reset it.
747 *
748 * We may be updating the group for the first time since we switched
749 * to MLDv2. If we are, then we must clear any recorded source lists,
750 * and transition to REPORTING state; the group timer is overloaded
751 * for group and group-source query responses.
752 *
753 * Unlike MLDv2, the delay per group should be jittered
754 * to avoid bursts of MLDv1 reports.
755 */
756static void
757mld_v1_update_group(struct in6_multi *inm, const int timer)
758{
759#ifdef KTR
760 char ip6tbuf[INET6_ADDRSTRLEN];
761#endif
762
763 CTR4(KTR_MLD, "%s: %s/%s timer=%d", __func__,
764 ip6_sprintf(ip6tbuf, &inm->in6m_addr),
765 if_name(inm->in6m_ifp), timer);
766
768
769 switch (inm->in6m_state) {
770 case MLD_NOT_MEMBER:
772 break;
774 if (inm->in6m_timer != 0 &&
775 inm->in6m_timer <= timer) {
776 CTR1(KTR_MLD, "%s: REPORTING and timer running, "
777 "skipping.", __func__);
778 break;
779 }
780 /* FALLTHROUGH */
783 case MLD_IDLE_MEMBER:
784 case MLD_LAZY_MEMBER:
786 CTR1(KTR_MLD, "%s: ->REPORTING", __func__);
788 inm->in6m_timer = MLD_RANDOM_DELAY(timer);
790 break;
792 CTR1(KTR_MLD, "%s: ->AWAKENING", __func__);
794 break;
796 break;
797 }
798}
799
800/*
801 * Process a received MLDv2 general, group-specific or
802 * group-and-source-specific query.
803 *
804 * Assumes that mld points to a struct mldv2_query which is stored in
805 * contiguous memory.
806 *
807 * Return 0 if successful, otherwise an appropriate error code is returned.
808 */
809static int
810mld_v2_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6,
811 struct mbuf *m, struct mldv2_query *mld, const int off, const int icmp6len)
812{
813 struct mld_ifsoftc *mli;
814 struct in6_multi *inm;
815 uint32_t maxdelay, nsrc, qqi;
816 int is_general_query;
817 uint16_t timer;
818 uint8_t qrv;
819#ifdef KTR
820 char ip6tbuf[INET6_ADDRSTRLEN];
821#endif
822
823 NET_EPOCH_ASSERT();
824
825 if (!mld_v2enable) {
826 CTR3(KTR_MLD, "ignore v2 query src %s on ifp %p(%s)",
827 ip6_sprintf(ip6tbuf, &ip6->ip6_src),
828 ifp, if_name(ifp));
829 return (0);
830 }
831
832 /*
833 * RFC3810 Section 6.2: MLD queries must originate from
834 * a router's link-local address.
835 */
836 if (!IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
837 CTR3(KTR_MLD, "ignore v1 query src %s on ifp %p(%s)",
838 ip6_sprintf(ip6tbuf, &ip6->ip6_src),
839 ifp, if_name(ifp));
840 return (0);
841 }
842
843 is_general_query = 0;
844
845 CTR2(KTR_MLD, "input v2 query on ifp %p(%s)", ifp, if_name(ifp));
846
847 maxdelay = ntohs(mld->mld_maxdelay); /* in 1/10ths of a second */
848 if (maxdelay >= 32768) {
849 maxdelay = (MLD_MRC_MANT(maxdelay) | 0x1000) <<
850 (MLD_MRC_EXP(maxdelay) + 3);
851 }
852 timer = (maxdelay * PR_FASTHZ) / MLD_TIMER_SCALE;
853 if (timer == 0)
854 timer = 1;
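/*
 * Worked example (illustrative, assuming the usual mantissa/exponent split
 * of low 12 bits / next 3 bits): maximum response codes >= 32768 use the
 * RFC 3810 floating-point form, so a code of 0x8000 has exponent 0 and
 * mantissa 0 and decodes above to (0x0000 | 0x1000) << (0 + 3) = 32768
 * time units before being scaled into fast-timeout ticks.
 */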
855
856 qrv = MLD_QRV(mld->mld_misc);
857 if (qrv < 2) {
858 CTR3(KTR_MLD, "%s: clamping qrv %d to %d", __func__,
859 qrv, MLD_RV_INIT);
860 qrv = MLD_RV_INIT;
861 }
862
863 qqi = mld->mld_qqi;
864 if (qqi >= 128) {
865 qqi = MLD_QQIC_MANT(mld->mld_qqi) <<
866 (MLD_QQIC_EXP(mld->mld_qqi) + 3);
867 }
868
869 nsrc = ntohs(mld->mld_numsrc);
870 if (nsrc > MLD_MAX_GS_SOURCES)
871 return (EMSGSIZE);
872 if (icmp6len < sizeof(struct mldv2_query) +
873 (nsrc * sizeof(struct in6_addr)))
874 return (EMSGSIZE);
875
876 /*
877 * Do further input validation upfront to avoid resetting timers
878 * should we need to discard this query.
879 */
881 /*
882 * A general query with a source list has undefined
883 * behaviour; discard it.
884 */
885 if (nsrc > 0)
886 return (EINVAL);
887 is_general_query = 1;
888 } else {
889 /*
890 * Embed scope ID of receiving interface in MLD query for
891 * lookup whilst we don't hold other locks (due to KAME
892 * locking lameness). We own this mbuf chain just now.
893 */
894 in6_setscope(&mld->mld_addr, ifp, NULL);
895 }
896
898 MLD_LOCK();
899
900 mli = MLD_IFINFO(ifp);
901 KASSERT(mli != NULL, ("%s: no mld_ifsoftc for ifp %p", __func__, ifp));
902
903 /*
904 * Discard the v2 query if we're in Compatibility Mode.
905 * The RFC is pretty clear that hosts need to stay in MLDv1 mode
906 * until the Old Version Querier Present timer expires.
907 */
908 if (mli->mli_version != MLD_VERSION_2)
909 goto out_locked;
910
912 mli->mli_rv = qrv;
913 mli->mli_qi = qqi;
914 mli->mli_qri = maxdelay;
915
916 CTR4(KTR_MLD, "%s: qrv %d qi %d maxdelay %d", __func__, qrv, qqi,
917 maxdelay);
918
919 if (is_general_query) {
920 /*
921 * MLDv2 General Query.
922 *
923 * Schedule a current-state report on this ifp for
924 * all groups, possibly containing source lists.
925 *
926 * If there is a pending General Query response
927 * scheduled earlier than the selected delay, do
928 * not schedule any other reports.
929 * Otherwise, reset the interface timer.
930 */
931 CTR2(KTR_MLD, "process v2 general query on ifp %p(%s)",
932 ifp, if_name(ifp));
933 if (mli->mli_v2_timer == 0 || mli->mli_v2_timer >= timer) {
934 mli->mli_v2_timer = MLD_RANDOM_DELAY(timer);
936 }
937 } else {
938 /*
939 * MLDv2 Group-specific or Group-and-source-specific Query.
940 *
941 * Group-source-specific queries are throttled on
942 * a per-group basis to defeat denial-of-service attempts.
943 * Queries for groups we are not a member of on this
944 * link are simply ignored.
945 */
946 inm = in6m_lookup_locked(ifp, &mld->mld_addr);
947 if (inm == NULL)
948 goto out_locked;
949 if (nsrc > 0) {
950 if (!ratecheck(&inm->in6m_lastgsrtv,
951 &V_mld_gsrdelay)) {
952 CTR1(KTR_MLD, "%s: GS query throttled.",
953 __func__);
954 goto out_locked;
955 }
956 }
957 CTR2(KTR_MLD, "process v2 group query on ifp %p(%s)",
958 ifp, if_name(ifp));
959 /*
960 * If there is a pending General Query response
961 * scheduled sooner than the selected delay, no
962 * further report need be scheduled.
963 * Otherwise, prepare to respond to the
964 * group-specific or group-and-source query.
965 */
966 if (mli->mli_v2_timer == 0 || mli->mli_v2_timer >= timer)
967 mld_v2_process_group_query(inm, mli, timer, m, mld, off);
968
969 /* XXX Clear embedded scope ID as userland won't expect it. */
971 }
972
973out_locked:
974 MLD_UNLOCK();
976
977 return (0);
978}
979
980/*
981 * Process a received MLDv2 group-specific or group-and-source-specific
982 * query.
983 * Return <0 if any error occurred. Currently this is ignored.
984 */
985static int
986mld_v2_process_group_query(struct in6_multi *inm, struct mld_ifsoftc *mli,
987 int timer, struct mbuf *m0, struct mldv2_query *mld, const int off)
988{
989 int retval;
990 uint16_t nsrc;
991
994
995 retval = 0;
996
997 switch (inm->in6m_state) {
998 case MLD_NOT_MEMBER:
1001 case MLD_LAZY_MEMBER:
1003 case MLD_IDLE_MEMBER:
1004 case MLD_LEAVING_MEMBER:
1005 return (retval);
1006 break;
1010 break;
1011 }
1012
1013 nsrc = ntohs(mld->mld_numsrc);
1014
1015 /* Length should be checked by calling function. */
1016 KASSERT((m0->m_flags & M_PKTHDR) == 0 ||
1017 m0->m_pkthdr.len >= off + sizeof(struct mldv2_query) +
1018 nsrc * sizeof(struct in6_addr),
1019 ("mldv2 packet is too short: (%d bytes < %zd bytes, m=%p)",
1020 m0->m_pkthdr.len, off + sizeof(struct mldv2_query) +
1021 nsrc * sizeof(struct in6_addr), m0));
1022
1023 /*
1024 * Deal with group-specific queries upfront.
1025 * If any group query is already pending, purge any recorded
1026 * source-list state if it exists, and schedule a query response
1027 * for this group-specific query.
1028 */
1029 if (nsrc == 0) {
1033 timer = min(inm->in6m_timer, timer);
1034 }
1036 inm->in6m_timer = MLD_RANDOM_DELAY(timer);
1038 return (retval);
1039 }
1040
1041 /*
1042 * Deal with the case where a group-and-source-specific query has
1043 * been received but a group-specific query is already pending.
1044 */
1046 timer = min(inm->in6m_timer, timer);
1047 inm->in6m_timer = MLD_RANDOM_DELAY(timer);
1049 return (retval);
1050 }
1051
1052 /*
1053 * Finally, deal with the case where a group-and-source-specific
1054 * query has been received, where a response to a previous g-s-r
1055 * query exists, or none exists.
1056 * In this case, we need to parse the source-list which the Querier
1057 * has provided us with and check if we have any source list filter
1058 * entries at T1 for these sources. If we do not, there is no need to
1059 * schedule a report and the query may be dropped.
1060 * If we do, we must record them and schedule a current-state
1061 * report for those sources.
1062 */
1063 if (inm->in6m_nsrc > 0) {
1064 struct in6_addr srcaddr;
1065 int i, nrecorded;
1066 int soff;
1067
1068 soff = off + sizeof(struct mldv2_query);
1069 nrecorded = 0;
1070 for (i = 0; i < nsrc; i++) {
1071 m_copydata(m0, soff, sizeof(struct in6_addr),
1072 (caddr_t)&srcaddr);
1073 retval = in6m_record_source(inm, &srcaddr);
1074 if (retval < 0)
1075 break;
1076 nrecorded += retval;
1077 soff += sizeof(struct in6_addr);
1078 }
1079 if (nrecorded > 0) {
1080 CTR1(KTR_MLD,
1081 "%s: schedule response to SG query", __func__);
1083 inm->in6m_timer = MLD_RANDOM_DELAY(timer);
1085 }
1086 }
1087
1088 return (retval);
1089}
1090
1091/*
1092 * Process a received MLDv1 host membership report.
1093 * Assumes mld points to mld_hdr in pulled up mbuf chain.
1094 *
1095 * NOTE: Can't be fully const correct as we temporarily embed scope ID in
1096 * mld_addr. This is OK as we own the mbuf chain.
1097 */
1098static int
1099mld_v1_input_report(struct ifnet *ifp, const struct ip6_hdr *ip6,
1100 /*const*/ struct mld_hdr *mld)
1101{
1102 struct in6_addr src, dst;
1103 struct in6_ifaddr *ia;
1104 struct in6_multi *inm;
1105#ifdef KTR
1106 char ip6tbuf[INET6_ADDRSTRLEN];
1107#endif
1108
1109 NET_EPOCH_ASSERT();
1110
1111 if (!mld_v1enable) {
1112 CTR3(KTR_MLD, "ignore v1 report %s on ifp %p(%s)",
1113 ip6_sprintf(ip6tbuf, &mld->mld_addr),
1114 ifp, if_name(ifp));
1115 return (0);
1116 }
1117
1118 if (ifp->if_flags & IFF_LOOPBACK)
1119 return (0);
1120
1121 /*
1122 * MLDv1 reports must originate from a host's link-local address,
1123 * or the unspecified address (when booting).
1124 */
1125 src = ip6->ip6_src;
1126 in6_clearscope(&src);
1127 if (!IN6_IS_SCOPE_LINKLOCAL(&src) && !IN6_IS_ADDR_UNSPECIFIED(&src)) {
1128 CTR3(KTR_MLD, "ignore v1 query src %s on ifp %p(%s)",
1129 ip6_sprintf(ip6tbuf, &ip6->ip6_src),
1130 ifp, if_name(ifp));
1131 return (EINVAL);
1132 }
1133
1134 /*
1135 * RFC2710 Section 4: MLDv1 reports must pertain to a multicast
1136 * group, and must be directed to the group itself.
1137 */
1138 dst = ip6->ip6_dst;
1139 in6_clearscope(&dst);
1140 if (!IN6_IS_ADDR_MULTICAST(&mld->mld_addr) ||
1141 !IN6_ARE_ADDR_EQUAL(&mld->mld_addr, &dst)) {
1142 CTR3(KTR_MLD, "ignore v1 query dst %s on ifp %p(%s)",
1143 ip6_sprintf(ip6tbuf, &ip6->ip6_dst),
1144 ifp, if_name(ifp));
1145 return (EINVAL);
1146 }
1147
1148 /*
1149 * Make sure we don't hear our own membership report, as fast
1150 * leave requires knowing that we are the only member of a
1151 * group. Assume we used the link-local address if available,
1152 * otherwise look for ::.
1153 *
1154 * XXX Note that scope ID comparison is needed for the address
1155 * returned by in6ifa_ifpforlinklocal(), but SHOULD NOT be
1156 * performed for the on-wire address.
1157 */
1158 ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST);
1159 if ((ia && IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, IA6_IN6(ia))) ||
1160 (ia == NULL && IN6_IS_ADDR_UNSPECIFIED(&src))) {
1161 if (ia != NULL)
1162 ifa_free(&ia->ia_ifa);
1163 return (0);
1164 }
1165 if (ia != NULL)
1166 ifa_free(&ia->ia_ifa);
1167
1168 CTR3(KTR_MLD, "process v1 report %s on ifp %p(%s)",
1169 ip6_sprintf(ip6tbuf, &mld->mld_addr), ifp, if_name(ifp));
1170
1171 /*
1172 * Embed scope ID of receiving interface in MLD query for lookup
1173 * whilst we don't hold other locks (due to KAME locking lameness).
1174 */
1175 if (!IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr))
1176 in6_setscope(&mld->mld_addr, ifp, NULL);
1177
1179 MLD_LOCK();
1180
1181 /*
1182 * MLDv1 report suppression.
1183 * If we are a member of this group, and our membership should be
1184 * reported, and our group timer is pending or about to be reset,
1185 * stop our group timer by transitioning to the 'lazy' state.
1186 */
1187 inm = in6m_lookup_locked(ifp, &mld->mld_addr);
1188 if (inm != NULL) {
1189 struct mld_ifsoftc *mli;
1190
1191 mli = inm->in6m_mli;
1192 KASSERT(mli != NULL,
1193 ("%s: no mli for ifp %p", __func__, ifp));
1194
1195 /*
1196 * If we are in MLDv2 host mode, do not allow the
1197 * other host's MLDv1 report to suppress our reports.
1198 */
1199 if (mli->mli_version == MLD_VERSION_2)
1200 goto out_locked;
1201
1202 inm->in6m_timer = 0;
1203
1204 switch (inm->in6m_state) {
1205 case MLD_NOT_MEMBER:
1206 case MLD_SILENT_MEMBER:
1208 break;
1210 case MLD_IDLE_MEMBER:
1212 CTR3(KTR_MLD,
1213 "report suppressed for %s on ifp %p(%s)",
1214 ip6_sprintf(ip6tbuf, &mld->mld_addr),
1215 ifp, if_name(ifp));
1216 case MLD_LAZY_MEMBER:
1218 break;
1221 case MLD_LEAVING_MEMBER:
1222 break;
1223 }
1224 }
1225
1226out_locked:
1227 MLD_UNLOCK();
1229
1230 /* XXX Clear embedded scope ID as userland won't expect it. */
1231 in6_clearscope(&mld->mld_addr);
1232
1233 return (0);
1234}
1235
1236/*
1237 * MLD input path.
1238 *
1239 * Assume query messages which fit in a single ICMPv6 message header
1240 * have been pulled up.
1241 * Assume that userland will want to see the message, even if it
1242 * otherwise fails kernel input validation; do not free it.
1243 * Pullup may however free the mbuf chain m if it fails.
1244 *
1245 * Return IPPROTO_DONE if we freed m. Otherwise, return 0.
1246 */
1247int
1248mld_input(struct mbuf **mp, int off, int icmp6len)
1249{
1250 struct ifnet *ifp;
1251 struct ip6_hdr *ip6;
1252 struct mbuf *m;
1253 struct mld_hdr *mld;
1254 int mldlen;
1255
1256 m = *mp;
1257 CTR3(KTR_MLD, "%s: called w/mbuf (%p,%d)", __func__, m, off);
1258
1259 ifp = m->m_pkthdr.rcvif;
1260
1261 /* Pullup to appropriate size. */
1262 if (m->m_len < off + sizeof(*mld)) {
1263 m = m_pullup(m, off + sizeof(*mld));
1264 if (m == NULL) {
1265 ICMP6STAT_INC(icp6s_badlen);
1266 return (IPPROTO_DONE);
1267 }
1268 }
1269 mld = (struct mld_hdr *)(mtod(m, uint8_t *) + off);
1270 if (mld->mld_type == MLD_LISTENER_QUERY &&
1271 icmp6len >= sizeof(struct mldv2_query)) {
1272 mldlen = sizeof(struct mldv2_query);
1273 } else {
1274 mldlen = sizeof(struct mld_hdr);
1275 }
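/*
 * Illustrative note: an MLDv1 message (struct mld_hdr) is 24 bytes, while an
 * MLDv2 query (struct mldv2_query) is at least 28 bytes, so the ICMPv6
 * payload length is what selects between the two query formats here.
 */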
1276 if (m->m_len < off + mldlen) {
1277 m = m_pullup(m, off + mldlen);
1278 if (m == NULL) {
1279 ICMP6STAT_INC(icp6s_badlen);
1280 return (IPPROTO_DONE);
1281 }
1282 }
1283 *mp = m;
1284 ip6 = mtod(m, struct ip6_hdr *);
1285 mld = (struct mld_hdr *)(mtod(m, uint8_t *) + off);
1286
1287 /*
1288 * Userland needs to see all of this traffic for implementing
1289 * the endpoint discovery portion of multicast routing.
1290 */
1291 switch (mld->mld_type) {
1292 case MLD_LISTENER_QUERY:
1293 icmp6_ifstat_inc(ifp, ifs6_in_mldquery);
1294 if (icmp6len == sizeof(struct mld_hdr)) {
1295 if (mld_v1_input_query(ifp, ip6, mld) != 0)
1296 return (0);
1297 } else if (icmp6len >= sizeof(struct mldv2_query)) {
1298 if (mld_v2_input_query(ifp, ip6, m,
1299 (struct mldv2_query *)mld, off, icmp6len) != 0)
1300 return (0);
1301 }
1302 break;
1303 case MLD_LISTENER_REPORT:
1304 icmp6_ifstat_inc(ifp, ifs6_in_mldreport);
1305 if (mld_v1_input_report(ifp, ip6, mld) != 0)
1306 return (0);
1307 break;
1308 case MLDV2_LISTENER_REPORT:
1309 icmp6_ifstat_inc(ifp, ifs6_in_mldreport);
1310 break;
1311 case MLD_LISTENER_DONE:
1312 icmp6_ifstat_inc(ifp, ifs6_in_mlddone);
1313 break;
1314 default:
1315 break;
1316 }
1317
1318 return (0);
1319}
1320
1321/*
1322 * Fast timeout handler (global).
1323 * VIMAGE: Timeout handlers are expected to service all vimages.
1324 */
1325void
1326mld_fasttimo(void)
1327{
1328 struct in6_multi_head inmh;
1329 VNET_ITERATOR_DECL(vnet_iter);
1330
1331 SLIST_INIT(&inmh);
1332
1333 VNET_LIST_RLOCK_NOSLEEP();
1334 VNET_FOREACH(vnet_iter) {
1335 CURVNET_SET(vnet_iter);
1336 mld_fasttimo_vnet(&inmh);
1337 CURVNET_RESTORE();
1338 }
1339 VNET_LIST_RUNLOCK_NOSLEEP();
1341}
1342
1343/*
1344 * Fast timeout handler (per-vnet).
1345 *
1346 * VIMAGE: Assume caller has set up our curvnet.
1347 */
1348static void
1349mld_fasttimo_vnet(struct in6_multi_head *inmh)
1350{
1351 struct epoch_tracker et;
1352 struct mbufq scq; /* State-change packets */
1353 struct mbufq qrq; /* Query response packets */
1354 struct ifnet *ifp;
1355 struct mld_ifsoftc *mli;
1356 struct ifmultiaddr *ifma;
1357 struct in6_multi *inm;
1358 int uri_fasthz;
1359
1360 uri_fasthz = 0;
1361
1362 /*
1363 * Quick check to see if any work needs to be done, in order to
1364 * minimize the overhead of fasttimo processing.
1365 * SMPng: XXX Unlocked reads.
1366 */
1370 return;
1371
1373 MLD_LOCK();
1374
1375 /*
1376 * MLDv2 General Query response timer processing.
1377 */
1379 CTR1(KTR_MLD, "%s: interface timers running", __func__);
1380
1382 LIST_FOREACH(mli, &V_mli_head, mli_link) {
1383 if (mli->mli_v2_timer == 0) {
1384 /* Do nothing. */
1385 } else if (--mli->mli_v2_timer == 0) {
1387 } else {
1389 }
1390 }
1391 }
1392
1395 goto out_locked;
1396
1399
1400 CTR1(KTR_MLD, "%s: state change timers running", __func__);
1401
1402 /*
1403 * MLD host report and state-change timer processing.
1404 * Note: Processing a v2 group timer may remove a node.
1405 */
1406 LIST_FOREACH(mli, &V_mli_head, mli_link) {
1407 ifp = mli->mli_ifp;
1408
1409 if (mli->mli_version == MLD_VERSION_2) {
1410 uri_fasthz = MLD_RANDOM_DELAY(mli->mli_uri *
1411 PR_FASTHZ);
1412 mbufq_init(&qrq, MLD_MAX_G_GS_PACKETS);
1413 mbufq_init(&scq, MLD_MAX_STATE_CHANGE_PACKETS);
1414 }
1415
1416 NET_EPOCH_ENTER(et);
1417 IF_ADDR_WLOCK(ifp);
1418 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1419 inm = in6m_ifmultiaddr_get_inm(ifma);
1420 if (inm == NULL)
1421 continue;
1422 switch (mli->mli_version) {
1423 case MLD_VERSION_1:
1424 mld_v1_process_group_timer(inmh, inm);
1425 break;
1426 case MLD_VERSION_2:
1427 mld_v2_process_group_timers(inmh, &qrq,
1428 &scq, inm, uri_fasthz);
1429 break;
1430 }
1431 }
1432 IF_ADDR_WUNLOCK(ifp);
1433
1434 switch (mli->mli_version) {
1435 case MLD_VERSION_1:
1436 /*
1437 * Transmit reports for this lifecycle. This
1438 * is done while not holding IF_ADDR_LOCK
1439 * since this can call
1440 * in6ifa_ifpforlinklocal() which locks
1441 * IF_ADDR_LOCK internally as well as
1442 * ip6_output() to transmit a packet.
1443 */
1444 while ((inm = SLIST_FIRST(inmh)) != NULL) {
1445 SLIST_REMOVE_HEAD(inmh, in6m_defer);
1446 (void)mld_v1_transmit_report(inm,
1447 MLD_LISTENER_REPORT);
1448 }
1449 break;
1450 case MLD_VERSION_2:
1451 mld_dispatch_queue(&qrq, 0);
1452 mld_dispatch_queue(&scq, 0);
1453 break;
1454 }
1455 NET_EPOCH_EXIT(et);
1456 }
1457
1458out_locked:
1459 MLD_UNLOCK();
1461}
1462
1463/*
1464 * Update host report group timer.
1465 * Will update the global pending timer flags.
1466 */
1467static void
1468mld_v1_process_group_timer(struct in6_multi_head *inmh, struct in6_multi *inm)
1469{
1470 int report_timer_expired;
1471
1474
1475 if (inm->in6m_timer == 0) {
1476 report_timer_expired = 0;
1477 } else if (--inm->in6m_timer == 0) {
1478 report_timer_expired = 1;
1479 } else {
1481 return;
1482 }
1483
1484 switch (inm->in6m_state) {
1485 case MLD_NOT_MEMBER:
1486 case MLD_SILENT_MEMBER:
1487 case MLD_IDLE_MEMBER:
1488 case MLD_LAZY_MEMBER:
1491 break;
1493 if (report_timer_expired) {
1495 SLIST_INSERT_HEAD(inmh, inm, in6m_defer);
1496 }
1497 break;
1500 case MLD_LEAVING_MEMBER:
1501 break;
1502 }
1503}
1504
1505/*
1506 * Update a group's timers for MLDv2.
1507 * Will update the global pending timer flags.
1508 * Note: Unlocked read from mli.
1509 */
1510static void
1511mld_v2_process_group_timers(struct in6_multi_head *inmh,
1512 struct mbufq *qrq, struct mbufq *scq,
1513 struct in6_multi *inm, const int uri_fasthz)
1514{
1515 int query_response_timer_expired;
1516 int state_change_retransmit_timer_expired;
1517#ifdef KTR
1518 char ip6tbuf[INET6_ADDRSTRLEN];
1519#endif
1520
1523
1524 query_response_timer_expired = 0;
1525 state_change_retransmit_timer_expired = 0;
1526
1527 /*
1528 * During a transition from compatibility mode back to MLDv2,
1529 * a group record in REPORTING state may still have its group
1530 * timer active. This is a no-op in this function; it is easier
1531 * to deal with it here than to complicate the slow-timeout path.
1532 */
1533 if (inm->in6m_timer == 0) {
1534 query_response_timer_expired = 0;
1535 } else if (--inm->in6m_timer == 0) {
1536 query_response_timer_expired = 1;
1537 } else {
1539 }
1540
1541 if (inm->in6m_sctimer == 0) {
1542 state_change_retransmit_timer_expired = 0;
1543 } else if (--inm->in6m_sctimer == 0) {
1544 state_change_retransmit_timer_expired = 1;
1545 } else {
1547 }
1548
1549 /* We are in fasttimo, so be quick about it. */
1550 if (!state_change_retransmit_timer_expired &&
1551 !query_response_timer_expired)
1552 return;
1553
1554 switch (inm->in6m_state) {
1555 case MLD_NOT_MEMBER:
1556 case MLD_SILENT_MEMBER:
1558 case MLD_LAZY_MEMBER:
1560 case MLD_IDLE_MEMBER:
1561 break;
1564 /*
1565 * Respond to a previously pending Group-Specific
1566 * or Group-and-Source-Specific query by enqueueing
1567 * the appropriate Current-State report for
1568 * immediate transmission.
1569 */
1570 if (query_response_timer_expired) {
1571 int retval;
1572
1573 retval = mld_v2_enqueue_group_record(qrq, inm, 0, 1,
1575 0);
1576 CTR2(KTR_MLD, "%s: enqueue record = %d",
1577 __func__, retval);
1580 }
1581 /* FALLTHROUGH */
1583 case MLD_LEAVING_MEMBER:
1584 if (state_change_retransmit_timer_expired) {
1585 /*
1586 * State-change retransmission timer fired.
1587 * If there are any further pending retransmissions,
1588 * set the global pending state-change flag, and
1589 * reset the timer.
1590 */
1591 if (--inm->in6m_scrv > 0) {
1592 inm->in6m_sctimer = uri_fasthz;
1594 }
1595 /*
1596 * Retransmit the previously computed state-change
1597 * report. If there are no further pending
1598 * retransmissions, the mbuf queue will be consumed.
1599 * Update T0 state to T1 as we have now sent
1600 * a state-change.
1601 */
1602 (void)mld_v2_merge_state_changes(inm, scq);
1603
1604 in6m_commit(inm);
1605 CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__,
1606 ip6_sprintf(ip6tbuf, &inm->in6m_addr),
1607 if_name(inm->in6m_ifp));
1608
1609 /*
1610 * If we are leaving the group for good, make sure
1611 * we release MLD's reference to it.
1612 * This release must be deferred using a SLIST,
1613 * as we are called from a loop which traverses
1614 * the in_ifmultiaddr TAILQ.
1615 */
1616 if (inm->in6m_state == MLD_LEAVING_MEMBER &&
1617 inm->in6m_scrv == 0) {
1619 in6m_disconnect_locked(inmh, inm);
1620 in6m_rele_locked(inmh, inm);
1621 }
1622 }
1623 break;
1624 }
1625}
1626
1627/*
1628 * Switch to a different version on the given interface,
1629 * as per Section 9.12.
1630 */
1631static void
1632mld_set_version(struct mld_ifsoftc *mli, const int version)
1633{
1634 int old_version_timer;
1635
1637
1638 CTR4(KTR_MLD, "%s: switching to v%d on ifp %p(%s)", __func__,
1639 version, mli->mli_ifp, if_name(mli->mli_ifp));
1640
1641 if (version == MLD_VERSION_1) {
1642 /*
1643 * Compute the "Older Version Querier Present" timer as per
1644 * Section 9.12.
1645 */
1646 old_version_timer = (mli->mli_rv * mli->mli_qi) + mli->mli_qri;
1647 old_version_timer *= PR_SLOWHZ;
1648 mli->mli_v1_timer = old_version_timer;
1649 }
1650
1651 if (mli->mli_v1_timer > 0 && mli->mli_version != MLD_VERSION_1) {
1654 }
1655}
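/*
 * Worked example (illustrative, assuming the usual defaults of robustness
 * variable 2, query interval 125 s and query response interval 10 s): the
 * Older Version Querier Present timeout computed above is (2 * 125) + 10 =
 * 260 seconds, then converted to slow-timeout ticks via PR_SLOWHZ.
 */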
1656
1657/*
1658 * Cancel pending MLDv2 timers for the given link and all groups
1659 * joined on it; state-change, general-query, and group-query timers.
1660 */
1661static void
1662mld_v2_cancel_link_timers(struct mld_ifsoftc *mli)
1663{
1664 struct epoch_tracker et;
1665 struct in6_multi_head inmh;
1666 struct ifmultiaddr *ifma;
1667 struct ifnet *ifp;
1668 struct in6_multi *inm;
1669
1670 CTR3(KTR_MLD, "%s: cancel v2 timers on ifp %p(%s)", __func__,
1671 mli->mli_ifp, if_name(mli->mli_ifp));
1672
1673 SLIST_INIT(&inmh);
1676
1677 /*
1678 * Fast-track this potentially expensive operation
1679 * by checking all the global 'timer pending' flags.
1680 */
1684 return;
1685
1686 mli->mli_v2_timer = 0;
1687
1688 ifp = mli->mli_ifp;
1689
1690 IF_ADDR_WLOCK(ifp);
1691 NET_EPOCH_ENTER(et);
1692 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1693 inm = in6m_ifmultiaddr_get_inm(ifma);
1694 if (inm == NULL)
1695 continue;
1696 switch (inm->in6m_state) {
1697 case MLD_NOT_MEMBER:
1698 case MLD_SILENT_MEMBER:
1699 case MLD_IDLE_MEMBER:
1700 case MLD_LAZY_MEMBER:
1703 break;
1704 case MLD_LEAVING_MEMBER:
1705 /*
1706 * If we are leaving the group and switching
1707 * version, we need to release the final
1708 * reference held for issuing the INCLUDE {}.
1709 */
1710 if (inm->in6m_refcount == 1)
1711 in6m_disconnect_locked(&inmh, inm);
1712 in6m_rele_locked(&inmh, inm);
1713 /* FALLTHROUGH */
1717 /* FALLTHROUGH */
1719 inm->in6m_sctimer = 0;
1720 inm->in6m_timer = 0;
1722 /*
1723 * Free any pending MLDv2 state-change records.
1724 */
1725 mbufq_drain(&inm->in6m_scq);
1726 break;
1727 }
1728 }
1729 NET_EPOCH_EXIT(et);
1730 IF_ADDR_WUNLOCK(ifp);
1732}
1733
1734/*
1735 * Global slowtimo handler.
1736 * VIMAGE: Timeout handlers are expected to service all vimages.
1737 */
1738void
1739mld_slowtimo(void)
1740{
1741 VNET_ITERATOR_DECL(vnet_iter);
1742
1743 VNET_LIST_RLOCK_NOSLEEP();
1744 VNET_FOREACH(vnet_iter) {
1745 CURVNET_SET(vnet_iter);
1747 CURVNET_RESTORE();
1748 }
1749 VNET_LIST_RUNLOCK_NOSLEEP();
1750}
1751
1752/*
1753 * Per-vnet slowtimo handler.
1754 */
1755static void
1756mld_slowtimo_vnet(void)
1757{
1758 struct mld_ifsoftc *mli;
1759
1760 MLD_LOCK();
1761
1762 LIST_FOREACH(mli, &V_mli_head, mli_link) {
1764 }
1765
1766 MLD_UNLOCK();
1767}
1768
1769/*
1770 * Update the Older Version Querier Present timers for a link.
1771 * See Section 9.12 of RFC 3810.
1772 */
1773static void
1774mld_v1_process_querier_timers(struct mld_ifsoftc *mli)
1775{
1776
1778
1779 if (mli->mli_version != MLD_VERSION_2 && --mli->mli_v1_timer == 0) {
1780 /*
1781 * MLDv1 Querier Present timer expired; revert to MLDv2.
1782 */
1783 CTR5(KTR_MLD,
1784 "%s: transition from v%d -> v%d on %p(%s)",
1785 __func__, mli->mli_version, MLD_VERSION_2,
1786 mli->mli_ifp, if_name(mli->mli_ifp));
1788 }
1789}
1790
1791/*
1792 * Transmit an MLDv1 report immediately.
1793 */
1794static int
1795mld_v1_transmit_report(struct in6_multi *in6m, const int type)
1796{
1797 struct ifnet *ifp;
1798 struct in6_ifaddr *ia;
1799 struct ip6_hdr *ip6;
1800 struct mbuf *mh, *md;
1801 struct mld_hdr *mld;
1802
1803 NET_EPOCH_ASSERT();
1806
1807 ifp = in6m->in6m_ifp;
1808 /* in process of being freed */
1809 if (ifp == NULL)
1810 return (0);
1811 ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST);
1812 /* ia may be NULL if link-local address is tentative. */
1813
1814 mh = m_gethdr(M_NOWAIT, MT_DATA);
1815 if (mh == NULL) {
1816 if (ia != NULL)
1817 ifa_free(&ia->ia_ifa);
1818 return (ENOMEM);
1819 }
1820 md = m_get(M_NOWAIT, MT_DATA);
1821 if (md == NULL) {
1822 m_free(mh);
1823 if (ia != NULL)
1824 ifa_free(&ia->ia_ifa);
1825 return (ENOMEM);
1826 }
1827 mh->m_next = md;
1828
1829 /*
1830 * FUTURE: Consider increasing alignment by ETHER_HDR_LEN, so
1831 * that ether_output() does not need to allocate another mbuf
1832 * for the header in the most common case.
1833 */
1834 M_ALIGN(mh, sizeof(struct ip6_hdr));
1835 mh->m_pkthdr.len = sizeof(struct ip6_hdr) + sizeof(struct mld_hdr);
1836 mh->m_len = sizeof(struct ip6_hdr);
1837
1838 ip6 = mtod(mh, struct ip6_hdr *);
1839 ip6->ip6_flow = 0;
1840 ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
1841 ip6->ip6_vfc |= IPV6_VERSION;
1842 ip6->ip6_nxt = IPPROTO_ICMPV6;
1843 ip6->ip6_src = ia ? ia->ia_addr.sin6_addr : in6addr_any;
1844 ip6->ip6_dst = in6m->in6m_addr;
1845
1846 md->m_len = sizeof(struct mld_hdr);
1847 mld = mtod(md, struct mld_hdr *);
1848 mld->mld_type = type;
1849 mld->mld_code = 0;
1850 mld->mld_cksum = 0;
1851 mld->mld_maxdelay = 0;
1852 mld->mld_reserved = 0;
1853 mld->mld_addr = in6m->in6m_addr;
1854 in6_clearscope(&mld->mld_addr);
1855 mld->mld_cksum = in6_cksum(mh, IPPROTO_ICMPV6,
1856 sizeof(struct ip6_hdr), sizeof(struct mld_hdr));
1857
1858 mld_save_context(mh, ifp);
1859 mh->m_flags |= M_MLDV1;
1860
1862
1863 if (ia != NULL)
1864 ifa_free(&ia->ia_ifa);
1865 return (0);
1866}
1867
1868/*
1869 * Process a state change from the upper layer for the given IPv6 group.
1870 *
1871 * Each socket holds a reference on the in_multi in its own ip_moptions.
1872 * The socket layer will have made the necessary updates to the group
1873 * state, it is now up to MLD to issue a state change report if there
1874 * has been any change between T0 (when the last state-change was issued)
1875 * and T1 (now).
1876 *
1877 * We use the MLDv2 state machine at group level. The MLD module
1878 * however makes the decision as to which MLD protocol version to speak.
1879 * A state change *from* INCLUDE {} always means an initial join.
1880 * A state change *to* INCLUDE {} always means a final leave.
1881 *
1882 * If delay is non-zero, and the state change is an initial multicast
1883 * join, the state change report will be delayed by 'delay' ticks
1884 * in units of PR_FASTHZ if MLDv1 is active on the link; otherwise
1885 * the initial MLDv2 state change report will be delayed by whichever
1886 * is sooner, a pending state-change timer or delay itself.
1887 *
1888 * VIMAGE: curvnet should have been set by caller, as this routine
1889 * is called from the socket option handlers.
1890 */
1891int
1892mld_change_state(struct in6_multi *inm, const int delay)
1893{
1894 struct mld_ifsoftc *mli;
1895 struct ifnet *ifp;
1896 int error;
1897
1899
1900 error = 0;
1901
1902 /*
1903 * Check if the in6_multi has already been disconnected.
1904 */
1905 if (inm->in6m_ifp == NULL) {
1906 CTR1(KTR_MLD, "%s: inm is disconnected", __func__);
1907 return (0);
1908 }
1909
1910 /*
1911 * Try to detect if the upper layer just asked us to change state
1912 * for an interface which has now gone away.
1913 */
1914 KASSERT(inm->in6m_ifma != NULL, ("%s: no ifma", __func__));
1915 ifp = inm->in6m_ifma->ifma_ifp;
1916 if (ifp == NULL)
1917 return (0);
1918 /*
1919 * Sanity check that netinet6's notion of ifp is the
1920 * same as net's.
1921 */
1922 KASSERT(inm->in6m_ifp == ifp, ("%s: bad ifp", __func__));
1923
1924 MLD_LOCK();
1925 mli = MLD_IFINFO(ifp);
1926 KASSERT(mli != NULL, ("%s: no mld_ifsoftc for ifp %p", __func__, ifp));
1927
1928 /*
1929 * If we detect a state transition to or from MCAST_UNDEFINED
1930 * for this group, then we are starting or finishing an MLD
1931 * life cycle for this group.
1932 */
1933 if (inm->in6m_st[1].iss_fmode != inm->in6m_st[0].iss_fmode) {
1934 CTR3(KTR_MLD, "%s: inm transition %d -> %d", __func__,
1935 inm->in6m_st[0].iss_fmode, inm->in6m_st[1].iss_fmode);
1936 if (inm->in6m_st[0].iss_fmode == MCAST_UNDEFINED) {
1937 CTR1(KTR_MLD, "%s: initial join", __func__);
1938 error = mld_initial_join(inm, mli, delay);
1939 goto out_locked;
1940 } else if (inm->in6m_st[1].iss_fmode == MCAST_UNDEFINED) {
1941 CTR1(KTR_MLD, "%s: final leave", __func__);
1942 mld_final_leave(inm, mli);
1943 goto out_locked;
1944 }
1945 } else {
1946 CTR1(KTR_MLD, "%s: filter set change", __func__);
1947 }
1948
1949 error = mld_handle_state_change(inm, mli);
1950
1951out_locked:
1952 MLD_UNLOCK();
1953 return (error);
1954}
1955
1956/*
1957 * Perform the initial join for an MLD group.
1958 *
1959 * When joining a group:
1960 * If the group should have its MLD traffic suppressed, do nothing.
1961 * MLDv1 starts sending MLDv1 host membership reports.
1962 * MLDv2 will schedule an MLDv2 state-change report containing the
1963 * initial state of the membership.
1964 *
1965 * If the delay argument is non-zero, then we must delay sending the
1966 * initial state change for delay ticks (in units of PR_FASTHZ).
1967 */
1968static int
1969mld_initial_join(struct in6_multi *inm, struct mld_ifsoftc *mli,
1970 const int delay)
1971{
1972 struct epoch_tracker et;
1973 struct ifnet *ifp;
1974 struct mbufq *mq;
1975 int error, retval, syncstates;
1976 int odelay;
1977#ifdef KTR
1978 char ip6tbuf[INET6_ADDRSTRLEN];
1979#endif
1980
1981 CTR4(KTR_MLD, "%s: initial join %s on ifp %p(%s)",
1982 __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr),
1983 inm->in6m_ifp, if_name(inm->in6m_ifp));
1984
1985 error = 0;
1986 syncstates = 1;
1987
1988 ifp = inm->in6m_ifp;
1989
1992
1993 KASSERT(mli && mli->mli_ifp == ifp, ("%s: inconsistent ifp", __func__));
1994
1995 /*
1996 * Groups joined on loopback or marked as 'not reported',
1997 * enter the MLD_SILENT_MEMBER state and
1998 * are never reported in any protocol exchanges.
1999 * All other groups enter the appropriate state machine
2000 * for the version in use on this link.
2001 * A link marked as MLIF_SILENT causes MLD to be completely
2002 * disabled for the link.
2003 */
2004 if ((ifp->if_flags & IFF_LOOPBACK) ||
2005 (mli->mli_flags & MLIF_SILENT) ||
2007 CTR1(KTR_MLD,
2008"%s: not kicking state machine for silent group", __func__);
2010 inm->in6m_timer = 0;
2011 } else {
2012 /*
2013 * Deal with overlapping in_multi lifecycle.
2014 * If this group was LEAVING, then make sure
2015 * we drop the reference we picked up to keep the
2016 * group around for the final INCLUDE {} enqueue.
2017 */
2018 if (mli->mli_version == MLD_VERSION_2 &&
2019 inm->in6m_state == MLD_LEAVING_MEMBER) {
2020 inm->in6m_refcount--;
2021 MPASS(inm->in6m_refcount > 0);
2022 }
2024
2025 switch (mli->mli_version) {
2026 case MLD_VERSION_1:
2027 /*
2028 * If a delay was provided, only use it if
2029 * it is greater than the delay normally
2030 * used for an MLDv1 state change report,
2031 * and delay sending the initial MLDv1 report
2032 * by not transitioning to the IDLE state.
2033 */
2034 odelay = MLD_RANDOM_DELAY(MLD_V1_MAX_RI * PR_FASTHZ);
2035 if (delay) {
2036 inm->in6m_timer = max(delay, odelay);
2038 } else {
2040 NET_EPOCH_ENTER(et);
2041 error = mld_v1_transmit_report(inm,
2042 MLD_LISTENER_REPORT);
2043 NET_EPOCH_EXIT(et);
2044 if (error == 0) {
2045 inm->in6m_timer = odelay;
2047 }
2048 }
2049 break;
2050
2051 case MLD_VERSION_2:
2052 /*
2053 * Defer update of T0 to T1, until the first copy
2054 * of the state change has been transmitted.
2055 */
2056 syncstates = 0;
2057
2058 /*
2059 * Immediately enqueue a State-Change Report for
2060 * this interface, freeing any previous reports.
2061 * Don't kick the timers if there is nothing to do,
2062 * or if an error occurred.
2063 */
2064 mq = &inm->in6m_scq;
2065 mbufq_drain(mq);
2066 retval = mld_v2_enqueue_group_record(mq, inm, 1,
2067 0, 0, (mli->mli_flags & MLIF_USEALLOW));
2068 CTR2(KTR_MLD, "%s: enqueue record = %d",
2069 __func__, retval);
2070 if (retval <= 0) {
2071 error = retval * -1;
2072 break;
2073 }
2074
2075 /*
2076 * Schedule transmission of pending state-change
2077 * report up to RV times for this link. The timer
2078 * will fire at the next mld_fasttimo (~200ms),
2079 * giving us an opportunity to merge the reports.
2080 *
2081 * If a delay was provided to this function, only
2082 * use this delay if sooner than the existing one.
2083 */
2084 KASSERT(mli->mli_rv > 1,
2085 ("%s: invalid robustness %d", __func__,
2086 mli->mli_rv));
2087 inm->in6m_scrv = mli->mli_rv;
2088 if (delay) {
2089 if (inm->in6m_sctimer > 1) {
2090 inm->in6m_sctimer =
2091 min(inm->in6m_sctimer, delay);
2092 } else
2093 inm->in6m_sctimer = delay;
2094 } else
2095 inm->in6m_sctimer = 1;
2096 V_state_change_timers_running6 = 1;
2097
2098 error = 0;
2099 break;
2100 }
2101 }
2102
2103 /*
2104 * Only update the T0 state if state change is atomic,
2105 * i.e. we don't need to wait for a timer to fire before we
2106 * can consider the state change to have been communicated.
2107 */
2108 if (syncstates) {
2109 in6m_commit(inm);
2110 CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__,
2111 ip6_sprintf(ip6tbuf, &inm->in6m_addr),
2112 if_name(inm->in6m_ifp));
2113 }
2114
2115 return (error);
2116}
2117
2118/*
2119 * Issue an intermediate state change during the life-cycle.
2120 */
2121static int
2122mld_handle_state_change(struct in6_multi *inm, struct mld_ifsoftc *mli)
2123{
2124 struct ifnet *ifp;
2125 int retval;
2126#ifdef KTR
2127 char ip6tbuf[INET6_ADDRSTRLEN];
2128#endif
2129
2130 CTR4(KTR_MLD, "%s: state change for %s on ifp %p(%s)",
2131 __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr),
2132 inm->in6m_ifp, if_name(inm->in6m_ifp));
2133
2134 ifp = inm->in6m_ifp;
2135
2136 IN6_MULTI_LIST_LOCK_ASSERT();
2137 MLD_LOCK_ASSERT();
2138
2139 KASSERT(mli && mli->mli_ifp == ifp,
2140 ("%s: inconsistent ifp", __func__));
2141
2142 if ((ifp->if_flags & IFF_LOOPBACK) ||
2143 (mli->mli_flags & MLIF_SILENT) ||
2144 !mld_is_addr_reported(&inm->in6m_addr) ||
2145 (mli->mli_version != MLD_VERSION_2)) {
2146 if (!mld_is_addr_reported(&inm->in6m_addr)) {
2147 CTR1(KTR_MLD,
2148"%s: not kicking state machine for silent group", __func__);
2149 }
2150 CTR1(KTR_MLD, "%s: nothing to do", __func__);
2151 in6m_commit(inm);
2152 CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__,
2153 ip6_sprintf(ip6tbuf, &inm->in6m_addr),
2154 if_name(inm->in6m_ifp));
2155 return (0);
2156 }
2157
2158 mbufq_drain(&inm->in6m_scq);
2159
2160 retval = mld_v2_enqueue_group_record(&inm->in6m_scq, inm, 1, 0, 0,
2161 (mli->mli_flags & MLIF_USEALLOW));
2162 CTR2(KTR_MLD, "%s: enqueue record = %d", __func__, retval);
2163 if (retval <= 0)
2164 return (-retval);
2165
2166 /*
2167 * If record(s) were enqueued, start the state-change
2168 * report timer for this group.
2169 */
2170 inm->in6m_scrv = mli->mli_rv;
2171 inm->in6m_sctimer = 1;
2172 V_state_change_timers_running6 = 1;
2173
2174 return (0);
2175}
2176
2177/*
2178 * Perform the final leave for a multicast address.
2179 *
2180 * When leaving a group:
2181 * MLDv1 sends a DONE message, if and only if we are the reporter.
2182 * MLDv2 enqueues a state-change report containing a transition
2183 * to INCLUDE {} for immediate transmission.
2184 */
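/*
 * Illustrative sketch (not part of the original source): the MLDv2 leave
 * path described above results in a single group record with an empty
 * source list, conceptually equivalent to the header below. The real
 * record is built by mld_v2_enqueue_group_record(); with MLIF_USEALLOW
 * set and an inclusive-mode source list, the type is rewritten to
 * MLD_BLOCK_OLD_SOURCES instead.
 */
#if 0
	struct mldv2_record mr = {
		.mr_type    = MLD_CHANGE_TO_INCLUDE_MODE,	/* TO_IN {} */
		.mr_datalen = 0,				/* no auxiliary data */
		.mr_numsrc  = htons(0),				/* empty source list */
		.mr_addr    = inm->in6m_addr,			/* group address */
	};
#endif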
2185static void
2186mld_final_leave(struct in6_multi *inm, struct mld_ifsoftc *mli)
2187{
2188 struct epoch_tracker et;
2189 int syncstates;
2190#ifdef KTR
2191 char ip6tbuf[INET6_ADDRSTRLEN];
2192#endif
2193
2194 syncstates = 1;
2195
2196 CTR4(KTR_MLD, "%s: final leave %s on ifp %p(%s)",
2197 __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr),
2198 inm->in6m_ifp, if_name(inm->in6m_ifp));
2199
2200 IN6_MULTI_LIST_LOCK_ASSERT();
2201 MLD_LOCK_ASSERT();
2202
2203 switch (inm->in6m_state) {
2204 case MLD_NOT_MEMBER:
2205 case MLD_SILENT_MEMBER:
2206 case MLD_LEAVING_MEMBER:
2207 /* Already leaving or left; do nothing. */
2208 CTR1(KTR_MLD,
2209"%s: not kicking state machine for silent group", __func__);
2210 break;
2211 case MLD_REPORTING_MEMBER:
2212 case MLD_IDLE_MEMBER:
2213 case MLD_G_QUERY_PENDING_MEMBER:
2214 case MLD_SG_QUERY_PENDING_MEMBER:
2215 if (mli->mli_version == MLD_VERSION_1) {
2216#ifdef INVARIANTS
2217 if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER ||
2218 inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER)
2219 panic("%s: MLDv2 state reached, not MLDv2 mode",
2220 __func__);
2221#endif
2222 NET_EPOCH_ENTER(et);
2223 mld_v1_transmit_report(inm, MLD_LISTENER_DONE);
2224 NET_EPOCH_EXIT(et);
2225 inm->in6m_state = MLD_NOT_MEMBER;
2226 V_current_state_timers_running6 = 1;
2227 } else if (mli->mli_version == MLD_VERSION_2) {
2228 /*
2229 * Stop group timer and all pending reports.
2230 * Immediately enqueue a state-change report
2231 * TO_IN {} to be sent on the next fast timeout,
2232 * giving us an opportunity to merge reports.
2233 */
2234 mbufq_drain(&inm->in6m_scq);
2235 inm->in6m_timer = 0;
2236 inm->in6m_scrv = mli->mli_rv;
2237 CTR4(KTR_MLD, "%s: Leaving %s/%s with %d "
2238 "pending retransmissions.", __func__,
2239 ip6_sprintf(ip6tbuf, &inm->in6m_addr),
2240 if_name(inm->in6m_ifp), inm->in6m_scrv);
2241 if (inm->in6m_scrv == 0) {
2242 inm->in6m_state = MLD_NOT_MEMBER;
2243 inm->in6m_sctimer = 0;
2244 } else {
2245 int retval;
2246
2247 in6m_acquire_locked(inm);
2248
2249 retval = mld_v2_enqueue_group_record(
2250 &inm->in6m_scq, inm, 1, 0, 0,
2251 (mli->mli_flags & MLIF_USEALLOW));
2252 KASSERT(retval != 0,
2253 ("%s: enqueue record = %d", __func__,
2254 retval));
2255
2256 inm->in6m_state = MLD_LEAVING_MEMBER;
2257 inm->in6m_sctimer = 1;
2258 V_state_change_timers_running6 = 1;
2259 syncstates = 0;
2260 }
2261 break;
2262 }
2263 break;
2264 case MLD_LAZY_MEMBER:
2265 case MLD_SLEEPING_MEMBER:
2266 case MLD_AWAKENING_MEMBER:
2267 /* Our reports are suppressed; do nothing. */
2268 break;
2269 }
2270
2271 if (syncstates) {
2272 in6m_commit(inm);
2273 CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__,
2274 ip6_sprintf(ip6tbuf, &inm->in6m_addr),
2275 if_name(inm->in6m_ifp));
2276 inm->in6m_st[1].iss_fmode = MCAST_UNDEFINED;
2277 CTR3(KTR_MLD, "%s: T1 now MCAST_UNDEFINED for %p/%s",
2278 __func__, &inm->in6m_addr, if_name(inm->in6m_ifp));
2279 }
2280}
2281
2282/*
2283 * Enqueue an MLDv2 group record to the given output queue.
2284 *
2285 * If is_state_change is zero, a current-state record is appended.
2286 * If is_state_change is non-zero, a state-change report is appended.
2287 *
2288 * If is_group_query is non-zero, an mbuf packet chain is allocated.
2289 * If is_group_query is zero, and if there is a packet with free space
2290 * at the tail of the queue, it will be appended to providing there
2291 * is enough free space.
2292 * Otherwise a new mbuf packet chain is allocated.
2293 *
2294 * If is_source_query is non-zero, each source is checked to see if
2295 * it was recorded for a Group-Source query, and will be omitted if
2296 * it is not both in-mode and recorded.
2297 *
2298 * If use_block_allow is non-zero, state change reports for initial join
2299 * and final leave, on an inclusive mode group with a source list, will be
2300 * rewritten to use the ALLOW_NEW and BLOCK_OLD record types, respectively.
2301 *
2302 * The function will attempt to allocate leading space in the packet
2303 * for the IPv6+ICMP headers to be prepended without fragmenting the chain.
2304 *
2305 * If successful the size of all data appended to the queue is returned,
2306 * otherwise an error code less than zero is returned, or zero if
2307 * no record(s) were appended.
2308 */
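/*
 * Illustrative call patterns (sketch, mirroring the callers elsewhere in
 * this file): a current-state record queued for a General Query response
 * passes zero for every flag, while a state-change record for a join or
 * leave sets is_state_change and honours the per-link MLIF_USEALLOW flag.
 */
#if 0
	/* Current-state record, General Query response. */
	retval = mld_v2_enqueue_group_record(&mli->mli_gq, inm, 0, 0, 0, 0);

	/* State-change record for a join/leave on this group. */
	retval = mld_v2_enqueue_group_record(&inm->in6m_scq, inm, 1, 0, 0,
	    (mli->mli_flags & MLIF_USEALLOW));
#endif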
2309static int
2310mld_v2_enqueue_group_record(struct mbufq *mq, struct in6_multi *inm,
2311 const int is_state_change, const int is_group_query,
2312 const int is_source_query, const int use_block_allow)
2313{
2314 struct mldv2_record mr;
2315 struct mldv2_record *pmr;
2316 struct ifnet *ifp;
2317 struct ip6_msource *ims, *nims;
2318 struct mbuf *m0, *m, *md;
2319 int is_filter_list_change;
2320 int minrec0len, m0srcs, msrcs, nbytes, off;
2321 int record_has_sources;
2322 int now;
2323 int type;
2324 uint8_t mode;
2325#ifdef KTR
2326 char ip6tbuf[INET6_ADDRSTRLEN];
2327#endif
2328
2329 IN6_MULTI_LIST_LOCK_ASSERT();
2330
2331 ifp = inm->in6m_ifp;
2332 is_filter_list_change = 0;
2333 m = NULL;
2334 m0 = NULL;
2335 m0srcs = 0;
2336 msrcs = 0;
2337 nbytes = 0;
2338 nims = NULL;
2339 record_has_sources = 1;
2340 pmr = NULL;
2341 type = MLD_DO_NOTHING;
2342 mode = inm->in6m_st[1].iss_fmode;
2343
2344 /*
2345 * If we did not transition out of ASM mode during t0->t1,
2346 * and there are no source nodes to process, we can skip
2347 * the generation of source records.
2348 */
2349 if (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0 &&
2350 inm->in6m_nsrc == 0)
2351 record_has_sources = 0;
2352
2353 if (is_state_change) {
2354 /*
2355 * Queue a state change record.
2356 * If the mode did not change, and there are non-ASM
2357 * listeners or source filters present,
2358 * we potentially need to issue two records for the group.
2359 * If there are ASM listeners, and there was no filter
2360 * mode transition of any kind, do nothing.
2361 *
2362 * If we are transitioning to MCAST_UNDEFINED, we need
2363 * not send any sources. A transition to/from this state is
2364 * considered inclusive with some special treatment.
2365 *
2366 * If we are rewriting initial joins/leaves to use
2367 * ALLOW/BLOCK, and the group's membership is inclusive,
2368 * we need to send sources in all cases.
2369 */
2370 if (mode != inm->in6m_st[0].iss_fmode) {
2371 if (mode == MCAST_EXCLUDE) {
2372 CTR1(KTR_MLD, "%s: change to EXCLUDE",
2373 __func__);
2374 type = MLD_CHANGE_TO_EXCLUDE_MODE;
2375 } else {
2376 CTR1(KTR_MLD, "%s: change to INCLUDE",
2377 __func__);
2378 if (use_block_allow) {
2379 /*
2380 * XXX
2381 * Here we're interested in state
2382 * edges either direction between
2383 * MCAST_UNDEFINED and MCAST_INCLUDE.
2384 * Perhaps we should just check
2385 * the group state, rather than
2386 * the filter mode.
2387 */
2388 if (mode == MCAST_UNDEFINED) {
2389 type = MLD_BLOCK_OLD_SOURCES;
2390 } else {
2391 type = MLD_ALLOW_NEW_SOURCES;
2392 }
2393 } else {
2394 type = MLD_CHANGE_TO_INCLUDE_MODE;
2395 if (mode == MCAST_UNDEFINED)
2396 record_has_sources = 0;
2397 }
2398 }
2399 } else {
2400 if (record_has_sources) {
2401 is_filter_list_change = 1;
2402 } else {
2403 type = MLD_DO_NOTHING;
2404 }
2405 }
2406 } else {
2407 /*
2408 * Queue a current state record.
2409 */
2410 if (mode == MCAST_EXCLUDE) {
2411 type = MLD_MODE_IS_EXCLUDE;
2412 } else if (mode == MCAST_INCLUDE) {
2413 type = MLD_MODE_IS_INCLUDE;
2414 KASSERT(inm->in6m_st[1].iss_asm == 0,
2415 ("%s: inm %p is INCLUDE but ASM count is %d",
2416 __func__, inm, inm->in6m_st[1].iss_asm));
2417 }
2418 }
2419
2420 /*
2421 * Generate the filter list changes using a separate function.
2422 */
2423 if (is_filter_list_change)
2424 return (mld_v2_enqueue_filter_change(mq, inm));
2425
2426 if (type == MLD_DO_NOTHING) {
2427 CTR3(KTR_MLD, "%s: nothing to do for %s/%s",
2428 __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr),
2429 if_name(inm->in6m_ifp));
2430 return (0);
2431 }
2432
2433 /*
2434 * If any sources are present, we must be able to fit at least
2435 * one in the trailing space of the tail packet's mbuf,
2436 * ideally more.
2437 */
2438 minrec0len = sizeof(struct mldv2_record);
2439 if (record_has_sources)
2440 minrec0len += sizeof(struct in6_addr);
2441
2442 CTR4(KTR_MLD, "%s: queueing %s for %s/%s", __func__,
2443 mld_rec_type_to_str(type),
2444 ip6_sprintf(ip6tbuf, &inm->in6m_addr),
2445 if_name(inm->in6m_ifp));
2446
2447 /*
2448 * Check if we have a packet in the tail of the queue for this
2449 * group into which the first group record for this group will fit.
2450 * Otherwise allocate a new packet.
2451 * Always allocate leading space for IP6+RA+ICMPV6+REPORT.
2452 * Note: Group records for G/GSR query responses MUST be sent
2453 * in their own packet.
2454 */
2455 m0 = mbufq_last(mq);
2456 if (!is_group_query &&
2457 m0 != NULL &&
2458 (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <= MLD_V2_REPORT_MAXRECS) &&
2459 (m0->m_pkthdr.len + minrec0len) <
2460 (ifp->if_mtu - MLD_MTUSPACE)) {
2461 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
2462 sizeof(struct mldv2_record)) /
2463 sizeof(struct in6_addr);
2464 m = m0;
2465 CTR1(KTR_MLD, "%s: use existing packet", __func__);
2466 } else {
2467 if (mbufq_full(mq)) {
2468 CTR1(KTR_MLD, "%s: outbound queue full", __func__);
2469 return (-ENOMEM);
2470 }
2471 m = NULL;
2472 m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
2473 sizeof(struct mldv2_record)) / sizeof(struct in6_addr);
2474 if (!is_state_change && !is_group_query)
2475 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2476 if (m == NULL)
2477 m = m_gethdr(M_NOWAIT, MT_DATA);
2478 if (m == NULL)
2479 return (-ENOMEM);
2480
2481 mld_save_context(m, ifp);
2482
2483 CTR1(KTR_MLD, "%s: allocated first packet", __func__);
2484 }
2485
2486 /*
2487 * Append group record.
2488 * If we have sources, we don't know how many yet.
2489 */
2490 mr.mr_type = type;
2491 mr.mr_datalen = 0;
2492 mr.mr_numsrc = 0;
2493 mr.mr_addr = inm->in6m_addr;
2494 in6_clearscope(&mr.mr_addr);
2495 if (!m_append(m, sizeof(struct mldv2_record), (void *)&mr)) {
2496 if (m != m0)
2497 m_freem(m);
2498 CTR1(KTR_MLD, "%s: m_append() failed.", __func__);
2499 return (-ENOMEM);
2500 }
2501 nbytes += sizeof(struct mldv2_record);
2502
2503 /*
2504 * Append as many sources as will fit in the first packet.
2505 * If we are appending to a new packet, the chain allocation
2506 * may potentially use clusters; use m_getptr() in this case.
2507 * If we are appending to an existing packet, we need to obtain
2508 * a pointer to the group record after m_append(), in case a new
2509 * mbuf was allocated.
2510 *
2511 * Only append sources which are in-mode at t1. If we are
2512 * transitioning to MCAST_UNDEFINED state on the group, and
2513 * use_block_allow is zero, do not include source entries.
2514 * Otherwise, we need to include this source in the report.
2515 *
2516 * Only report recorded sources in our filter set when responding
2517 * to a group-source query.
2518 */
2519 if (record_has_sources) {
2520 if (m == m0) {
2521 md = m_last(m);
2522 pmr = (struct mldv2_record *)(mtod(md, uint8_t *) +
2523 md->m_len - nbytes);
2524 } else {
2525 md = m_getptr(m, 0, &off);
2526 pmr = (struct mldv2_record *)(mtod(md, uint8_t *) +
2527 off);
2528 }
2529 msrcs = 0;
2530 RB_FOREACH_SAFE(ims, ip6_msource_tree, &inm->in6m_srcs,
2531 nims) {
2532 CTR2(KTR_MLD, "%s: visit node %s", __func__,
2533 ip6_sprintf(ip6tbuf, &ims->im6s_addr));
2534 now = im6s_get_mode(inm, ims, 1);
2535 CTR2(KTR_MLD, "%s: node is %d", __func__, now);
2536 if ((now != mode) ||
2537 (now == mode &&
2538 (!use_block_allow && mode == MCAST_UNDEFINED))) {
2539 CTR1(KTR_MLD, "%s: skip node", __func__);
2540 continue;
2541 }
2542 if (is_source_query && ims->im6s_stp == 0) {
2543 CTR1(KTR_MLD, "%s: skip unrecorded node",
2544 __func__);
2545 continue;
2546 }
2547 CTR1(KTR_MLD, "%s: append node", __func__);
2548 if (!m_append(m, sizeof(struct in6_addr),
2549 (void *)&ims->im6s_addr)) {
2550 if (m != m0)
2551 m_freem(m);
2552 CTR1(KTR_MLD, "%s: m_append() failed.",
2553 __func__);
2554 return (-ENOMEM);
2555 }
2556 nbytes += sizeof(struct in6_addr);
2557 ++msrcs;
2558 if (msrcs == m0srcs)
2559 break;
2560 }
2561 CTR2(KTR_MLD, "%s: msrcs is %d this packet", __func__,
2562 msrcs);
2563 pmr->mr_numsrc = htons(msrcs);
2564 nbytes += (msrcs * sizeof(struct in6_addr));
2565 }
2566
2567 if (is_source_query && msrcs == 0) {
2568 CTR1(KTR_MLD, "%s: no recorded sources to report", __func__);
2569 if (m != m0)
2570 m_freem(m);
2571 return (0);
2572 }
2573
2574 /*
2575 * We are good to go with first packet.
2576 */
2577 if (m != m0) {
2578 CTR1(KTR_MLD, "%s: enqueueing first packet", __func__);
2579 m->m_pkthdr.PH_vt.vt_nrecs = 1;
2580 mbufq_enqueue(mq, m);
2581 } else
2582 m->m_pkthdr.PH_vt.vt_nrecs++;
2583
2584 /*
2585 * No further work needed if no source list in packet(s).
2586 */
2587 if (!record_has_sources)
2588 return (nbytes);
2589
2590 /*
2591 * Whilst sources remain to be announced, we need to allocate
2592 * a new packet and fill out as many sources as will fit.
2593 * Always try for a cluster first.
2594 */
2595 while (nims != NULL) {
2596 if (mbufq_full(mq)) {
2597 CTR1(KTR_MLD, "%s: outbound queue full", __func__);
2598 return (-ENOMEM);
2599 }
2600 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2601 if (m == NULL)
2602 m = m_gethdr(M_NOWAIT, MT_DATA);
2603 if (m == NULL)
2604 return (-ENOMEM);
2605 mld_save_context(m, ifp);
2606 md = m_getptr(m, 0, &off);
2607 pmr = (struct mldv2_record *)(mtod(md, uint8_t *) + off);
2608 CTR1(KTR_MLD, "%s: allocated next packet", __func__);
2609
2610 if (!m_append(m, sizeof(struct mldv2_record), (void *)&mr)) {
2611 if (m != m0)
2612 m_freem(m);
2613 CTR1(KTR_MLD, "%s: m_append() failed.", __func__);
2614 return (-ENOMEM);
2615 }
2616 m->m_pkthdr.PH_vt.vt_nrecs = 1;
2617 nbytes += sizeof(struct mldv2_record);
2618
2619 m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
2620 sizeof(struct mldv2_record)) / sizeof(struct in6_addr);
2621
2622 msrcs = 0;
2623 RB_FOREACH_FROM(ims, ip6_msource_tree, nims) {
2624 CTR2(KTR_MLD, "%s: visit node %s",
2625 __func__, ip6_sprintf(ip6tbuf, &ims->im6s_addr));
2626 now = im6s_get_mode(inm, ims, 1);
2627 if ((now != mode) ||
2628 (now == mode &&
2629 (!use_block_allow && mode == MCAST_UNDEFINED))) {
2630 CTR1(KTR_MLD, "%s: skip node", __func__);
2631 continue;
2632 }
2633 if (is_source_query && ims->im6s_stp == 0) {
2634 CTR1(KTR_MLD, "%s: skip unrecorded node",
2635 __func__);
2636 continue;
2637 }
2638 CTR1(KTR_MLD, "%s: append node", __func__);
2639 if (!m_append(m, sizeof(struct in6_addr),
2640 (void *)&ims->im6s_addr)) {
2641 if (m != m0)
2642 m_freem(m);
2643 CTR1(KTR_MLD, "%s: m_append() failed.",
2644 __func__);
2645 return (-ENOMEM);
2646 }
2647 ++msrcs;
2648 if (msrcs == m0srcs)
2649 break;
2650 }
2651 pmr->mr_numsrc = htons(msrcs);
2652 nbytes += (msrcs * sizeof(struct in6_addr));
2653
2654 CTR1(KTR_MLD, "%s: enqueueing next packet", __func__);
2655 mbufq_enqueue(mq, m);
2656 }
2657
2658 return (nbytes);
2659}
2660
2661/*
2662 * Type used to mark record pass completion.
2663 * We exploit the fact we can cast to this easily from the
2664 * current filter modes on each ip_msource node.
2665 */
2666typedef enum {
2667 REC_NONE = 0x00, /* MCAST_UNDEFINED */
2668 REC_ALLOW = 0x01, /* MCAST_INCLUDE */
2669 REC_BLOCK = 0x02, /* MCAST_EXCLUDE */
2670 REC_FULL = REC_ALLOW | REC_BLOCK
2671} rectype_t;
2672
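/*
 * Illustrative mapping (sketch, not part of the original source): the
 * rectype_t values deliberately mirror the numeric multicast filter
 * modes, which is what makes the cast in mld_v2_enqueue_filter_change()
 * work: MCAST_UNDEFINED (0) -> REC_NONE, MCAST_INCLUDE (1) -> REC_ALLOW,
 * MCAST_EXCLUDE (2) -> REC_BLOCK. A node left UNDEFINED at t1 is folded
 * into the pass opposite to the group's filter mode, e.g.:
 */
#if 0
	nrt = (rectype_t)now;		/* now == im6s_get_mode(inm, ims, 1) */
	if (nrt == REC_NONE)
		nrt = (rectype_t)(~mode & REC_FULL);	/* inverse of group mode */
#endif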
2673/*
2674 * Enqueue an MLDv2 filter list change to the given output queue.
2675 *
2676 * Source list filter state is held in an RB-tree. When the filter list
2677 * for a group is changed without changing its mode, we need to compute
2678 * the deltas between T0 and T1 for each source in the filter set,
2679 * and enqueue the appropriate ALLOW_NEW/BLOCK_OLD records.
2680 *
2681 * As we may potentially queue two record types, and the entire R-B tree
2682 * needs to be walked at once, we break this out into its own function
2683 * so we can generate a tightly packed queue of packets.
2684 *
2685 * XXX This could be written to only use one tree walk, although that makes
2686 * serializing into the mbuf chains a bit harder. For now we do two walks
2687 * which makes things easier on us, and it may or may not be harder on
2688 * the L2 cache.
2689 *
2690 * If successful the size of all data appended to the queue is returned,
2691 * otherwise an error code less than zero is returned, or zero if
2692 * no record(s) were appended.
2693 */
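/*
 * Worked example (sketch, not from the original source): for a group in
 * INCLUDE mode whose source filter changes from {S1, S2} at t0 to
 * {S2, S3} at t1, the two tree walks below produce
 *
 *	ALLOW_NEW_SOURCES { S3 }	(newly in-mode at t1)
 *	BLOCK_OLD_SOURCES { S1 }	(dropped from the filter)
 *
 * while the unchanged source S2 is not reported at all.
 */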
2694static int
2695mld_v2_enqueue_filter_change(struct mbufq *mq, struct in6_multi *inm)
2696{
2697 static const int MINRECLEN =
2698 sizeof(struct mldv2_record) + sizeof(struct in6_addr);
2699 struct ifnet *ifp;
2700 struct mldv2_record mr;
2701 struct mldv2_record *pmr;
2702 struct ip6_msource *ims, *nims;
2703 struct mbuf *m, *m0, *md;
2704 int m0srcs, nbytes, npbytes, off, rsrcs, schanged;
2705 int nallow, nblock;
2706 uint8_t mode, now, then;
2707 rectype_t crt, drt, nrt;
2708#ifdef KTR
2709 char ip6tbuf[INET6_ADDRSTRLEN];
2710#endif
2711
2712 IN6_MULTI_LIST_LOCK_ASSERT();
2713
2714 if (inm->in6m_nsrc == 0 ||
2715 (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0))
2716 return (0);
2717
2718 ifp = inm->in6m_ifp; /* interface */
2719 mode = inm->in6m_st[1].iss_fmode; /* filter mode at t1 */
2720 crt = REC_NONE; /* current group record type */
2721 drt = REC_NONE; /* mask of completed group record types */
2722 nrt = REC_NONE; /* record type for current node */
2723 m0srcs = 0; /* # source which will fit in current mbuf chain */
2724 npbytes = 0; /* # of bytes appended this packet */
2725 nbytes = 0; /* # of bytes appended to group's state-change queue */
2726 rsrcs = 0; /* # sources encoded in current record */
2727 schanged = 0; /* # nodes encoded in overall filter change */
2728 nallow = 0; /* # of source entries in ALLOW_NEW */
2729 nblock = 0; /* # of source entries in BLOCK_OLD */
2730 nims = NULL; /* next tree node pointer */
2731
2732 /*
2733 * For each possible filter record mode.
2734 * The first kind of source we encounter tells us which
2735 * is the first kind of record we start appending.
2736 * If a node transitioned to UNDEFINED at t1, its mode is treated
2737 * as the inverse of the group's filter mode.
2738 */
2739 while (drt != REC_FULL) {
2740 do {
2741 m0 = mbufq_last(mq);
2742 if (m0 != NULL &&
2743 (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <=
2744 MLD_V2_REPORT_MAXRECS) &&
2745 (m0->m_pkthdr.len + MINRECLEN) <
2746 (ifp->if_mtu - MLD_MTUSPACE)) {
2747 m = m0;
2748 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
2749 sizeof(struct mldv2_record)) /
2750 sizeof(struct in6_addr);
2751 CTR1(KTR_MLD,
2752 "%s: use previous packet", __func__);
2753 } else {
2754 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2755 if (m == NULL)
2756 m = m_gethdr(M_NOWAIT, MT_DATA);
2757 if (m == NULL) {
2758 CTR1(KTR_MLD,
2759 "%s: m_get*() failed", __func__);
2760 return (-ENOMEM);
2761 }
2762 m->m_pkthdr.PH_vt.vt_nrecs = 0;
2763 mld_save_context(m, ifp);
2764 m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
2765 sizeof(struct mldv2_record)) /
2766 sizeof(struct in6_addr);
2767 npbytes = 0;
2768 CTR1(KTR_MLD,
2769 "%s: allocated new packet", __func__);
2770 }
2771 /*
2772 * Append the MLD group record header to the
2773 * current packet's data area.
2774 * Recalculate pointer to free space for next
2775 * group record, in case m_append() allocated
2776 * a new mbuf or cluster.
2777 */
2778 memset(&mr, 0, sizeof(mr));
2779 mr.mr_addr = inm->in6m_addr;
2780 in6_clearscope(&mr.mr_addr);
2781 if (!m_append(m, sizeof(mr), (void *)&mr)) {
2782 if (m != m0)
2783 m_freem(m);
2784 CTR1(KTR_MLD,
2785 "%s: m_append() failed", __func__);
2786 return (-ENOMEM);
2787 }
2788 npbytes += sizeof(struct mldv2_record);
2789 if (m != m0) {
2790 /* new packet; offset in chain */
2791 md = m_getptr(m, npbytes -
2792 sizeof(struct mldv2_record), &off);
2793 pmr = (struct mldv2_record *)(mtod(md,
2794 uint8_t *) + off);
2795 } else {
2796 /* current packet; offset from last append */
2797 md = m_last(m);
2798 pmr = (struct mldv2_record *)(mtod(md,
2799 uint8_t *) + md->m_len -
2800 sizeof(struct mldv2_record));
2801 }
2802 /*
2803 * Begin walking the tree for this record type
2804 * pass, or continue from where we left off
2805 * previously if we had to allocate a new packet.
2806 * Only report deltas in-mode at t1.
2807 * We need not report included sources as allowed
2808 * if we are in inclusive mode on the group,
2809 * however the converse is not true.
2810 */
2811 rsrcs = 0;
2812 if (nims == NULL) {
2813 nims = RB_MIN(ip6_msource_tree,
2814 &inm->in6m_srcs);
2815 }
2816 RB_FOREACH_FROM(ims, ip6_msource_tree, nims) {
2817 CTR2(KTR_MLD, "%s: visit node %s", __func__,
2818 ip6_sprintf(ip6tbuf, &ims->im6s_addr));
2819 now = im6s_get_mode(inm, ims, 1);
2820 then = im6s_get_mode(inm, ims, 0);
2821 CTR3(KTR_MLD, "%s: mode: t0 %d, t1 %d",
2822 __func__, then, now);
2823 if (now == then) {
2824 CTR1(KTR_MLD,
2825 "%s: skip unchanged", __func__);
2826 continue;
2827 }
2828 if (mode == MCAST_EXCLUDE &&
2829 now == MCAST_INCLUDE) {
2830 CTR1(KTR_MLD,
2831 "%s: skip IN src on EX group",
2832 __func__);
2833 continue;
2834 }
2835 nrt = (rectype_t)now;
2836 if (nrt == REC_NONE)
2837 nrt = (rectype_t)(~mode & REC_FULL);
2838 if (schanged++ == 0) {
2839 crt = nrt;
2840 } else if (crt != nrt)
2841 continue;
2842 if (!m_append(m, sizeof(struct in6_addr),
2843 (void *)&ims->im6s_addr)) {
2844 if (m != m0)
2845 m_freem(m);
2846 CTR1(KTR_MLD,
2847 "%s: m_append() failed", __func__);
2848 return (-ENOMEM);
2849 }
2850 nallow += !!(crt == REC_ALLOW);
2851 nblock += !!(crt == REC_BLOCK);
2852 if (++rsrcs == m0srcs)
2853 break;
2854 }
2855 /*
2856 * If we did not append any tree nodes on this
2857 * pass, back out of allocations.
2858 */
2859 if (rsrcs == 0) {
2860 npbytes -= sizeof(struct mldv2_record);
2861 if (m != m0) {
2862 CTR1(KTR_MLD,
2863 "%s: m_free(m)", __func__);
2864 m_freem(m);
2865 } else {
2866 CTR1(KTR_MLD,
2867 "%s: m_adj(m, -mr)", __func__);
2868 m_adj(m, -((int)sizeof(
2869 struct mldv2_record)));
2870 }
2871 continue;
2872 }
2873 npbytes += (rsrcs * sizeof(struct in6_addr));
2874 if (crt == REC_ALLOW)
2875 pmr->mr_type = MLD_ALLOW_NEW_SOURCES;
2876 else if (crt == REC_BLOCK)
2877 pmr->mr_type = MLD_BLOCK_OLD_SOURCES;
2878 pmr->mr_numsrc = htons(rsrcs);
2879 /*
2880 * Count the new group record, and enqueue this
2881 * packet if it wasn't already queued.
2882 */
2883 m->m_pkthdr.PH_vt.vt_nrecs++;
2884 if (m != m0)
2885 mbufq_enqueue(mq, m);
2886 nbytes += npbytes;
2887 } while (nims != NULL);
2888 drt |= crt;
2889 crt = (~crt & REC_FULL);
2890 }
2891
2892 CTR3(KTR_MLD, "%s: queued %d ALLOW_NEW, %d BLOCK_OLD", __func__,
2893 nallow, nblock);
2894
2895 return (nbytes);
2896}
2897
2898static int
2899mld_v2_merge_state_changes(struct in6_multi *inm, struct mbufq *scq)
2900{
2901 struct mbufq *gq;
2902 struct mbuf *m; /* pending state-change */
2903 struct mbuf *m0; /* copy of pending state-change */
2904 struct mbuf *mt; /* last state-change in packet */
2905 int docopy, domerge;
2906 u_int recslen;
2907
2908 docopy = 0;
2909 domerge = 0;
2910 recslen = 0;
2911
2912 IN6_MULTI_LIST_LOCK_ASSERT();
2913 MLD_LOCK_ASSERT();
2914
2915 /*
2916 * If there are further pending retransmissions, make a writable
2917 * copy of each queued state-change message before merging.
2918 */
2919 if (inm->in6m_scrv > 0)
2920 docopy = 1;
2921
2922 gq = &inm->in6m_scq;
2923#ifdef KTR
2924 if (mbufq_first(gq) == NULL) {
2925 CTR2(KTR_MLD, "%s: WARNING: queue for inm %p is empty",
2926 __func__, inm);
2927 }
2928#endif
2929
2930 m = mbufq_first(gq);
2931 while (m != NULL) {
2932 /*
2933 * Only merge the report into the current packet if
2934 * there is sufficient space to do so; an MLDv2 report
2935 * packet may only contain 65,535 group records.
2936 * Always use a simple mbuf chain concatenation to do this,
2937 * as large state changes for single groups may have
2938 * allocated clusters.
2939 */
2940 domerge = 0;
2941 mt = mbufq_last(scq);
2942 if (mt != NULL) {
2943 recslen = m_length(m, NULL);
2944
2945 if ((mt->m_pkthdr.PH_vt.vt_nrecs +
2946 m->m_pkthdr.PH_vt.vt_nrecs <=
2947 MLD_V2_REPORT_MAXRECS) &&
2948 (mt->m_pkthdr.len + recslen <=
2949 (inm->in6m_ifp->if_mtu - MLD_MTUSPACE)))
2950 domerge = 1;
2951 }
2952
2953 if (!domerge && mbufq_full(gq)) {
2954 CTR2(KTR_MLD,
2955 "%s: outbound queue full, skipping whole packet %p",
2956 __func__, m);
2957 mt = m->m_nextpkt;
2958 if (!docopy)
2959 m_freem(m);
2960 m = mt;
2961 continue;
2962 }
2963
2964 if (!docopy) {
2965 CTR2(KTR_MLD, "%s: dequeueing %p", __func__, m);
2966 m0 = mbufq_dequeue(gq);
2967 m = m0->m_nextpkt;
2968 } else {
2969 CTR2(KTR_MLD, "%s: copying %p", __func__, m);
2970 m0 = m_dup(m, M_NOWAIT);
2971 if (m0 == NULL)
2972 return (ENOMEM);
2973 m0->m_nextpkt = NULL;
2974 m = m->m_nextpkt;
2975 }
2976
2977 if (!domerge) {
2978 CTR3(KTR_MLD, "%s: queueing %p to scq %p)",
2979 __func__, m0, scq);
2980 mbufq_enqueue(scq, m0);
2981 } else {
2982 struct mbuf *mtl; /* last mbuf of packet mt */
2983
2984 CTR3(KTR_MLD, "%s: merging %p with ifscq tail %p)",
2985 __func__, m0, mt);
2986
2987 mtl = m_last(mt);
2988 m0->m_flags &= ~M_PKTHDR;
2989 mt->m_pkthdr.len += recslen;
2990 mt->m_pkthdr.PH_vt.vt_nrecs +=
2991 m0->m_pkthdr.PH_vt.vt_nrecs;
2992
2993 mtl->m_next = m0;
2994 }
2995 }
2996
2997 return (0);
2998}
2999
3000/*
3001 * Respond to a pending MLDv2 General Query.
3002 */
3003static void
3004mld_v2_dispatch_general_query(struct mld_ifsoftc *mli)
3005{
3006 struct ifmultiaddr *ifma;
3007 struct ifnet *ifp;
3008 struct in6_multi *inm;
3009 int retval;
3010
3011 NET_EPOCH_ASSERT();
3012 IN6_MULTI_LIST_LOCK_ASSERT();
3013 MLD_LOCK_ASSERT();
3014
3015 KASSERT(mli->mli_version == MLD_VERSION_2,
3016 ("%s: called when version %d", __func__, mli->mli_version));
3017
3018 /*
3019 * Check that there are some packets queued. If so, send them first.
3020 * For a large number of groups, the reply to a general query can take
3021 * many packets; we should finish sending them before starting to
3022 * queue the new reply.
3023 */
3024 if (mbufq_len(&mli->mli_gq) != 0)
3025 goto send;
3026
3027 ifp = mli->mli_ifp;
3028
3029 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3030 inm = in6m_ifmultiaddr_get_inm(ifma);
3031 if (inm == NULL)
3032 continue;
3033 KASSERT(ifp == inm->in6m_ifp,
3034 ("%s: inconsistent ifp", __func__));
3035
3036 switch (inm->in6m_state) {
3037 case MLD_NOT_MEMBER:
3038 case MLD_SILENT_MEMBER:
3039 break;
3040 case MLD_REPORTING_MEMBER:
3041 case MLD_IDLE_MEMBER:
3042 case MLD_LAZY_MEMBER:
3043 case MLD_SLEEPING_MEMBER:
3044 case MLD_AWAKENING_MEMBER:
3045 inm->in6m_state = MLD_REPORTING_MEMBER;
3046 retval = mld_v2_enqueue_group_record(&mli->mli_gq,
3047 inm, 0, 0, 0, 0);
3048 CTR2(KTR_MLD, "%s: enqueue record = %d",
3049 __func__, retval);
3050 break;
3051 case MLD_G_QUERY_PENDING_MEMBER:
3052 case MLD_SG_QUERY_PENDING_MEMBER:
3053 case MLD_LEAVING_MEMBER:
3054 break;
3055 }
3056 }
3057
3058send:
3059 mld_dispatch_queue(&mli->mli_gq, MLD_MAX_RESPONSE_BURST);
3060
3061 /*
3062 * Slew transmission of bursts over 500ms intervals.
3063 */
3064 if (mbufq_first(&mli->mli_gq) != NULL) {
3065 mli->mli_v2_timer = 1 + MLD_RANDOM_DELAY(
3066 MLD_RESPONSE_BURST_INTERVAL);
3067 V_interface_timers_running6 = 1;
3068 }
3069}
3070
3071/*
3072 * Transmit the next pending message in the output queue.
3073 *
3074 * VIMAGE: Needs to store/restore vnet pointer on a per-mbuf-chain basis.
3075 * MRT: Nothing needs to be done, as MLD traffic is always local to
3076 * a link and uses a link-scope multicast address.
3077 */
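/*
 * Illustrative sketch (not part of the original source) of the per-mbuf
 * context convention referred to above: the transmitting ifnet's index
 * (and, under VIMAGE, its vnet) is stashed in the packet header when the
 * report is queued, and recovered here before hand-off to ip6_output().
 */
#if 0
	mld_save_context(m, ifp);		/* at enqueue time */
	/* later, at dispatch time: */
	ifindex = mld_restore_context(m);
	ifp = ifnet_byindex(ifindex);
#endif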
3078static void
3079mld_dispatch_packet(struct mbuf *m)
3080{
3081 struct ip6_moptions im6o;
3082 struct ifnet *ifp;
3083 struct ifnet *oifp;
3084 struct mbuf *m0;
3085 struct mbuf *md;
3086 struct ip6_hdr *ip6;
3087 struct mld_hdr *mld;
3088 int error;
3089 int off;
3090 int type;
3091 uint32_t ifindex;
3092
3093 CTR2(KTR_MLD, "%s: transmit %p", __func__, m);
3094 NET_EPOCH_ASSERT();
3095
3096 /*
3097 * Set VNET image pointer from enqueued mbuf chain
3098 * before doing anything else. Whilst we use interface
3099 * indexes to guard against interface detach, they are
3100 * unique to each VIMAGE and must be retrieved.
3101 */
3102 ifindex = mld_restore_context(m);
3103
3104 /*
3105 * Check if the ifnet still exists. This limits the scope of
3106 * any race in the absence of a global ifp lock for low cost
3107 * (an array lookup).
3108 */
3109 ifp = ifnet_byindex(ifindex);
3110 if (ifp == NULL) {
3111 CTR3(KTR_MLD, "%s: dropped %p as ifindex %u went away.",
3112 __func__, m, ifindex);
3113 m_freem(m);
3114 IP6STAT_INC(ip6s_noroute);
3115 goto out;
3116 }
3117
3118 im6o.im6o_multicast_hlim = 1;
3119 im6o.im6o_multicast_loop = (V_ip6_mrouter != NULL);
3120 im6o.im6o_multicast_ifp = ifp;
3121
3122 if (m->m_flags & M_MLDV1) {
3123 m0 = m;
3124 } else {
3125 m0 = mld_v2_encap_report(ifp, m);
3126 if (m0 == NULL) {
3127 CTR2(KTR_MLD, "%s: dropped %p", __func__, m);
3128 IP6STAT_INC(ip6s_odropped);
3129 goto out;
3130 }
3131 }
3132
3133 mld_scrub_context(m0);
3134 m_clrprotoflags(m);
3135 m0->m_pkthdr.rcvif = V_loif;
3136
3137 ip6 = mtod(m0, struct ip6_hdr *);
3138#if 0
3139 (void)in6_setscope(&ip6->ip6_dst, ifp, NULL); /* XXX LOR */
3140#else
3141 /*
3142 * XXX XXX Break some KPI rules to prevent an LOR which would
3143 * occur if we called in6_setscope() at transmission.
3144 * See comments at top of file.
3145 */
3146 MLD_EMBEDSCOPE(&ip6->ip6_dst, ifp->if_index);
3147#endif
3148
3149 /*
3150 * Retrieve the ICMPv6 type before handoff to ip6_output(),
3151 * so we can bump the stats.
3152 */
3153 md = m_getptr(m0, sizeof(struct ip6_hdr), &off);
3154 mld = (struct mld_hdr *)(mtod(md, uint8_t *) + off);
3155 type = mld->mld_type;
3156
3157 oifp = NULL;
3158 error = ip6_output(m0, &mld_po, NULL, IPV6_UNSPECSRC, &im6o,
3159 &oifp, NULL);
3160 if (error) {
3161 CTR3(KTR_MLD, "%s: ip6_output(%p) = %d", __func__, m0, error);
3162 goto out;
3163 }
3164 ICMP6STAT_INC(icp6s_outhist[type]);
3165 if (oifp != NULL) {
3166 icmp6_ifstat_inc(oifp, ifs6_out_msg);
3167 switch (type) {
3168 case MLD_LISTENER_REPORT:
3169 case MLDV2_LISTENER_REPORT:
3170 icmp6_ifstat_inc(oifp, ifs6_out_mldreport);
3171 break;
3172 case MLD_LISTENER_DONE:
3173 icmp6_ifstat_inc(oifp, ifs6_out_mlddone);
3174 break;
3175 }
3176 }
3177out:
3178 return;
3179}
3180
3181/*
3182 * Encapsulate an MLDv2 report.
3183 *
3184 * KAME IPv6 requires that hop-by-hop options be passed separately,
3185 * and that the IPv6 header be prepended in a separate mbuf.
3186 *
3187 * Returns a pointer to the new mbuf chain head, or NULL if the
3188 * allocation failed.
3189 */
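/*
 * Resulting layout (sketch): the encapsulated report is a two-mbuf chain,
 *
 *	mh: [ ip6_hdr | mldv2_report header ] -> m: [ group records ]
 *
 * with the Router Alert hop-by-hop option supplied separately through the
 * preformed packet options (mld_po) handed to ip6_output(), as noted above.
 */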
3190static struct mbuf *
3191mld_v2_encap_report(struct ifnet *ifp, struct mbuf *m)
3192{
3193 struct mbuf *mh;
3194 struct mldv2_report *mld;
3195 struct ip6_hdr *ip6;
3196 struct in6_ifaddr *ia;
3197 int mldreclen;
3198
3199 KASSERT(ifp != NULL, ("%s: null ifp", __func__));
3200 KASSERT((m->m_flags & M_PKTHDR),
3201 ("%s: mbuf chain %p is !M_PKTHDR", __func__, m));
3202
3203 /*
3204 * RFC3590: OK to send as :: or tentative during DAD.
3205 */
3206 NET_EPOCH_ASSERT();
3207 ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST);
3208 if (ia == NULL)
3209 CTR1(KTR_MLD, "%s: warning: ia is NULL", __func__);
3210
3211 mh = m_gethdr(M_NOWAIT, MT_DATA);
3212 if (mh == NULL) {
3213 if (ia != NULL)
3214 ifa_free(&ia->ia_ifa);
3215 m_freem(m);
3216 return (NULL);
3217 }
3218 M_ALIGN(mh, sizeof(struct ip6_hdr) + sizeof(struct mldv2_report));
3219
3220 mldreclen = m_length(m, NULL);
3221 CTR2(KTR_MLD, "%s: mldreclen is %d", __func__, mldreclen);
3222
3223 mh->m_len = sizeof(struct ip6_hdr) + sizeof(struct mldv2_report);
3224 mh->m_pkthdr.len = sizeof(struct ip6_hdr) +
3225 sizeof(struct mldv2_report) + mldreclen;
3226
3227 ip6 = mtod(mh, struct ip6_hdr *);
3228 ip6->ip6_flow = 0;
3229 ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
3230 ip6->ip6_vfc |= IPV6_VERSION;
3231 ip6->ip6_nxt = IPPROTO_ICMPV6;
3232 ip6->ip6_src = ia ? ia->ia_addr.sin6_addr : in6addr_any;
3233 if (ia != NULL)
3234 ifa_free(&ia->ia_ifa);
3235 ip6->ip6_dst = in6addr_linklocal_allv2routers;
3236 /* scope ID will be set in netisr */
3237
3238 mld = (struct mldv2_report *)(ip6 + 1);
3239 mld->mld_type = MLDV2_LISTENER_REPORT;
3240 mld->mld_code = 0;
3241 mld->mld_cksum = 0;
3242 mld->mld_v2_reserved = 0;
3243 mld->mld_v2_numrecs = htons(m->m_pkthdr.PH_vt.vt_nrecs);
3244 m->m_pkthdr.PH_vt.vt_nrecs = 0;
3245
3246 mh->m_next = m;
3247 mld->mld_cksum = in6_cksum(mh, IPPROTO_ICMPV6,
3248 sizeof(struct ip6_hdr), sizeof(struct mldv2_report) + mldreclen);
3249 return (mh);
3250}
3251
3252#ifdef KTR
3253static char *
3254mld_rec_type_to_str(const int type)
3255{
3256
3257 switch (type) {
3258 case MLD_CHANGE_TO_EXCLUDE_MODE:
3259 return "TO_EX";
3260 break;
3261 case MLD_CHANGE_TO_INCLUDE_MODE:
3262 return "TO_IN";
3263 break;
3264 case MLD_MODE_IS_EXCLUDE:
3265 return "MODE_EX";
3266 break;
3267 case MLD_MODE_IS_INCLUDE:
3268 return "MODE_IN";
3269 break;
3270 case MLD_ALLOW_NEW_SOURCES:
3271 return "ALLOW_NEW";
3272 break;
3273 case MLD_BLOCK_OLD_SOURCES:
3274 return "BLOCK_OLD";
3275 break;
3276 default:
3277 break;
3278 }
3279 return "unknown";
3280}
3281#endif
3282
3283static void
3284mld_init(void *unused __unused)
3285{
3286
3287 CTR1(KTR_MLD, "%s: initializing", __func__);
3288 MLD_LOCK_INIT();
3289
3290 ip6_initpktopts(&mld_po);
3291 mld_po.ip6po_hlim = 1;
3292 mld_po.ip6po_hbh = &mld_ra.hbh;
3293 mld_po.ip6po_prefer_tempaddr = IP6PO_TEMPADDR_NOTPREFER;
3294 mld_po.ip6po_flags = IP6PO_DONTFRAG;
3295}
3296SYSINIT(mld_init, SI_SUB_PROTO_MC, SI_ORDER_MIDDLE, mld_init, NULL);
3297
3298static void
3299mld_uninit(void *unused __unused)
3300{
3301
3302 CTR1(KTR_MLD, "%s: tearing down", __func__);
3303 MLD_LOCK_DESTROY();
3304}
3305SYSUNINIT(mld_uninit, SI_SUB_PROTO_MC, SI_ORDER_MIDDLE, mld_uninit, NULL);
3306
3307static void
3308vnet_mld_init(const void *unused __unused)
3309{
3310
3311 CTR1(KTR_MLD, "%s: initializing", __func__);
3312
3313 LIST_INIT(&V_mli_head);
3314}
3315VNET_SYSINIT(vnet_mld_init, SI_SUB_PROTO_MC, SI_ORDER_ANY, vnet_mld_init,
3316 NULL);
3317
3318static void
3319vnet_mld_uninit(const void *unused __unused)
3320{
3321
3322 /* This can happen if we shut down the network stack. */
3323 CTR1(KTR_MLD, "%s: tearing down", __func__);
3324}
3325VNET_SYSUNINIT(vnet_mld_uninit, SI_SUB_PROTO_MC, SI_ORDER_ANY, vnet_mld_uninit,
3326 NULL);
3327
3328static int
3329mld_modevent(module_t mod, int type, void *unused __unused)
3330{
3331
3332 switch (type) {
3333 case MOD_LOAD:
3334 case MOD_UNLOAD:
3335 break;
3336 default:
3337 return (EOPNOTSUPP);
3338 }
3339 return (0);
3340}
3341
3342static moduledata_t mld_mod = {
3343 "mld",
3344 mld_modevent,
3345 0
3346};
3347DECLARE_MODULE(mld, mld_mod, SI_SUB_PROTO_MC, SI_ORDER_ANY);