/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2007-2009 Bruce Simpson.
 * Copyright (c) 1988 Stephen Deering.
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Stephen Deering of Stanford University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)igmp.c	8.1 (Berkeley) 7/19/93
 */

/*
 * Internet Group Management Protocol (IGMP) routines.
 * [RFC1112, RFC2236, RFC3376]
 *
 * Written by Steve Deering, Stanford, May 1988.
 * Modified by Rosen Sharma, Stanford, Aug 1994.
 * Modified by Bill Fenner, Xerox PARC, Feb 1995.
 * Modified to fully comply to IGMPv2 by Bill Fenner, Oct 1995.
 * Significantly rewritten for IGMPv3, VIMAGE, and SMP by Bruce Simpson.
 *
 * MULTICAST Revision: 3.5.1.4
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/protosw.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>
#include <sys/condvar.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <net/if.h>
#include <net/if_var.h>
#include <net/netisr.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_options.h>
#include <netinet/igmp.h>
#include <netinet/igmp_var.h>

#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>

#ifndef KTR_IGMPV3
#define KTR_IGMPV3 KTR_INET
#endif
static struct igmp_ifsoftc *igi_alloc_locked(struct ifnet *);
static void igi_delete_locked(const struct ifnet *);
static void igmp_dispatch_queue(struct mbufq *, int, const int);
static void igmp_fasttimo_vnet(void);
static void igmp_final_leave(struct in_multi *, struct igmp_ifsoftc *);
static int igmp_handle_state_change(struct in_multi *,
    struct igmp_ifsoftc *);
static int igmp_initial_join(struct in_multi *, struct igmp_ifsoftc *);
static int igmp_input_v1_query(struct ifnet *, const struct ip *,
    const struct igmp *);
static int igmp_input_v2_query(struct ifnet *, const struct ip *,
    const struct igmp *);
static int igmp_input_v3_query(struct ifnet *, const struct ip *,
    /*const*/ struct igmpv3 *);
static int igmp_input_v3_group_query(struct in_multi *,
    struct igmp_ifsoftc *, int, /*const*/ struct igmpv3 *);
static int igmp_input_v1_report(struct ifnet *, /*const*/ struct ip *,
    /*const*/ struct igmp *);
static int igmp_input_v2_report(struct ifnet *, /*const*/ struct ip *,
    /*const*/ struct igmp *);
static void igmp_intr(struct mbuf *);
static int igmp_isgroupreported(const struct in_addr);
static struct mbuf *igmp_ra_alloc(void);
#ifdef KTR
static char *igmp_rec_type_to_str(const int);
#endif
static void igmp_set_version(struct igmp_ifsoftc *, const int);
static void igmp_slowtimo_vnet(void);
static int igmp_v1v2_queue_report(struct in_multi *, const int);
static void igmp_v1v2_process_group_timer(struct in_multi *, const int);
static void igmp_v1v2_process_querier_timers(struct igmp_ifsoftc *);
static void igmp_v2_update_group(struct in_multi *, const int);
static void igmp_v3_cancel_link_timers(struct igmp_ifsoftc *);
static void igmp_v3_dispatch_general_query(struct igmp_ifsoftc *);
static struct mbuf *igmp_v3_encap_report(struct ifnet *, struct mbuf *);
static int igmp_v3_enqueue_group_record(struct mbufq *,
    struct in_multi *, const int, const int, const int);
static int igmp_v3_enqueue_filter_change(struct mbufq *,
    struct in_multi *);
static void igmp_v3_process_group_timers(struct in_multi_head *,
    struct mbufq *, struct mbufq *, struct in_multi *,
    const int);
static int igmp_v3_merge_state_changes(struct in_multi *,
    struct mbufq *);
static void igmp_v3_suppress_group_record(struct in_multi *);
static int sysctl_igmp_default_version(SYSCTL_HANDLER_ARGS);
static int sysctl_igmp_gsr(SYSCTL_HANDLER_ARGS);
static int sysctl_igmp_ifinfo(SYSCTL_HANDLER_ARGS);
static int sysctl_igmp_stat(SYSCTL_HANDLER_ARGS);

static const struct netisr_handler igmp_nh = {
    .nh_name = "igmp",
    .nh_handler = igmp_intr,
    .nh_proto = NETISR_IGMP,
    .nh_policy = NETISR_POLICY_SOURCE,
};

/*
 * System-wide globals.
 *
 * Unlocked access to these is OK, except for the global IGMP output
 * queue.  The IGMP subsystem lock ends up being system-wide for the
 * moment, because all VIMAGEs have to share a global output queue, as
 * netisrs themselves are not virtualized.
 *
 * Locking:
 *  * The permitted lock order is: IN_MULTI_LIST_LOCK, IGMP_LOCK,
 *    IF_ADDR_LOCK.  Any may be taken independently; if any are held at
 *    the same time, the above lock order must be followed.
 *  * All output is delegated to the netisr.
 *    Now that Giant has been eliminated, the netisr may be inlined.
 *  * IN_MULTI_LIST_LOCK covers in_multi.
 *  * IGMP_LOCK covers igmp_ifsoftc and any global variables in this
 *    file, including the output queue.
 *  * IF_ADDR_LOCK covers if_multiaddrs, which is used for a variety of
 *    per-link state iterators.
 *  * igmp_ifsoftc is valid as long as PF_INET is attached to the
 *    interface, therefore it is not refcounted.
 *    We allow unlocked reads of igmp_ifsoftc when accessed via in_multi.
 *
 * Reference counting
 *  * IGMP acquires its own reference every time an in_multi is passed to
 *    it and the group is being joined for the first time.
 *  * IGMP releases its reference(s) on in_multi in a deferred way,
 *    because the operations which process the release run as part of
 *    a loop whose control variables are directly affected by the release
 *    (that, and not recursing on the IF_ADDR_LOCK).
 *
 * VIMAGE: Each in_multi corresponds to an ifp, and each ifp corresponds
 * to a vnet in ifp->if_vnet.
 *
 * SMPng: XXX We may potentially race operations on ifma_protospec.
 * The problem is that we currently lack a clean way of taking the
 * IF_ADDR_LOCK() between the ifnet and in layers w/o recursing,
 * as anything which modifies ifma needs to be covered by that lock.
 * So check for ifma_protospec being NULL before proceeding.
 */
struct mtx igmp_mtx;

struct mbuf *m_raopt;			/* Router Alert option */
static MALLOC_DEFINE(M_IGMP, "igmp", "igmp state");

/*
 * VIMAGE-wide globals.
 *
 * The IGMPv3 timers themselves need to run per-image, however, protosw
 * timers run globally (see tcp).
 * An ifnet can only be in one vimage at a time, and the loopback
 * ifnet, loif, is itself virtualized.
 * It would otherwise be possible to seriously hose IGMP state, and
 * create inconsistencies in upstream multicast routing, if you have
 * multiple VIMAGEs running on the same link joining different multicast
 * groups, UNLESS the "primary IP address" is different.  This is
 * because IGMP for IPv4 does not force link-local addresses to be used
 * for each node, unlike MLD for IPv6.
 * Obviously the IGMPv3 per-interface state has per-vimage granularity
 * also as a result.
 *
 * FUTURE: Stop using IFP_TO_IA/INADDR_ANY, and use source address
 * selection policy to control the address used by IGMP on the link.
 */
VNET_DEFINE_STATIC(int, interface_timers_running);	/* IGMPv3 general
							 * query response */
VNET_DEFINE_STATIC(int, state_change_timers_running);	/* IGMPv3 state-change
							 * retransmit */
VNET_DEFINE_STATIC(int, current_state_timers_running);	/* IGMPv1/v2 host
							 * report; IGMPv3 g/sg
							 * query response */

#define V_interface_timers_running	VNET(interface_timers_running)
#define V_state_change_timers_running	VNET(state_change_timers_running)
#define V_current_state_timers_running	VNET(current_state_timers_running)
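
/*
 * Note: the three "timers running" values above are pending-work flags,
 * not counters; igmp_fasttimo_vnet() tests them first so that an idle
 * fast timeout can return without taking any locks.
 */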

VNET_PCPUSTAT_DEFINE(struct igmpstat, igmpstat);
VNET_PCPUSTAT_SYSINIT(igmpstat);
VNET_PCPUSTAT_SYSUNINIT(igmpstat);

VNET_DEFINE_STATIC(LIST_HEAD(, igmp_ifsoftc), igi_head) =
    LIST_HEAD_INITIALIZER(igi_head);
VNET_DEFINE_STATIC(struct timeval, igmp_gsrdelay) = {10, 0};

#define V_igi_head	VNET(igi_head)
#define V_igmp_gsrdelay	VNET(igmp_gsrdelay)

VNET_DEFINE_STATIC(int, igmp_recvifkludge) = 1;
VNET_DEFINE_STATIC(int, igmp_sendra) = 1;
VNET_DEFINE_STATIC(int, igmp_sendlocal) = 1;
VNET_DEFINE_STATIC(int, igmp_v1enable) = 1;
VNET_DEFINE_STATIC(int, igmp_v2enable) = 1;
VNET_DEFINE_STATIC(int, igmp_legacysupp);
VNET_DEFINE_STATIC(int, igmp_default_version) = IGMP_VERSION_3;

#define V_igmp_recvifkludge	VNET(igmp_recvifkludge)
#define V_igmp_sendra		VNET(igmp_sendra)
#define V_igmp_sendlocal	VNET(igmp_sendlocal)
#define V_igmp_v1enable		VNET(igmp_v1enable)
#define V_igmp_v2enable		VNET(igmp_v2enable)
#define V_igmp_legacysupp	VNET(igmp_legacysupp)
#define V_igmp_default_version	VNET(igmp_default_version)

/*
 * Virtualized sysctls.
 */
SYSCTL_PROC(_net_inet_igmp, IGMPCTL_STATS, stats,
    CTLFLAG_VNET | CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    &VNET_NAME(igmpstat), 0, sysctl_igmp_stat, "S,igmpstat",
    "IGMP statistics (struct igmpstat, netinet/igmp_var.h)");
SYSCTL_INT(_net_inet_igmp, OID_AUTO, recvifkludge, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(igmp_recvifkludge), 0,
    "Rewrite IGMPv1/v2 reports from 0.0.0.0 to contain subnet address");
SYSCTL_INT(_net_inet_igmp, OID_AUTO, sendra, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(igmp_sendra), 0,
    "Send IP Router Alert option in IGMPv2/v3 messages");
SYSCTL_INT(_net_inet_igmp, OID_AUTO, sendlocal, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(igmp_sendlocal), 0,
    "Send IGMP membership reports for 224.0.0.0/24 groups");
SYSCTL_INT(_net_inet_igmp, OID_AUTO, v1enable, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(igmp_v1enable), 0,
    "Enable backwards compatibility with IGMPv1");
SYSCTL_INT(_net_inet_igmp, OID_AUTO, v2enable, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(igmp_v2enable), 0,
    "Enable backwards compatibility with IGMPv2");
SYSCTL_INT(_net_inet_igmp, OID_AUTO, legacysupp, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(igmp_legacysupp), 0,
    "Allow v1/v2 reports to suppress v3 group responses");
SYSCTL_PROC(_net_inet_igmp, OID_AUTO, default_version,
    CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    &VNET_NAME(igmp_default_version), 0, sysctl_igmp_default_version, "I",
    "Default version of IGMP to run on each interface");
SYSCTL_PROC(_net_inet_igmp, OID_AUTO, gsrdelay,
    CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    &VNET_NAME(igmp_gsrdelay.tv_sec), 0, sysctl_igmp_gsr, "I",
    "Rate limit for IGMPv3 Group-and-Source queries in seconds");

/*
 * Non-virtualized sysctls.
 */
static SYSCTL_NODE(_net_inet_igmp, OID_AUTO, ifinfo,
    CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_igmp_ifinfo,
    "Per-interface IGMPv3 state");

static __inline void
igmp_save_context(struct mbuf *m, struct ifnet *ifp)
{

#ifdef VIMAGE
    m->m_pkthdr.PH_loc.ptr = ifp->if_vnet;
#endif /* VIMAGE */
    m->m_pkthdr.rcvif = ifp;
    m->m_pkthdr.flowid = ifp->if_index;
}
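
/*
 * Note: igmp_save_context() and igmp_restore_context() form a pair: the
 * transmit ifindex is stashed in m_pkthdr.flowid while the mbuf sits in
 * the netisr queue, and igmp_scrub_context() clears both fields again
 * once the context has been recovered.
 */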

static __inline void
igmp_scrub_context(struct mbuf *m)
{

    m->m_pkthdr.PH_loc.ptr = NULL;
    m->m_pkthdr.flowid = 0;
}

/*
 * Restore context from a queued IGMP output chain.
 * Return saved ifindex.
 *
 * VIMAGE: The assertion is there to make sure that we
 * actually called CURVNET_SET() with what's in the mbuf chain.
 */
static __inline uint32_t
igmp_restore_context(struct mbuf *m)
{

#ifdef notyet
#if defined(VIMAGE) && defined(INVARIANTS)
    KASSERT(curvnet == (m->m_pkthdr.PH_loc.ptr),
        ("%s: called when curvnet was not restored", __func__));
#endif
#endif
    return (m->m_pkthdr.flowid);
}

/*
 * IGMP statistics.
 */
static int
sysctl_igmp_stat(SYSCTL_HANDLER_ARGS)
{
    struct igmpstat igps0;
    int error;
    char *p;

    error = sysctl_wire_old_buffer(req, sizeof(struct igmpstat));
    if (error)
        return (error);

    if (req->oldptr != NULL) {
        if (req->oldlen < sizeof(struct igmpstat))
            error = ENOMEM;
        else {
            /*
             * Copy the counters, and explicitly set the struct's
             * version and length fields.
             */
            COUNTER_ARRAY_COPY(VNET(igmpstat), &igps0,
                sizeof(struct igmpstat) / sizeof(uint64_t));
            igps0.igps_version = IGPS_VERSION_3;
            igps0.igps_len = IGPS_VERSION3_LEN;
            error = SYSCTL_OUT(req, &igps0,
                sizeof(struct igmpstat));
        }
    } else
        req->validlen = sizeof(struct igmpstat);
    if (error)
        goto out;
    if (req->newptr != NULL) {
        if (req->newlen < sizeof(struct igmpstat))
            error = ENOMEM;
        else
            error = SYSCTL_IN(req, &igps0,
                sizeof(igps0));
        if (error)
            goto out;
        /*
         * igps0 must be "all zero".
         */
        p = (char *)&igps0;
        while (p < (char *)&igps0 + sizeof(igps0) && *p == '\0')
            p++;
        if (p != (char *)&igps0 + sizeof(igps0)) {
            error = EINVAL;
            goto out;
        }
        COUNTER_ARRAY_ZERO(VNET(igmpstat),
            sizeof(struct igmpstat) / sizeof(uint64_t));
    }
out:
    return (error);
}
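
/*
 * Example (userland view of the handler above): the counters are
 * exported as an opaque struct igmpstat, so they are typically read
 * with a formatting tool, e.g.:
 *
 *   sysctl net.inet.igmp.stats     # raw S,igmpstat blob
 *   netstat -s -p igmp             # pretty-printed counters
 *
 * Writing an all-zero struct back resets the counters, per the
 * all-zero check above.
 */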

/*
 * Retrieve or set default IGMP version.
 *
 * VIMAGE: Assume curvnet set by caller.
 * SMPng: NOTE: Serialized by IGMP lock.
 */
static int
sysctl_igmp_default_version(SYSCTL_HANDLER_ARGS)
{
    int error;
    int new;

    error = sysctl_wire_old_buffer(req, sizeof(int));
    if (error)
        return (error);

    IGMP_LOCK();

    new = V_igmp_default_version;

    error = sysctl_handle_int(oidp, &new, 0, req);
    if (error || !req->newptr)
        goto out_locked;

    if (new < IGMP_VERSION_1 || new > IGMP_VERSION_3) {
        error = EINVAL;
        goto out_locked;
    }

    CTR2(KTR_IGMPV3, "change igmp_default_version from %d to %d",
        V_igmp_default_version, new);

    V_igmp_default_version = new;

out_locked:
    IGMP_UNLOCK();
    return (error);
}

/*
 * Retrieve or set threshold between group-source queries in seconds.
 *
 * VIMAGE: Assume curvnet set by caller.
 * SMPng: NOTE: Serialized by IGMP lock.
 */
static int
sysctl_igmp_gsr(SYSCTL_HANDLER_ARGS)
{
    int error;
    int i;

    error = sysctl_wire_old_buffer(req, sizeof(int));
    if (error)
        return (error);

    IGMP_LOCK();

    i = V_igmp_gsrdelay.tv_sec;

    error = sysctl_handle_int(oidp, &i, 0, req);
    if (error || !req->newptr)
        goto out_locked;

    if (i < -1 || i >= 60) {
        error = EINVAL;
        goto out_locked;
    }

    CTR2(KTR_IGMPV3, "change igmp_gsrdelay from %d to %d",
        V_igmp_gsrdelay.tv_sec, i);
    V_igmp_gsrdelay.tv_sec = i;

out_locked:
    IGMP_UNLOCK();
    return (error);
}
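
/*
 * The 10 second default comes from the igmp_gsrdelay initializer above;
 * igmp_input_v3_query() feeds this value to ratecheck(9) to throttle
 * group-and-source queries on a per-group basis.
 */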

/*
 * Expose struct igmp_ifsoftc to userland, keyed by ifindex.
 * For use by ifmcstat(8).
 *
 * SMPng: NOTE: Does an unlocked ifindex space read.
 * VIMAGE: Assume curvnet set by caller. The node handler itself
 * is not directly virtualized.
 */
static int
sysctl_igmp_ifinfo(SYSCTL_HANDLER_ARGS)
{
    struct epoch_tracker et;
    int *name;
    int error;
    u_int namelen;
    struct ifnet *ifp;
    struct igmp_ifsoftc *igi;

    name = (int *)arg1;
    namelen = arg2;

    if (req->newptr != NULL)
        return (EPERM);

    if (namelen != 1)
        return (EINVAL);

    error = sysctl_wire_old_buffer(req, sizeof(struct igmp_ifinfo));
    if (error)
        return (error);

    IN_MULTI_LIST_LOCK();
    IGMP_LOCK();

    error = ENOENT;

    NET_EPOCH_ENTER(et);
    ifp = ifnet_byindex(name[0]);
    NET_EPOCH_EXIT(et);
    if (ifp == NULL)
        goto out_locked;

    LIST_FOREACH(igi, &V_igi_head, igi_link) {
        if (ifp == igi->igi_ifp) {
            struct igmp_ifinfo info;

            info.igi_version = igi->igi_version;
            info.igi_v1_timer = igi->igi_v1_timer;
            info.igi_v2_timer = igi->igi_v2_timer;
            info.igi_v3_timer = igi->igi_v3_timer;
            info.igi_flags = igi->igi_flags;
            info.igi_rv = igi->igi_rv;
            info.igi_qi = igi->igi_qi;
            info.igi_qri = igi->igi_qri;
            info.igi_uri = igi->igi_uri;
            error = SYSCTL_OUT(req, &info, sizeof(info));
            break;
        }
    }

out_locked:
    IGMP_UNLOCK();
    IN_MULTI_LIST_UNLOCK();
    return (error);
}

/*
 * Dispatch an entire queue of pending packet chains
 * using the netisr.
 * VIMAGE: Assumes the vnet pointer has been set.
 */
static void
igmp_dispatch_queue(struct mbufq *mq, int limit, const int loop)
{
    struct epoch_tracker et;
    struct mbuf *m;

    NET_EPOCH_ENTER(et);
    while ((m = mbufq_dequeue(mq)) != NULL) {
        CTR3(KTR_IGMPV3, "%s: dispatch %p from %p", __func__, m, mq);
        if (loop)
            m->m_flags |= M_IGMP_LOOP;
        netisr_dispatch(NETISR_IGMP, m);
        if (--limit == 0)
            break;
    }
    NET_EPOCH_EXIT(et);
}

/*
 * Filter outgoing IGMP report state by group.
 *
 * Reports are ALWAYS suppressed for ALL-HOSTS (224.0.0.1).
 * If the net.inet.igmp.sendlocal sysctl is 0, then IGMP reports are
 * disabled for all groups in the 224.0.0.0/24 link-local scope.  However,
 * this may break certain IGMP snooping switches which rely on the old
 * report behaviour.
 *
 * Return zero if the given group is one for which IGMP reports
 * should be suppressed, or non-zero if reports should be issued.
 */
static __inline int
igmp_isgroupreported(const struct in_addr addr)
{

    if (in_allhosts(addr) ||
        ((!V_igmp_sendlocal && IN_LOCAL_GROUP(ntohl(addr.s_addr)))))
        return (0);

    return (1);
}
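
/*
 * Example: 224.0.0.1 (all-hosts) is never reported.  With
 * net.inet.igmp.sendlocal=0, any group in the 224.0.0.0/24 link-local
 * scope, e.g. 224.0.0.251 (mDNS), would go unreported as well.
 */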

/*
 * Construct a Router Alert option to use in outgoing packets.
 */
static struct mbuf *
igmp_ra_alloc(void)
{
    struct mbuf *m;
    struct ipoption *p;

    m = m_get(M_WAITOK, MT_DATA);
    p = mtod(m, struct ipoption *);
    p->ipopt_dst.s_addr = INADDR_ANY;
    p->ipopt_list[0] = (char)IPOPT_RA;	/* Router Alert Option */
    p->ipopt_list[1] = 0x04;		/* 4 bytes long */
    p->ipopt_list[2] = IPOPT_EOL;	/* End of IP option list */
    p->ipopt_list[3] = 0x00;		/* pad byte */
    m->m_len = sizeof(p->ipopt_dst) + p->ipopt_list[1];

    return (m);
}
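
/*
 * The four option bytes built above are 0x94 0x04 0x00 0x00: option
 * 148 (Router Alert, RFC 2113) with the copied bit set, length 4, and
 * a two-octet value of zero ("routers should examine this packet").
 * The EOL and pad bytes double as that zero value on the wire.
 */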

/*
 * Attach IGMP when PF_INET is attached to an interface.
 */
struct igmp_ifsoftc *
igmp_domifattach(struct ifnet *ifp)
{
    struct igmp_ifsoftc *igi;

    CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)",
        __func__, ifp, ifp->if_xname);

    IGMP_LOCK();

    igi = igi_alloc_locked(ifp);
    if (!(ifp->if_flags & IFF_MULTICAST))
        igi->igi_flags |= IGIF_SILENT;

    IGMP_UNLOCK();

    return (igi);
}

/*
 * VIMAGE: assume curvnet set by caller.
 */
static struct igmp_ifsoftc *
igi_alloc_locked(/*const*/ struct ifnet *ifp)
{
    struct igmp_ifsoftc *igi;

    IGMP_LOCK_ASSERT();

    igi = malloc(sizeof(struct igmp_ifsoftc), M_IGMP, M_NOWAIT|M_ZERO);
    if (igi == NULL)
        goto out;

    igi->igi_ifp = ifp;
    igi->igi_version = V_igmp_default_version;
    igi->igi_flags = 0;
    igi->igi_rv = IGMP_RV_INIT;
    igi->igi_qi = IGMP_QI_INIT;
    igi->igi_qri = IGMP_QRI_INIT;
    igi->igi_uri = IGMP_URI_INIT;
    mbufq_init(&igi->igi_gq, IGMP_MAX_RESPONSE_PACKETS);

    LIST_INSERT_HEAD(&V_igi_head, igi, igi_link);

    CTR2(KTR_IGMPV3, "allocate igmp_ifsoftc for ifp %p(%s)",
        ifp, ifp->if_xname);

out:
    return (igi);
}

/*
 * Hook for ifdetach.
 *
 * NOTE: Some finalization tasks need to run before the protocol domain
 * is detached, but also before the link layer does its cleanup.
 *
 * SMPNG: igmp_ifdetach() needs to take IF_ADDR_LOCK().
 * XXX This is also bitten by unlocked ifma_protospec access.
 */
void
igmp_ifdetach(struct ifnet *ifp)
{
    struct igmp_ifsoftc *igi;
    struct ifmultiaddr *ifma, *next;
    struct in_multi *inm;
    struct in_multi_head inm_free_tmp;

    CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)", __func__, ifp,
        ifp->if_xname);

    SLIST_INIT(&inm_free_tmp);
    IGMP_LOCK();

    igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
    if (igi->igi_version == IGMP_VERSION_3) {
        IF_ADDR_WLOCK(ifp);
    restart:
        CK_STAILQ_FOREACH_SAFE(ifma, &ifp->if_multiaddrs, ifma_link, next) {
            if (ifma->ifma_addr->sa_family != AF_INET ||
                ifma->ifma_protospec == NULL)
                continue;
            inm = (struct in_multi *)ifma->ifma_protospec;
            if (inm->inm_state == IGMP_LEAVING_MEMBER)
                inm_rele_locked(&inm_free_tmp, inm);
            inm_clear_recorded(inm);
            if (__predict_false(ifma_restart)) {
                ifma_restart = false;
                goto restart;
            }
        }
        IF_ADDR_WUNLOCK(ifp);
        inm_release_list_deferred(&inm_free_tmp);
    }
    IGMP_UNLOCK();
}

/*
 * Hook for domifdetach.
 */
void
igmp_domifdetach(struct ifnet *ifp)
{

    CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)",
        __func__, ifp, ifp->if_xname);

    IGMP_LOCK();
    igi_delete_locked(ifp);
    IGMP_UNLOCK();
}

static void
igi_delete_locked(const struct ifnet *ifp)
{
    struct igmp_ifsoftc *igi, *tigi;

    CTR3(KTR_IGMPV3, "%s: freeing igmp_ifsoftc for ifp %p(%s)",
        __func__, ifp, ifp->if_xname);

    IGMP_LOCK_ASSERT();

    LIST_FOREACH_SAFE(igi, &V_igi_head, igi_link, tigi) {
        if (igi->igi_ifp == ifp) {
            /*
             * Free deferred General Query responses.
             */
            mbufq_drain(&igi->igi_gq);

            LIST_REMOVE(igi, igi_link);
            free(igi, M_IGMP);
            return;
        }
    }
}

/*
 * Process a received IGMPv1 query.
 * Return non-zero if the message should be dropped.
 *
 * VIMAGE: The curvnet pointer is derived from the input ifp.
 */
static int
igmp_input_v1_query(struct ifnet *ifp, const struct ip *ip,
    const struct igmp *igmp)
{
    struct ifmultiaddr *ifma;
    struct igmp_ifsoftc *igi;
    struct in_multi *inm;

    NET_EPOCH_ASSERT();

    /*
     * IGMPv1 Host Membership Queries SHOULD always be addressed to
     * 224.0.0.1.  They are always treated as General Queries.
     * igmp_group is always ignored.  Do not drop it as a userland
     * daemon may wish to see it.
     * XXX SMPng: unlocked increments in igmpstat assumed atomic.
     */
    if (!in_allhosts(ip->ip_dst) || !in_nullhost(igmp->igmp_group)) {
        IGMPSTAT_INC(igps_rcv_badqueries);
        return (0);
    }
    IGMPSTAT_INC(igps_rcv_gen_queries);

    IN_MULTI_LIST_LOCK();
    IGMP_LOCK();

    igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
    KASSERT(igi != NULL, ("%s: no igmp_ifsoftc for ifp %p", __func__, ifp));

    if (igi->igi_flags & IGIF_LOOPBACK) {
        CTR2(KTR_IGMPV3, "ignore v1 query on IGIF_LOOPBACK ifp %p(%s)",
            ifp, ifp->if_xname);
        goto out_locked;
    }

    /*
     * Switch to IGMPv1 host compatibility mode.
     */
    igmp_set_version(igi, IGMP_VERSION_1);

    CTR2(KTR_IGMPV3, "process v1 query on ifp %p(%s)", ifp, ifp->if_xname);

    /*
     * Start the timers in all of our group records
     * for the interface on which the query arrived,
     * except those which are already running.
     */
    CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
        if (ifma->ifma_addr->sa_family != AF_INET ||
            ifma->ifma_protospec == NULL)
            continue;
        inm = (struct in_multi *)ifma->ifma_protospec;
        if (inm->inm_timer != 0)
            continue;
        switch (inm->inm_state) {
        case IGMP_NOT_MEMBER:
        case IGMP_SILENT_MEMBER:
            break;
        case IGMP_G_QUERY_PENDING_MEMBER:
        case IGMP_SG_QUERY_PENDING_MEMBER:
        case IGMP_REPORTING_MEMBER:
        case IGMP_IDLE_MEMBER:
        case IGMP_LAZY_MEMBER:
        case IGMP_AWAKENING_MEMBER:
            inm->inm_state = IGMP_REPORTING_MEMBER;
            inm->inm_timer = IGMP_RANDOM_DELAY(
                IGMP_V1V2_MAX_RI * PR_FASTHZ);
            V_current_state_timers_running = 1;
            break;
        case IGMP_SLEEPING_MEMBER:
            inm->inm_state = IGMP_AWAKENING_MEMBER;
            break;
        case IGMP_LEAVING_MEMBER:
            break;
        }
    }

out_locked:
    IGMP_UNLOCK();
    IN_MULTI_LIST_UNLOCK();

    return (0);
}

/*
 * Process a received IGMPv2 general or group-specific query.
 */
static int
igmp_input_v2_query(struct ifnet *ifp, const struct ip *ip,
    const struct igmp *igmp)
{
    struct ifmultiaddr *ifma;
    struct igmp_ifsoftc *igi;
    struct in_multi *inm;
    int is_general_query;
    uint16_t timer;

    NET_EPOCH_ASSERT();

    is_general_query = 0;

    /*
     * Validate address fields upfront.
     * XXX SMPng: unlocked increments in igmpstat assumed atomic.
     */
    if (in_nullhost(igmp->igmp_group)) {
        /*
         * IGMPv2 General Query.
         * If this was not sent to the all-hosts group, ignore it.
         */
        if (!in_allhosts(ip->ip_dst))
            return (0);
        IGMPSTAT_INC(igps_rcv_gen_queries);
        is_general_query = 1;
    } else {
        /* IGMPv2 Group-Specific Query. */
        IGMPSTAT_INC(igps_rcv_group_queries);
    }

    IN_MULTI_LIST_LOCK();
    IGMP_LOCK();

    igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
    KASSERT(igi != NULL, ("%s: no igmp_ifsoftc for ifp %p", __func__, ifp));

    if (igi->igi_flags & IGIF_LOOPBACK) {
        CTR2(KTR_IGMPV3, "ignore v2 query on IGIF_LOOPBACK ifp %p(%s)",
            ifp, ifp->if_xname);
        goto out_locked;
    }

    /*
     * Ignore v2 query if in v1 Compatibility Mode.
     */
    if (igi->igi_version == IGMP_VERSION_1)
        goto out_locked;

    igmp_set_version(igi, IGMP_VERSION_2);

    timer = igmp->igmp_code * PR_FASTHZ / IGMP_TIMER_SCALE;
    if (timer == 0)
        timer = 1;

    if (is_general_query) {
        /*
         * For each reporting group joined on this
         * interface, kick the report timer.
         */
        CTR2(KTR_IGMPV3, "process v2 general query on ifp %p(%s)",
            ifp, ifp->if_xname);
        CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
            if (ifma->ifma_addr->sa_family != AF_INET ||
                ifma->ifma_protospec == NULL)
                continue;
            inm = (struct in_multi *)ifma->ifma_protospec;
            igmp_v2_update_group(inm, timer);
        }
    } else {
        /*
         * Group-specific IGMPv2 query, we need only
         * look up the single group to process it.
         */
        inm = inm_lookup(ifp, igmp->igmp_group);
        if (inm != NULL) {
            CTR3(KTR_IGMPV3,
                "process v2 query 0x%08x on ifp %p(%s)",
                ntohl(igmp->igmp_group.s_addr), ifp, ifp->if_xname);
            igmp_v2_update_group(inm, timer);
        }
    }

out_locked:
    IGMP_UNLOCK();
    IN_MULTI_LIST_UNLOCK();

    return (0);
}

/*
 * Update the report timer on a group in response to an IGMPv2 query.
 *
 * If we are becoming the reporting member for this group, start the timer.
 * If we already are the reporting member for this group, and timer is
 * below the threshold, reset it.
 *
 * We may be updating the group for the first time since we switched
 * to IGMPv3.  If we are, then we must clear any recorded source lists,
 * and transition to REPORTING state; the group timer is overloaded
 * for group and group-source query responses.
 *
 * Unlike IGMPv3, the delay per group should be jittered
 * to avoid bursts of IGMPv2 reports.
 */
static void
igmp_v2_update_group(struct in_multi *inm, const int timer)
{

    CTR4(KTR_IGMPV3, "%s: 0x%08x/%s timer=%d", __func__,
        ntohl(inm->inm_addr.s_addr), inm->inm_ifp->if_xname, timer);

    IN_MULTI_LIST_LOCK_ASSERT();

    switch (inm->inm_state) {
    case IGMP_NOT_MEMBER:
    case IGMP_SILENT_MEMBER:
        break;
    case IGMP_REPORTING_MEMBER:
        if (inm->inm_timer != 0 &&
            inm->inm_timer <= timer) {
            CTR1(KTR_IGMPV3, "%s: REPORTING and timer running, "
                "skipping.", __func__);
            break;
        }
        /* FALLTHROUGH */
    case IGMP_SG_QUERY_PENDING_MEMBER:
    case IGMP_G_QUERY_PENDING_MEMBER:
    case IGMP_IDLE_MEMBER:
    case IGMP_LAZY_MEMBER:
    case IGMP_AWAKENING_MEMBER:
        CTR1(KTR_IGMPV3, "%s: ->REPORTING", __func__);
        inm->inm_state = IGMP_REPORTING_MEMBER;
        inm->inm_timer = IGMP_RANDOM_DELAY(timer);
        V_current_state_timers_running = 1;
        break;
    case IGMP_SLEEPING_MEMBER:
        CTR1(KTR_IGMPV3, "%s: ->AWAKENING", __func__);
        inm->inm_state = IGMP_AWAKENING_MEMBER;
        break;
    case IGMP_LEAVING_MEMBER:
        break;
    }
}

/*
 * Process a received IGMPv3 general, group-specific or
 * group-and-source-specific query.
 * Assumes m has already been pulled up to the full IGMP message length.
 * Return 0 if successful, otherwise an appropriate error code is returned.
 */
static int
igmp_input_v3_query(struct ifnet *ifp, const struct ip *ip,
    /*const*/ struct igmpv3 *igmpv3)
{
    struct igmp_ifsoftc *igi;
    struct in_multi *inm;
    int is_general_query;
    uint32_t maxresp, nsrc, qqi;
    uint16_t timer;
    uint8_t qrv;

    is_general_query = 0;

    CTR2(KTR_IGMPV3, "process v3 query on ifp %p(%s)", ifp, ifp->if_xname);

    maxresp = igmpv3->igmp_code;	/* in 1/10ths of a second */
    if (maxresp >= 128) {
        maxresp = IGMP_MANT(igmpv3->igmp_code) <<
            (IGMP_EXP(igmpv3->igmp_code) + 3);
    }
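
    /*
     * Example: per RFC 3376 section 4.1.1, codes below 128 are linear
     * tenths of a second (at most 12.7s), while codes >= 128 use a
     * floating-point encoding (3-bit exponent, 4-bit mantissa) decoded
     * above, trading granularity for a much larger maximum response
     * time.  The QQI field below uses the same encoding.
     */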

    /*
     * Robustness must never be less than 2 for on-wire IGMPv3.
     * FUTURE: Check if ifp has IGIF_LOOPBACK set, as we will make
     * an exception for interfaces whose IGMPv3 state changes
     * are redirected to loopback (e.g. MANET).
     */
    qrv = IGMP_QRV(igmpv3->igmp_misc);
    if (qrv < 2) {
        CTR3(KTR_IGMPV3, "%s: clamping qrv %d to %d", __func__,
            qrv, IGMP_RV_INIT);
        qrv = IGMP_RV_INIT;
    }

    qqi = igmpv3->igmp_qqi;
    if (qqi >= 128) {
        qqi = IGMP_MANT(igmpv3->igmp_qqi) <<
            (IGMP_EXP(igmpv3->igmp_qqi) + 3);
    }

    timer = maxresp * PR_FASTHZ / IGMP_TIMER_SCALE;
    if (timer == 0)
        timer = 1;

    nsrc = ntohs(igmpv3->igmp_numsrc);

    /*
     * Validate address fields and versions upfront before
     * accepting v3 query.
     * XXX SMPng: Unlocked access to igmpstat counters here.
     */
    if (in_nullhost(igmpv3->igmp_group)) {
        /*
         * IGMPv3 General Query.
         *
         * General Queries SHOULD be directed to 224.0.0.1.
         * A general query with a source list has undefined
         * behaviour; discard it.
         */
        IGMPSTAT_INC(igps_rcv_gen_queries);
        if (!in_allhosts(ip->ip_dst) || nsrc > 0) {
            IGMPSTAT_INC(igps_rcv_badqueries);
            return (0);
        }
        is_general_query = 1;
    } else {
        /* Group or group-source specific query. */
        if (nsrc == 0)
            IGMPSTAT_INC(igps_rcv_group_queries);
        else
            IGMPSTAT_INC(igps_rcv_gsr_queries);
    }

    IN_MULTI_LIST_LOCK();
    IGMP_LOCK();

    igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
    KASSERT(igi != NULL, ("%s: no igmp_ifsoftc for ifp %p", __func__, ifp));

    if (igi->igi_flags & IGIF_LOOPBACK) {
        CTR2(KTR_IGMPV3, "ignore v3 query on IGIF_LOOPBACK ifp %p(%s)",
            ifp, ifp->if_xname);
        goto out_locked;
    }

    /*
     * Discard the v3 query if we're in Compatibility Mode.
     * The RFC is not obviously worded that hosts need to stay in
     * compatibility mode until the Old Version Querier Present
     * timer expires.
     */
    if (igi->igi_version != IGMP_VERSION_3) {
        CTR3(KTR_IGMPV3, "ignore v3 query in v%d mode on ifp %p(%s)",
            igi->igi_version, ifp, ifp->if_xname);
        goto out_locked;
    }

    igmp_set_version(igi, IGMP_VERSION_3);
    igi->igi_rv = qrv;
    igi->igi_qi = qqi;
    igi->igi_qri = maxresp;

    CTR4(KTR_IGMPV3, "%s: qrv %d qi %d qri %d", __func__, qrv, qqi,
        maxresp);

    if (is_general_query) {
        /*
         * Schedule a current-state report on this ifp for
         * all groups, possibly containing source lists.
         * If there is a pending General Query response
         * scheduled earlier than the selected delay, do
         * not schedule any other reports.
         * Otherwise, reset the interface timer.
         */
        CTR2(KTR_IGMPV3, "process v3 general query on ifp %p(%s)",
            ifp, ifp->if_xname);
        if (igi->igi_v3_timer == 0 || igi->igi_v3_timer >= timer) {
            igi->igi_v3_timer = IGMP_RANDOM_DELAY(timer);
            V_interface_timers_running = 1;
        }
    } else {
        /*
         * Group-source-specific queries are throttled on
         * a per-group basis to defeat denial-of-service attempts.
         * Queries for groups we are not a member of on this
         * link are simply ignored.
         */
        inm = inm_lookup(ifp, igmpv3->igmp_group);
        if (inm == NULL)
            goto out_locked;
        if (nsrc > 0) {
            if (!ratecheck(&inm->inm_lastgsrtv,
                &V_igmp_gsrdelay)) {
                CTR1(KTR_IGMPV3, "%s: GS query throttled.",
                    __func__);
                IGMPSTAT_INC(igps_drop_gsr_queries);
                goto out_locked;
            }
        }
        CTR3(KTR_IGMPV3, "process v3 0x%08x query on ifp %p(%s)",
            ntohl(igmpv3->igmp_group.s_addr), ifp, ifp->if_xname);
        /*
         * If there is a pending General Query response
         * scheduled sooner than the selected delay, no
         * further report need be scheduled.
         * Otherwise, prepare to respond to the
         * group-specific or group-and-source query.
         */
        if (igi->igi_v3_timer == 0 || igi->igi_v3_timer >= timer)
            igmp_input_v3_group_query(inm, igi, timer, igmpv3);
    }

out_locked:
    IGMP_UNLOCK();
    IN_MULTI_LIST_UNLOCK();

    return (0);
}

/*
 * Process a received IGMPv3 group-specific or group-and-source-specific
 * query.
 * Return <0 if any error occurred.  Currently this is ignored.
 */
static int
igmp_input_v3_group_query(struct in_multi *inm, struct igmp_ifsoftc *igi,
    int timer, /*const*/ struct igmpv3 *igmpv3)
{
    int retval;
    uint16_t nsrc;

    IN_MULTI_LIST_LOCK_ASSERT();
    IGMP_LOCK_ASSERT();

    retval = 0;

    switch (inm->inm_state) {
    case IGMP_NOT_MEMBER:
    case IGMP_SILENT_MEMBER:
    case IGMP_SLEEPING_MEMBER:
    case IGMP_LAZY_MEMBER:
    case IGMP_AWAKENING_MEMBER:
    case IGMP_IDLE_MEMBER:
    case IGMP_LEAVING_MEMBER:
        return (retval);
        break;
    case IGMP_REPORTING_MEMBER:
    case IGMP_G_QUERY_PENDING_MEMBER:
    case IGMP_SG_QUERY_PENDING_MEMBER:
        break;
    }

    nsrc = ntohs(igmpv3->igmp_numsrc);

    /*
     * Deal with group-specific queries upfront.
     * If any group query is already pending, purge any recorded
     * source-list state if it exists, and schedule a query response
     * for this group-specific query.
     */
    if (nsrc == 0) {
        if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER ||
            inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER) {
            inm_clear_recorded(inm);
            timer = min(inm->inm_timer, timer);
        }
        inm->inm_state = IGMP_G_QUERY_PENDING_MEMBER;
        inm->inm_timer = IGMP_RANDOM_DELAY(timer);
        V_current_state_timers_running = 1;
        return (retval);
    }

    /*
     * Deal with the case where a group-and-source-specific query has
     * been received but a group-specific query is already pending.
     */
    if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER) {
        timer = min(inm->inm_timer, timer);
        inm->inm_timer = IGMP_RANDOM_DELAY(timer);
        V_current_state_timers_running = 1;
        return (retval);
    }

    /*
     * Finally, deal with the case where a group-and-source-specific
     * query has been received, where a response to a previous g-s-r
     * query exists, or none exists.
     * In this case, we need to parse the source-list which the Querier
     * has provided us with and check if we have any source list filter
     * entries at T1 for these sources.  If we do not, there is no need
     * to schedule a report and the query may be dropped.
     * If we do, we must record them and schedule a current-state
     * report for those sources.
     * FIXME: Handling source lists larger than 1 mbuf requires that
     * we pass the mbuf chain pointer down to this function, and use
     * m_getptr() to walk the chain.
     */
    if (inm->inm_nsrc > 0) {
        const struct in_addr *ap;
        int i, nrecorded;

        ap = (const struct in_addr *)(igmpv3 + 1);
        nrecorded = 0;
        for (i = 0; i < nsrc; i++, ap++) {
            retval = inm_record_source(inm, ap->s_addr);
            if (retval < 0)
                break;
            nrecorded += retval;
        }
        if (nrecorded > 0) {
            CTR1(KTR_IGMPV3,
                "%s: schedule response to SG query", __func__);
            inm->inm_state = IGMP_SG_QUERY_PENDING_MEMBER;
            inm->inm_timer = IGMP_RANDOM_DELAY(timer);
            V_current_state_timers_running = 1;
        }
    }

    return (retval);
}

/*
 * Process a received IGMPv1 host membership report.
 *
 * NOTE: 0.0.0.0 workaround breaks const correctness.
 */
static int
igmp_input_v1_report(struct ifnet *ifp, /*const*/ struct ip *ip,
    /*const*/ struct igmp *igmp)
{
    struct in_ifaddr *ia;
    struct in_multi *inm;

    IGMPSTAT_INC(igps_rcv_reports);

    if (ifp->if_flags & IFF_LOOPBACK)
        return (0);

    if (!IN_MULTICAST(ntohl(igmp->igmp_group.s_addr)) ||
        !in_hosteq(igmp->igmp_group, ip->ip_dst)) {
        IGMPSTAT_INC(igps_rcv_badreports);
        return (EINVAL);
    }

    /*
     * RFC 3376, Section 4.2.13, 9.2, 9.3:
     * Booting clients may use the source address 0.0.0.0.  Some
     * IGMP daemons may not know how to use IP_RECVIF to determine
     * the interface upon which this message was received.
     * Replace 0.0.0.0 with the subnet address if told to do so.
     */
    if (V_igmp_recvifkludge && in_nullhost(ip->ip_src)) {
        IFP_TO_IA(ifp, ia);
        if (ia != NULL)
            ip->ip_src.s_addr = htonl(ia->ia_subnet);
    }

    CTR3(KTR_IGMPV3, "process v1 report 0x%08x on ifp %p(%s)",
        ntohl(igmp->igmp_group.s_addr), ifp, ifp->if_xname);

    /*
     * IGMPv1 report suppression.
     * If we are a member of this group, and our membership should be
     * reported, stop our group timer and transition to the 'lazy' state.
     */
    IN_MULTI_LIST_LOCK();
    inm = inm_lookup(ifp, igmp->igmp_group);
    if (inm != NULL) {
        struct igmp_ifsoftc *igi;

        igi = inm->inm_igi;
        if (igi == NULL) {
            KASSERT(igi != NULL,
                ("%s: no igi for ifp %p", __func__, ifp));
            goto out_locked;
        }

        IGMPSTAT_INC(igps_rcv_ourreports);

        /*
         * If we are in IGMPv3 host mode, do not allow the
         * other host's IGMPv1 report to suppress our reports
         * unless explicitly configured to do so.
         */
        if (igi->igi_version == IGMP_VERSION_3) {
            if (V_igmp_legacysupp)
                igmp_v3_suppress_group_record(inm);
            goto out_locked;
        }

        inm->inm_timer = 0;

        switch (inm->inm_state) {
        case IGMP_NOT_MEMBER:
        case IGMP_SILENT_MEMBER:
            break;
        case IGMP_IDLE_MEMBER:
        case IGMP_LAZY_MEMBER:
        case IGMP_AWAKENING_MEMBER:
            CTR3(KTR_IGMPV3,
                "report suppressed for 0x%08x on ifp %p(%s)",
                ntohl(igmp->igmp_group.s_addr), ifp,
                ifp->if_xname);
        case IGMP_SLEEPING_MEMBER:
            inm->inm_state = IGMP_SLEEPING_MEMBER;
            break;
        case IGMP_REPORTING_MEMBER:
            CTR3(KTR_IGMPV3,
                "report suppressed for 0x%08x on ifp %p(%s)",
                ntohl(igmp->igmp_group.s_addr), ifp,
                ifp->if_xname);
            if (igi->igi_version == IGMP_VERSION_1)
                inm->inm_state = IGMP_LAZY_MEMBER;
            else if (igi->igi_version == IGMP_VERSION_2)
                inm->inm_state = IGMP_SLEEPING_MEMBER;
            break;
        case IGMP_G_QUERY_PENDING_MEMBER:
        case IGMP_SG_QUERY_PENDING_MEMBER:
        case IGMP_LEAVING_MEMBER:
            break;
        }
    }

out_locked:
    IN_MULTI_LIST_UNLOCK();

    return (0);
}

/*
 * Process a received IGMPv2 host membership report.
 *
 * NOTE: 0.0.0.0 workaround breaks const correctness.
 */
static int
igmp_input_v2_report(struct ifnet *ifp, /*const*/ struct ip *ip,
    /*const*/ struct igmp *igmp)
{
    struct in_ifaddr *ia;
    struct in_multi *inm;

    /*
     * Make sure we don't hear our own membership report.  Fast
     * leave requires knowing that we are the only member of a
     * group.
     */
    IFP_TO_IA(ifp, ia);
    if (ia != NULL && in_hosteq(ip->ip_src, IA_SIN(ia)->sin_addr)) {
        return (0);
    }

    IGMPSTAT_INC(igps_rcv_reports);

    if (ifp->if_flags & IFF_LOOPBACK) {
        return (0);
    }

    if (!IN_MULTICAST(ntohl(igmp->igmp_group.s_addr)) ||
        !in_hosteq(igmp->igmp_group, ip->ip_dst)) {
        IGMPSTAT_INC(igps_rcv_badreports);
        return (EINVAL);
    }

    /*
     * RFC 3376, Section 4.2.13, 9.2, 9.3:
     * Booting clients may use the source address 0.0.0.0.  Some
     * IGMP daemons may not know how to use IP_RECVIF to determine
     * the interface upon which this message was received.
     * Replace 0.0.0.0 with the subnet address if told to do so.
     */
    if (V_igmp_recvifkludge && in_nullhost(ip->ip_src)) {
        if (ia != NULL)
            ip->ip_src.s_addr = htonl(ia->ia_subnet);
    }

    CTR3(KTR_IGMPV3, "process v2 report 0x%08x on ifp %p(%s)",
        ntohl(igmp->igmp_group.s_addr), ifp, ifp->if_xname);

    /*
     * IGMPv2 report suppression.
     * If we are a member of this group, and our membership should be
     * reported, and our group timer is pending or about to be reset,
     * stop our group timer by transitioning to the 'lazy' state.
     */
    IN_MULTI_LIST_LOCK();
    inm = inm_lookup(ifp, igmp->igmp_group);
    if (inm != NULL) {
        struct igmp_ifsoftc *igi;

        igi = inm->inm_igi;
        KASSERT(igi != NULL, ("%s: no igi for ifp %p", __func__, ifp));

        IGMPSTAT_INC(igps_rcv_ourreports);

        /*
         * If we are in IGMPv3 host mode, do not allow the
         * other host's IGMPv2 report to suppress our reports
         * unless explicitly configured to do so.
         */
        if (igi->igi_version == IGMP_VERSION_3) {
            if (V_igmp_legacysupp)
                igmp_v3_suppress_group_record(inm);
            goto out_locked;
        }

        inm->inm_timer = 0;

        switch (inm->inm_state) {
        case IGMP_NOT_MEMBER:
        case IGMP_SILENT_MEMBER:
        case IGMP_SLEEPING_MEMBER:
            break;
        case IGMP_REPORTING_MEMBER:
        case IGMP_IDLE_MEMBER:
        case IGMP_AWAKENING_MEMBER:
            CTR3(KTR_IGMPV3,
                "report suppressed for 0x%08x on ifp %p(%s)",
                ntohl(igmp->igmp_group.s_addr), ifp, ifp->if_xname);
        case IGMP_LAZY_MEMBER:
            inm->inm_state = IGMP_LAZY_MEMBER;
            break;
        case IGMP_G_QUERY_PENDING_MEMBER:
        case IGMP_SG_QUERY_PENDING_MEMBER:
        case IGMP_LEAVING_MEMBER:
            break;
        }
    }

out_locked:
    IN_MULTI_LIST_UNLOCK();

    return (0);
}

int
igmp_input(struct mbuf **mp, int *offp, int proto)
{
    int iphlen;
    struct ifnet *ifp;
    struct igmp *igmp;
    struct ip *ip;
    struct mbuf *m;
    int igmplen;
    int minlen;
    int queryver;

    CTR3(KTR_IGMPV3, "%s: called w/mbuf (%p,%d)", __func__, *mp, *offp);

    m = *mp;
    ifp = m->m_pkthdr.rcvif;
    *mp = NULL;

    IGMPSTAT_INC(igps_rcv_total);

    ip = mtod(m, struct ip *);
    iphlen = *offp;
    igmplen = ntohs(ip->ip_len) - iphlen;

    /*
     * Validate lengths.
     */
    if (igmplen < IGMP_MINLEN) {
        IGMPSTAT_INC(igps_rcv_tooshort);
        m_freem(m);
        return (IPPROTO_DONE);
    }

    /*
     * Always pullup to the minimum size for v1/v2 or v3
     * to amortize calls to m_pullup().
     */
    minlen = iphlen;
    if (igmplen >= IGMP_V3_QUERY_MINLEN)
        minlen += IGMP_V3_QUERY_MINLEN;
    else
        minlen += IGMP_MINLEN;
    if ((!M_WRITABLE(m) || m->m_len < minlen) &&
        (m = m_pullup(m, minlen)) == NULL) {
        IGMPSTAT_INC(igps_rcv_tooshort);
        return (IPPROTO_DONE);
    }
    ip = mtod(m, struct ip *);

    /*
     * Validate checksum.
     */
    m->m_data += iphlen;
    m->m_len -= iphlen;
    igmp = mtod(m, struct igmp *);
    if (in_cksum(m, igmplen)) {
        IGMPSTAT_INC(igps_rcv_badsum);
        m_freem(m);
        return (IPPROTO_DONE);
    }
    m->m_data -= iphlen;
    m->m_len += iphlen;

    /*
     * IGMP control traffic is link-scope, and must have a TTL of 1.
     * DVMRP traffic (e.g. mrinfo, mtrace) is an exception;
     * probe packets may come from beyond the LAN.
     */
    if (igmp->igmp_type != IGMP_DVMRP && ip->ip_ttl != 1) {
        IGMPSTAT_INC(igps_rcv_badttl);
        m_freem(m);
        return (IPPROTO_DONE);
    }

    switch (igmp->igmp_type) {
    case IGMP_HOST_MEMBERSHIP_QUERY:
        if (igmplen == IGMP_MINLEN) {
            if (igmp->igmp_code == 0)
                queryver = IGMP_VERSION_1;
            else
                queryver = IGMP_VERSION_2;
        } else if (igmplen >= IGMP_V3_QUERY_MINLEN) {
            queryver = IGMP_VERSION_3;
        } else {
            IGMPSTAT_INC(igps_rcv_tooshort);
            m_freem(m);
            return (IPPROTO_DONE);
        }

        switch (queryver) {
        case IGMP_VERSION_1:
            IGMPSTAT_INC(igps_rcv_v1v2_queries);
            if (!V_igmp_v1enable)
                break;
            if (igmp_input_v1_query(ifp, ip, igmp) != 0) {
                m_freem(m);
                return (IPPROTO_DONE);
            }
            break;

        case IGMP_VERSION_2:
            IGMPSTAT_INC(igps_rcv_v1v2_queries);
            if (!V_igmp_v2enable)
                break;
            if (igmp_input_v2_query(ifp, ip, igmp) != 0) {
                m_freem(m);
                return (IPPROTO_DONE);
            }
            break;

        case IGMP_VERSION_3: {
            struct igmpv3 *igmpv3;
            uint16_t igmpv3len;
            uint16_t nsrc;

            IGMPSTAT_INC(igps_rcv_v3_queries);
            igmpv3 = (struct igmpv3 *)igmp;
            /*
             * Validate length based on source count.
             */
            nsrc = ntohs(igmpv3->igmp_numsrc);
            if (nsrc * sizeof(in_addr_t) >
                UINT16_MAX - iphlen - IGMP_V3_QUERY_MINLEN) {
                IGMPSTAT_INC(igps_rcv_tooshort);
                m_freem(m);
                return (IPPROTO_DONE);
            }
            /*
             * m_pullup() may modify m, so pullup in
             * this scope.
             */
            igmpv3len = iphlen + IGMP_V3_QUERY_MINLEN +
                sizeof(struct in_addr) * nsrc;
            if ((!M_WRITABLE(m) ||
                m->m_len < igmpv3len) &&
                (m = m_pullup(m, igmpv3len)) == NULL) {
                IGMPSTAT_INC(igps_rcv_tooshort);
                return (IPPROTO_DONE);
            }
            igmpv3 = (struct igmpv3 *)(mtod(m, uint8_t *)
                + iphlen);
            if (igmp_input_v3_query(ifp, ip, igmpv3) != 0) {
                m_freem(m);
                return (IPPROTO_DONE);
            }
        }
            break;
        }
        break;

    case IGMP_v1_HOST_MEMBERSHIP_REPORT:
        if (!V_igmp_v1enable)
            break;
        if (igmp_input_v1_report(ifp, ip, igmp) != 0) {
            m_freem(m);
            return (IPPROTO_DONE);
        }
        break;

    case IGMP_v2_HOST_MEMBERSHIP_REPORT:
        if (!V_igmp_v2enable)
            break;
        if (!ip_checkrouteralert(m))
            IGMPSTAT_INC(igps_rcv_nora);
        if (igmp_input_v2_report(ifp, ip, igmp) != 0) {
            m_freem(m);
            return (IPPROTO_DONE);
        }
        break;

    case IGMP_v3_HOST_MEMBERSHIP_REPORT:
        /*
         * Hosts do not need to process IGMPv3 membership reports,
         * as report suppression is no longer required.
         */
        if (!ip_checkrouteralert(m))
            IGMPSTAT_INC(igps_rcv_nora);
        break;

    default:
        break;
    }

    /*
     * Pass all valid IGMP packets up to any process(es) listening on a
     * raw IGMP socket.
     */
    *mp = m;
    return (rip_input(mp, offp, proto));
}

/*
 * Fast timeout handler (global).
 * VIMAGE: Timeout handlers are expected to service all vimages.
 */
void
igmp_fasttimo(void)
{
    VNET_ITERATOR_DECL(vnet_iter);

    VNET_LIST_RLOCK_NOSLEEP();
    VNET_FOREACH(vnet_iter) {
        CURVNET_SET(vnet_iter);
        igmp_fasttimo_vnet();
        CURVNET_RESTORE();
    }
    VNET_LIST_RUNLOCK_NOSLEEP();
}
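
/*
 * Note: the protocol fast timeout fires PR_FASTHZ (5) times per second,
 * so every IGMP timer counted down via this path is in units of
 * 1/PR_FASTHZ seconds; hence the PR_FASTHZ scaling wherever timers are
 * loaded from query fields in the input handlers above.
 */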

/*
 * Fast timeout handler (per-vnet).
 * Sends are shuffled off to a netisr to deal with Giant.
 *
 * VIMAGE: Assume caller has set up our curvnet.
 */
static void
igmp_fasttimo_vnet(void)
{
    struct mbufq scq;	/* State-change packets */
    struct mbufq qrq;	/* Query response packets */
    struct ifnet *ifp;
    struct igmp_ifsoftc *igi;
    struct ifmultiaddr *ifma, *next;
    struct in_multi *inm;
    struct in_multi_head inm_free_tmp;
    int loop, uri_fasthz;

    loop = 0;
    uri_fasthz = 0;

    /*
     * Quick check to see if any work needs to be done, in order to
     * minimize the overhead of fasttimo processing.
     * SMPng: XXX Unlocked reads.
     */
    if (!V_current_state_timers_running &&
        !V_interface_timers_running &&
        !V_state_change_timers_running)
        return;

    SLIST_INIT(&inm_free_tmp);
    IN_MULTI_LIST_LOCK();
    IGMP_LOCK();

    /*
     * IGMPv3 General Query response timer processing.
     */
    if (V_interface_timers_running) {
        CTR1(KTR_IGMPV3, "%s: interface timers running", __func__);

        V_interface_timers_running = 0;
        LIST_FOREACH(igi, &V_igi_head, igi_link) {
            if (igi->igi_v3_timer == 0) {
                /* Do nothing. */
            } else if (--igi->igi_v3_timer == 0) {
                igmp_v3_dispatch_general_query(igi);
            } else {
                V_interface_timers_running = 1;
            }
        }
    }

    if (!V_current_state_timers_running &&
        !V_state_change_timers_running)
        goto out_locked;

    V_current_state_timers_running = 0;
    V_state_change_timers_running = 0;

    CTR1(KTR_IGMPV3, "%s: state change timers running", __func__);

    /*
     * IGMPv1/v2/v3 host report and state-change timer processing.
     * Note: Processing a v3 group timer may remove a node.
     */
    LIST_FOREACH(igi, &V_igi_head, igi_link) {
        ifp = igi->igi_ifp;

        if (igi->igi_version == IGMP_VERSION_3) {
            loop = (igi->igi_flags & IGIF_LOOPBACK) ? 1 : 0;
            uri_fasthz = IGMP_RANDOM_DELAY(igi->igi_uri *
                PR_FASTHZ);
            mbufq_init(&qrq, IGMP_MAX_G_GS_PACKETS);
            mbufq_init(&scq, IGMP_MAX_STATE_CHANGE_PACKETS);
        }

        IF_ADDR_WLOCK(ifp);
    restart:
        CK_STAILQ_FOREACH_SAFE(ifma, &ifp->if_multiaddrs, ifma_link, next) {
            if (ifma->ifma_addr->sa_family != AF_INET ||
                ifma->ifma_protospec == NULL)
                continue;
            inm = (struct in_multi *)ifma->ifma_protospec;
            switch (igi->igi_version) {
            case IGMP_VERSION_1:
            case IGMP_VERSION_2:
                igmp_v1v2_process_group_timer(inm,
                    igi->igi_version);
                break;
            case IGMP_VERSION_3:
                igmp_v3_process_group_timers(&inm_free_tmp, &qrq,
                    &scq, inm, uri_fasthz);
                break;
            }
            if (__predict_false(ifma_restart)) {
                ifma_restart = false;
                goto restart;
            }
        }
        IF_ADDR_WUNLOCK(ifp);

        if (igi->igi_version == IGMP_VERSION_3) {
            igmp_dispatch_queue(&qrq, 0, loop);
            igmp_dispatch_queue(&scq, 0, loop);

            /*
             * Free the in_multi reference(s) for this
             * IGMP lifecycle.
             */
            inm_release_list_deferred(&inm_free_tmp);
        }
    }

out_locked:
    IGMP_UNLOCK();
    IN_MULTI_LIST_UNLOCK();
}

/*
 * Update host report group timer for IGMPv1/v2.
 * Will update the global pending timer flags.
 */
static void
igmp_v1v2_process_group_timer(struct in_multi *inm, const int version)
{
    int report_timer_expired;

    IN_MULTI_LIST_LOCK_ASSERT();
    IGMP_LOCK_ASSERT();

    if (inm->inm_timer == 0) {
        report_timer_expired = 0;
    } else if (--inm->inm_timer == 0) {
        report_timer_expired = 1;
    } else {
        V_current_state_timers_running = 1;
        return;
    }

    switch (inm->inm_state) {
    case IGMP_NOT_MEMBER:
    case IGMP_SILENT_MEMBER:
    case IGMP_IDLE_MEMBER:
    case IGMP_LAZY_MEMBER:
    case IGMP_SLEEPING_MEMBER:
    case IGMP_AWAKENING_MEMBER:
        break;
    case IGMP_REPORTING_MEMBER:
        if (report_timer_expired) {
            inm->inm_state = IGMP_IDLE_MEMBER;
            (void)igmp_v1v2_queue_report(inm,
                (version == IGMP_VERSION_2) ?
                 IGMP_v2_HOST_MEMBERSHIP_REPORT :
                 IGMP_v1_HOST_MEMBERSHIP_REPORT);
        }
        break;
    case IGMP_G_QUERY_PENDING_MEMBER:
    case IGMP_SG_QUERY_PENDING_MEMBER:
    case IGMP_LEAVING_MEMBER:
        break;
    }
}

/*
 * Update a group's timers for IGMPv3.
 * Will update the global pending timer flags.
 * Note: Unlocked read from igi.
 */
static void
igmp_v3_process_group_timers(struct in_multi_head *inmh,
    struct mbufq *qrq, struct mbufq *scq,
    struct in_multi *inm, const int uri_fasthz)
{
    int query_response_timer_expired;
    int state_change_retransmit_timer_expired;

    IN_MULTI_LIST_LOCK_ASSERT();
    IGMP_LOCK_ASSERT();

    query_response_timer_expired = 0;
    state_change_retransmit_timer_expired = 0;

    /*
     * During a transition from v1/v2 compatibility mode back to v3,
     * a group record in REPORTING state may still have its group
     * timer active.  This is a no-op in this function; it is easier
     * to deal with it here than to complicate the slow-timeout path.
     */
    if (inm->inm_timer == 0) {
        query_response_timer_expired = 0;
    } else if (--inm->inm_timer == 0) {
        query_response_timer_expired = 1;
    } else {
        V_current_state_timers_running = 1;
    }

    if (inm->inm_sctimer == 0) {
        state_change_retransmit_timer_expired = 0;
    } else if (--inm->inm_sctimer == 0) {
        state_change_retransmit_timer_expired = 1;
    } else {
        V_state_change_timers_running = 1;
    }

    /* We are in fasttimo, so be quick about it. */
    if (!state_change_retransmit_timer_expired &&
        !query_response_timer_expired)
        return;

    switch (inm->inm_state) {
    case IGMP_NOT_MEMBER:
    case IGMP_SILENT_MEMBER:
    case IGMP_SLEEPING_MEMBER:
    case IGMP_LAZY_MEMBER:
    case IGMP_AWAKENING_MEMBER:
    case IGMP_IDLE_MEMBER:
        break;
    case IGMP_G_QUERY_PENDING_MEMBER:
    case IGMP_SG_QUERY_PENDING_MEMBER:
        /*
         * Respond to a previously pending Group-Specific
         * or Group-and-Source-Specific query by enqueueing
         * the appropriate Current-State report for
         * immediate transmission.
         */
        if (query_response_timer_expired) {
            int retval __unused;

            retval = igmp_v3_enqueue_group_record(qrq, inm, 0, 1,
                (inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER));
            CTR2(KTR_IGMPV3, "%s: enqueue record = %d",
                __func__, retval);
            inm->inm_state = IGMP_REPORTING_MEMBER;
            /* XXX Clear recorded sources for next time. */
            inm_clear_recorded(inm);
        }
        /* FALLTHROUGH */
    case IGMP_REPORTING_MEMBER:
    case IGMP_LEAVING_MEMBER:
        if (state_change_retransmit_timer_expired) {
            /*
             * State-change retransmission timer fired.
             * If there are any further pending retransmissions,
             * set the global pending state-change flag, and
             * reset the timer.
             */
            if (--inm->inm_scrv > 0) {
                inm->inm_sctimer = uri_fasthz;
                V_state_change_timers_running = 1;
            }
            /*
             * Retransmit the previously computed state-change
             * report.  If there are no further pending
             * retransmissions, the mbuf queue will be consumed.
             * Update T0 state to T1 as we have now sent
             * a state-change.
             */
            (void)igmp_v3_merge_state_changes(inm, scq);

            inm_commit(inm);
            CTR3(KTR_IGMPV3, "%s: T1 -> T0 for 0x%08x/%s", __func__,
                ntohl(inm->inm_addr.s_addr),
                inm->inm_ifp->if_xname);

            /*
             * If we are leaving the group for good, make sure
             * we release IGMP's reference to it.
             * This release must be deferred using a SLIST,
             * as we are called from a loop which traverses
             * the in_ifmultiaddr TAILQ.
             */
            if (inm->inm_state == IGMP_LEAVING_MEMBER &&
                inm->inm_scrv == 0) {
                inm->inm_state = IGMP_NOT_MEMBER;
                inm_rele_locked(inmh, inm);
            }
        }
        break;
    }
}

/*
 * Suppress a group's pending response to a group or source/group query.
 *
 * Do NOT suppress state changes.  This leads to IGMPv3 inconsistency.
 * Do NOT update ST1/ST0 as this operation merely suppresses
 * the currently pending group record.
 * Do NOT suppress the response to a general query.  It is possible but
 * it would require adding another state or flag.
 */
static void
igmp_v3_suppress_group_record(struct in_multi *inm)
{

    IN_MULTI_LIST_LOCK_ASSERT();

    KASSERT(inm->inm_igi->igi_version == IGMP_VERSION_3,
        ("%s: not IGMPv3 mode on link", __func__));

    if (inm->inm_state != IGMP_G_QUERY_PENDING_MEMBER &&
        inm->inm_state != IGMP_SG_QUERY_PENDING_MEMBER)
        return;

    if (inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER)
        inm_clear_recorded(inm);

    inm->inm_timer = 0;
    inm->inm_state = IGMP_REPORTING_MEMBER;
}

/*
 * Switch to a different IGMP version on the given interface,
 * as per Section 7.2.1.
 */
static void
igmp_set_version(struct igmp_ifsoftc *igi, const int version)
{
    int old_version_timer;

    IGMP_LOCK_ASSERT();

    CTR4(KTR_IGMPV3, "%s: switching to v%d on ifp %p(%s)", __func__,
        version, igi->igi_ifp, igi->igi_ifp->if_xname);

    if (version == IGMP_VERSION_1 || version == IGMP_VERSION_2) {
        /*
         * Compute the "Older Version Querier Present" timer as per
         * Section 8.12.
         */
        old_version_timer = igi->igi_rv * igi->igi_qi + igi->igi_qri;
        old_version_timer *= PR_SLOWHZ;

        if (version == IGMP_VERSION_1) {
            igi->igi_v1_timer = old_version_timer;
            igi->igi_v2_timer = 0;
        } else if (version == IGMP_VERSION_2) {
            igi->igi_v1_timer = 0;
            igi->igi_v2_timer = old_version_timer;
        }
    }

    if (igi->igi_v1_timer == 0 && igi->igi_v2_timer > 0) {
        if (igi->igi_version != IGMP_VERSION_2) {
            igi->igi_version = IGMP_VERSION_2;
            igmp_v3_cancel_link_timers(igi);
        }
    } else if (igi->igi_v1_timer > 0) {
        if (igi->igi_version != IGMP_VERSION_1) {
            igi->igi_version = IGMP_VERSION_1;
            igmp_v3_cancel_link_timers(igi);
        }
    }
}
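
/*
 * Example: with the defaults installed by igi_alloc_locked()
 * (IGMP_RV_INIT = 2, IGMP_QI_INIT = 125s, IGMP_QRI_INIT = 10s), the
 * Older Version Querier Present timeout computed above is
 * 2 * 125 + 10 = 260 seconds, i.e. 520 ticks at PR_SLOWHZ.
 */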

/*
 * Cancel pending IGMPv3 timers for the given link and all groups
 * joined on it; state-change, general-query, and group-query timers.
 *
 * Only ever called on a transition from v3 to Compatibility mode.  Kill
 * the timers stone dead (this may be expensive for large N groups), they
 * will be restarted if Compatibility Mode deems that they must be due to
 * query processing.
 */
static void
igmp_v3_cancel_link_timers(struct igmp_ifsoftc *igi)
{
    struct ifmultiaddr *ifma, *ifmatmp;
    struct ifnet *ifp;
    struct in_multi *inm;
    struct in_multi_head inm_free_tmp;

    CTR3(KTR_IGMPV3, "%s: cancel v3 timers on ifp %p(%s)", __func__,
        igi->igi_ifp, igi->igi_ifp->if_xname);

    IN_MULTI_LIST_LOCK_ASSERT();
    IGMP_LOCK_ASSERT();
    NET_EPOCH_ASSERT();

    SLIST_INIT(&inm_free_tmp);

    /*
     * Stop the v3 General Query Response on this link stone dead.
     * If fasttimo is woken up due to V_interface_timers_running,
     * the flag will be cleared if there are no pending link timers.
     */
    igi->igi_v3_timer = 0;

    /*
     * Now clear the current-state and state-change report timers
     * for all memberships scoped to this link.
     */
    ifp = igi->igi_ifp;
    IF_ADDR_WLOCK(ifp);
    CK_STAILQ_FOREACH_SAFE(ifma, &ifp->if_multiaddrs, ifma_link, ifmatmp) {
        if (ifma->ifma_addr->sa_family != AF_INET ||
            ifma->ifma_protospec == NULL)
            continue;
        inm = (struct in_multi *)ifma->ifma_protospec;
        switch (inm->inm_state) {
        case IGMP_NOT_MEMBER:
        case IGMP_SILENT_MEMBER:
        case IGMP_IDLE_MEMBER:
        case IGMP_LAZY_MEMBER:
        case IGMP_SLEEPING_MEMBER:
        case IGMP_AWAKENING_MEMBER:
            /*
             * These states are either not relevant in v3 mode,
             * or are unreported.  Do nothing.
             */
            break;
        case IGMP_LEAVING_MEMBER:
            /*
             * If we are leaving the group and switching to
             * compatibility mode, we need to release the final
             * reference held for issuing the INCLUDE {}, and
             * transition to REPORTING to ensure the host leave
             * message is sent upstream to the old querier --
             * transition to NOT would lose the leave and race.
             */
            inm_rele_locked(&inm_free_tmp, inm);
            /* FALLTHROUGH */
        case IGMP_G_QUERY_PENDING_MEMBER:
        case IGMP_SG_QUERY_PENDING_MEMBER:
            inm_clear_recorded(inm);
            /* FALLTHROUGH */
        case IGMP_REPORTING_MEMBER:
            inm->inm_state = IGMP_REPORTING_MEMBER;
            break;
        }
        /*
         * Always clear state-change and group report timers.
         * Free any pending IGMPv3 state-change records.
         */
        inm->inm_sctimer = 0;
        inm->inm_timer = 0;
        mbufq_drain(&inm->inm_scq);
    }
    IF_ADDR_WUNLOCK(ifp);

    inm_release_list_deferred(&inm_free_tmp);
}
2114
2115/*
2116 * Update the Older Version Querier Present timers for a link.
2117 * See Section 7.2.1 of RFC 3376.
2118 */
2119static void
2121{
2122
2123 IGMP_LOCK_ASSERT();
2124
2125 if (igi->igi_v1_timer == 0 && igi->igi_v2_timer == 0) {
2126 /*
2127 * IGMPv1 and IGMPv2 Querier Present timers expired.
2128 *
2129 * Revert to IGMPv3.
2130 */
2131 if (igi->igi_version != IGMP_VERSION_3) {
2132 CTR5(KTR_IGMPV3,
2133 "%s: transition from v%d -> v%d on %p(%s)",
2134 __func__, igi->igi_version, IGMP_VERSION_3,
2135 igi->igi_ifp, igi->igi_ifp->if_xname);
2136 igi->igi_version = IGMP_VERSION_3;
2137 }
2138 } else if (igi->igi_v1_timer == 0 && igi->igi_v2_timer > 0) {
2139 /*
2140 * IGMPv1 Querier Present timer expired,
2141 * IGMPv2 Querier Present timer running.
2142 * If IGMPv2 was disabled since last timeout,
2143 * revert to IGMPv3.
2144 * If IGMPv2 is enabled, revert to IGMPv2.
2145 */
2146 if (!V_igmp_v2enable) {
2147 CTR5(KTR_IGMPV3,
2148 "%s: transition from v%d -> v%d on %p(%s)",
2149 __func__, igi->igi_version, IGMP_VERSION_3,
2150 igi->igi_ifp, igi->igi_ifp->if_xname);
2151 igi->igi_v2_timer = 0;
2152 igi->igi_version = IGMP_VERSION_3;
2153 } else {
2154 --igi->igi_v2_timer;
2155 if (igi->igi_version != IGMP_VERSION_2) {
2156 CTR5(KTR_IGMPV3,
2157 "%s: transition from v%d -> v%d on %p(%s)",
2158 __func__, igi->igi_version, IGMP_VERSION_2,
2159 igi->igi_ifp, igi->igi_ifp->if_xname);
2160 igi->igi_version = IGMP_VERSION_2;
2161 igmp_v3_cancel_link_timers(igi);
2162 }
2163 }
2164 } else if (igi->igi_v1_timer > 0) {
2165 /*
2166 * IGMPv1 Querier Present timer running.
2167 * Stop IGMPv2 timer if running.
2168 *
2169 * If IGMPv1 was disabled since last timeout,
2170 * revert to IGMPv3.
2171 * If IGMPv1 is enabled, reset IGMPv2 timer if running.
2172 */
2173 if (!V_igmp_v1enable) {
2174 CTR5(KTR_IGMPV3,
2175 "%s: transition from v%d -> v%d on %p(%s)",
2176 __func__, igi->igi_version, IGMP_VERSION_3,
2177 igi->igi_ifp, igi->igi_ifp->if_xname);
2178 igi->igi_v1_timer = 0;
2179 igi->igi_version = IGMP_VERSION_3;
2180 } else {
2181 --igi->igi_v1_timer;
2182 }
2183 if (igi->igi_v2_timer > 0) {
2184 CTR3(KTR_IGMPV3,
2185 "%s: cancel v2 timer on %p(%s)",
2186 __func__, igi->igi_ifp, igi->igi_ifp->if_xname);
2187 igi->igi_v2_timer = 0;
2188 }
2189 }
2190}
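A hedged timeline of how the countdown above interacts with igmp_set_version(), assuming the ~260 s default computed there (520 ticks at an assumed PR_SLOWHZ of 2):

/*
 * Illustrative timeline; not part of igmp.c.
 *
 *   t = 0       IGMPv1 query heard; igi_v1_timer = 520 ticks, version 1
 *   each pass   --igi_v1_timer (V_igmp_v1enable still set)
 *   t = 260 s   igi_v1_timer reaches 0; the first branch above sees
 *               both old-version timers at 0 and restores IGMP_VERSION_3
 */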
2191
2192/*
2193 * Global slowtimo handler.
2194 * VIMAGE: Timeout handlers are expected to service all vimages.
2195 */
2196void
2197igmp_slowtimo(void)
2198{
2199 VNET_ITERATOR_DECL(vnet_iter);
2200
2201 VNET_LIST_RLOCK_NOSLEEP();
2202 VNET_FOREACH(vnet_iter) {
2203 CURVNET_SET(vnet_iter);
2204 igmp_slowtimo_vnet();
2205 CURVNET_RESTORE();
2206 }
2207 VNET_LIST_RUNLOCK_NOSLEEP();
2208}
2209
2210/*
2211 * Per-vnet slowtimo handler.
2212 */
2213static void
2214igmp_slowtimo_vnet(void)
2215{
2216 struct igmp_ifsoftc *igi;
2217
2218 IGMP_LOCK();
2219
2220 LIST_FOREACH(igi, &V_igi_head, igi_link) {
2221 igmp_v1v2_process_querier_timers(igi);
2222 }
2223
2224 IGMP_UNLOCK();
2225}
2226
2227/*
2228 * Dispatch an IGMPv1/v2 host report or leave message.
2229 * These are always small enough to fit inside a single mbuf.
2230 */
2231static int
2232igmp_v1v2_queue_report(struct in_multi *inm, const int type)
2233{
2234 struct epoch_tracker et;
2235 struct ifnet *ifp;
2236 struct igmp *igmp;
2237 struct ip *ip;
2238 struct mbuf *m;
2239
2240
2241 IN_MULTI_LIST_LOCK_ASSERT();
2242
2243 ifp = inm->inm_ifp;
2244
2245 m = m_gethdr(M_NOWAIT, MT_DATA);
2246 if (m == NULL)
2247 return (ENOMEM);
2248 M_ALIGN(m, sizeof(struct ip) + sizeof(struct igmp));
2249
2250 m->m_pkthdr.len = sizeof(struct ip) + sizeof(struct igmp);
2251
2252 m->m_data += sizeof(struct ip);
2253 m->m_len = sizeof(struct igmp);
2254
2255 igmp = mtod(m, struct igmp *);
2256 igmp->igmp_type = type;
2257 igmp->igmp_code = 0;
2258 igmp->igmp_group = inm->inm_addr;
2259 igmp->igmp_cksum = 0;
2260 igmp->igmp_cksum = in_cksum(m, sizeof(struct igmp));
2261
2262 m->m_data -= sizeof(struct ip);
2263 m->m_len += sizeof(struct ip);
2264
2265 ip = mtod(m, struct ip *);
2266 ip->ip_tos = 0;
2267 ip->ip_len = htons(sizeof(struct ip) + sizeof(struct igmp));
2268 ip->ip_off = 0;
2269 ip->ip_p = IPPROTO_IGMP;
2270 ip->ip_src.s_addr = INADDR_ANY;
2271
2272 if (type == IGMP_HOST_LEAVE_MESSAGE)
2273 ip->ip_dst.s_addr = htonl(INADDR_ALLRTRS_GROUP);
2274 else
2275 ip->ip_dst = inm->inm_addr;
2276
2277 igmp_save_context(m, ifp);
2278
2279 m->m_flags |= M_IGMPV2;
2280 if (inm->inm_igi->igi_flags & IGIF_LOOPBACK)
2281 m->m_flags |= M_IGMP_LOOP;
2282
2283 CTR2(KTR_IGMPV3, "%s: netisr_dispatch(NETISR_IGMP, %p)", __func__, m);
2284 NET_EPOCH_ENTER(et);
2285 netisr_dispatch(NETISR_IGMP, m);
2286 NET_EPOCH_EXIT(et);
2287
2288 return (0);
2289}
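For reference, the 8-octet message built above has the classic RFC 2236 layout; the sketch below annotates each slot with the struct igmp field that fills it.

/*
 * Illustrative layout only; not part of igmp.c.
 *
 *  0                   1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |     Type      | Max Resp Time |           Checksum            |
 * |  (igmp_type)  | (igmp_code=0) |         (igmp_cksum)          |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                  Group Address (igmp_group)                   |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 */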
2290
2291/*
2292 * Process a state change from the upper layer for the given IPv4 group.
2293 *
2294 * Each socket holds a reference on the in_multi in its own ip_moptions.
2295 * The socket layer will have made the necessary updates to the group
2296 * state; it is now up to IGMP to issue a state-change report if there
2297 * has been any change between T0 (when the last state-change was issued)
2298 * and T1 (now).
2299 *
2300 * We use the IGMPv3 state machine at group level. The IGMP module
2301 * however makes the decision as to which IGMP protocol version to speak.
2302 * A state change *from* INCLUDE {} always means an initial join.
2303 * A state change *to* INCLUDE {} always means a final leave.
2304 *
2305 * FUTURE: If IGIF_V3LITE is enabled for this interface, then we can
2306 * save ourselves a bunch of work; any exclusive mode groups need not
2307 * compute source filter lists.
2308 *
2309 * VIMAGE: curvnet should have been set by caller, as this routine
2310 * is called from the socket option handlers.
2311 */
2312int
2313igmp_change_state(struct in_multi *inm)
2314{
2315 struct igmp_ifsoftc *igi;
2316 struct ifnet *ifp;
2317 int error;
2318
2319 error = 0;
2320 IN_MULTI_LOCK_ASSERT();
2321 /*
2322 * Try to detect if the upper layer just asked us to change state
2323 * for an interface which has now gone away.
2324 */
2325 KASSERT(inm->inm_ifma != NULL, ("%s: no ifma", __func__));
2326 ifp = inm->inm_ifma->ifma_ifp;
2327 /*
2328 * Sanity check that netinet's notion of ifp is the
2329 * same as net's.
2330 */
2331 KASSERT(inm->inm_ifp == ifp, ("%s: bad ifp", __func__));
2332
2333 IGMP_LOCK();
2334
2335 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
2336 KASSERT(igi != NULL, ("%s: no igmp_ifsoftc for ifp %p", __func__, ifp));
2337
2338 /*
2339 * If we detect a state transition to or from MCAST_UNDEFINED
2340 * for this group, then we are starting or finishing an IGMP
2341 * life cycle for this group.
2342 */
2343 if (inm->inm_st[1].iss_fmode != inm->inm_st[0].iss_fmode) {
2344 CTR3(KTR_IGMPV3, "%s: inm transition %d -> %d", __func__,
2345 inm->inm_st[0].iss_fmode, inm->inm_st[1].iss_fmode);
2346 if (inm->inm_st[0].iss_fmode == MCAST_UNDEFINED) {
2347 CTR1(KTR_IGMPV3, "%s: initial join", __func__);
2348 error = igmp_initial_join(inm, igi);
2349 goto out_locked;
2350 } else if (inm->inm_st[1].iss_fmode == MCAST_UNDEFINED) {
2351 CTR1(KTR_IGMPV3, "%s: final leave", __func__);
2352 igmp_final_leave(inm, igi);
2353 goto out_locked;
2354 }
2355 } else {
2356 CTR1(KTR_IGMPV3, "%s: filter set change", __func__);
2357 }
2358
2359 error = igmp_handle_state_change(inm, igi);
2360
2361out_locked:
2362 IGMP_UNLOCK();
2363 return (error);
2364}
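As a usage illustration only (not part of igmp.c): a minimal userland sketch that exercises this path. The IP_ADD_MEMBERSHIP setsockopt is handled by the socket-layer multicast code, which updates the in_multi state and then calls igmp_change_state() with curvnet set; the group address below is arbitrary.

/* Illustrative userland sketch; error handling abbreviated. */
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct ip_mreq mreq;
	int s;

	s = socket(AF_INET, SOCK_DGRAM, 0);
	if (s == -1)
		return (1);

	memset(&mreq, 0, sizeof(mreq));
	mreq.imr_multiaddr.s_addr = inet_addr("239.1.1.1");
	mreq.imr_interface.s_addr = INADDR_ANY;

	/* Initial join: T0 is MCAST_UNDEFINED, so igmp_initial_join() runs. */
	if (setsockopt(s, IPPROTO_IP, IP_ADD_MEMBERSHIP,
	    &mreq, sizeof(mreq)) == -1) {
		close(s);
		return (1);
	}

	/* Final leave: T1 becomes MCAST_UNDEFINED, so igmp_final_leave() runs. */
	setsockopt(s, IPPROTO_IP, IP_DROP_MEMBERSHIP, &mreq, sizeof(mreq));
	close(s);
	return (0);
}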
2365
2366/*
2367 * Perform the initial join for an IGMP group.
2368 *
2369 * When joining a group:
2370 * If the group should have its IGMP traffic suppressed, do nothing.
2371 * IGMPv1 starts sending IGMPv1 host membership reports.
2372 * IGMPv2 starts sending IGMPv2 host membership reports.
2373 * IGMPv3 will schedule an IGMPv3 state-change report containing the
2374 * initial state of the membership.
2375 */
2376static int
2377igmp_initial_join(struct in_multi *inm, struct igmp_ifsoftc *igi)
2378{
2379 struct ifnet *ifp;
2380 struct mbufq *mq;
2381 int error, retval, syncstates;
2382
2383 CTR4(KTR_IGMPV3, "%s: initial join 0x%08x on ifp %p(%s)", __func__,
2384 ntohl(inm->inm_addr.s_addr), inm->inm_ifp, inm->inm_ifp->if_xname);
2385
2386 error = 0;
2387 syncstates = 1;
2388
2389 ifp = inm->inm_ifp;
2390
2391 IN_MULTI_LOCK_ASSERT();
2392 IGMP_LOCK_ASSERT();
2393
2394 KASSERT(igi && igi->igi_ifp == ifp, ("%s: inconsistent ifp", __func__));
2395
2396 /*
2397 * Groups joined on loopback or marked as 'not reported',
2398 * e.g. 224.0.0.1, enter the IGMP_SILENT_MEMBER state and
2399 * are never reported in any IGMP protocol exchanges.
2400 * All other groups enter the appropriate IGMP state machine
2401 * for the version in use on this link.
2402 * A link marked as IGIF_SILENT causes IGMP to be completely
2403 * disabled for the link.
2404 */
2405 if ((ifp->if_flags & IFF_LOOPBACK) ||
2406 (igi->igi_flags & IGIF_SILENT) ||
2407 !igmp_isgroupreported(inm->inm_addr)) {
2408 CTR1(KTR_IGMPV3,
2409"%s: not kicking state machine for silent group", __func__);
2410 inm->inm_state = IGMP_SILENT_MEMBER;
2411 inm->inm_timer = 0;
2412 } else {
2413 /*
2414 * Deal with overlapping in_multi lifecycle.
2415 * If this group was LEAVING, then make sure
2416 * we drop the reference we picked up to keep the
2417 * group around for the final INCLUDE {} enqueue.
2418 */
2419 if (igi->igi_version == IGMP_VERSION_3 &&
2420 inm->inm_state == IGMP_LEAVING_MEMBER) {
2421 MPASS(inm->inm_refcount > 1);
2422 inm_rele_locked(NULL, inm);
2423 }
2424 inm->inm_state = IGMP_REPORTING_MEMBER;
2425
2426 switch (igi->igi_version) {
2427 case IGMP_VERSION_1:
2428 case IGMP_VERSION_2:
2429 inm->inm_state = IGMP_IDLE_MEMBER;
2430 error = igmp_v1v2_queue_report(inm,
2431 (igi->igi_version == IGMP_VERSION_2) ?
2432 IGMP_v2_HOST_MEMBERSHIP_REPORT :
2433 IGMP_v1_HOST_MEMBERSHIP_REPORT);
2434 if (error == 0) {
2435 inm->inm_timer = IGMP_RANDOM_DELAY(
2436 IGMP_V1V2_MAX_RI * PR_FASTHZ);
2437 V_current_state_timers_running = 1;
2438 }
2439 break;
2440
2441 case IGMP_VERSION_3:
2442 /*
2443 * Defer update of T0 to T1, until the first copy
2444 * of the state change has been transmitted.
2445 */
2446 syncstates = 0;
2447
2448 /*
2449 * Immediately enqueue a State-Change Report for
2450 * this interface, freeing any previous reports.
2451 * Don't kick the timers if there is nothing to do,
2452 * or if an error occurred.
2453 */
2454 mq = &inm->inm_scq;
2455 mbufq_drain(mq);
2456 retval = igmp_v3_enqueue_group_record(mq, inm, 1,
2457 0, 0);
2458 CTR2(KTR_IGMPV3, "%s: enqueue record = %d",
2459 __func__, retval);
2460 if (retval <= 0) {
2461 error = retval * -1;
2462 break;
2463 }
2464
2465 /*
2466 * Schedule transmission of pending state-change
2467 * report up to RV times for this link. The timer
2468 * will fire at the next igmp_fasttimo (~200ms),
2469 * giving us an opportunity to merge the reports.
2470 */
2471 if (igi->igi_flags & IGIF_LOOPBACK) {
2472 inm->inm_scrv = 1;
2473 } else {
2474 KASSERT(igi->igi_rv > 1,
2475 ("%s: invalid robustness %d", __func__,
2476 igi->igi_rv));
2477 inm->inm_scrv = igi->igi_rv;
2478 }
2479 inm->inm_sctimer = 1;
2480 V_state_change_timers_running = 1;
2481
2482 error = 0;
2483 break;
2484 }
2485 }
2486
2487 /*
2488 * Only update the T0 state if state change is atomic,
2489 * i.e. we don't need to wait for a timer to fire before we
2490 * can consider the state change to have been communicated.
2491 */
2492 if (syncstates) {
2493 inm_commit(inm);
2494 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for 0x%08x/%s", __func__,
2495 ntohl(inm->inm_addr.s_addr), inm->inm_ifp->if_xname);
2496 }
2497
2498 return (error);
2499}
2500
2501/*
2502 * Issue an intermediate state change during the IGMP life-cycle.
2503 */
2504static int
2505igmp_handle_state_change(struct in_multi *inm, struct igmp_ifsoftc *igi)
2506{
2507 struct ifnet *ifp;
2508 int retval;
2509
2510 CTR4(KTR_IGMPV3, "%s: state change for 0x%08x on ifp %p(%s)", __func__,
2511 ntohl(inm->inm_addr.s_addr), inm->inm_ifp, inm->inm_ifp->if_xname);
2512
2513 ifp = inm->inm_ifp;
2514
2515 IN_MULTI_LOCK_ASSERT();
2516 IGMP_LOCK_ASSERT();
2517
2518 KASSERT(igi && igi->igi_ifp == ifp, ("%s: inconsistent ifp", __func__));
2519
2520 if ((ifp->if_flags & IFF_LOOPBACK) ||
2521 (igi->igi_flags & IGIF_SILENT) ||
2522 !igmp_isgroupreported(inm->inm_addr) ||
2523 (igi->igi_version != IGMP_VERSION_3)) {
2524 if (!igmp_isgroupreported(inm->inm_addr)) {
2525 CTR1(KTR_IGMPV3,
2526"%s: not kicking state machine for silent group", __func__);
2527 }
2528 CTR1(KTR_IGMPV3, "%s: nothing to do", __func__);
2529 inm_commit(inm);
2530 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for 0x%08x/%s", __func__,
2531 ntohl(inm->inm_addr.s_addr), inm->inm_ifp->if_xname);
2532 return (0);
2533 }
2534
2535 mbufq_drain(&inm->inm_scq);
2536
2537 retval = igmp_v3_enqueue_group_record(&inm->inm_scq, inm, 1, 0, 0);
2538 CTR2(KTR_IGMPV3, "%s: enqueue record = %d", __func__, retval);
2539 if (retval <= 0)
2540 return (-retval);
2541
2542 /*
2543 * If record(s) were enqueued, start the state-change
2544 * report timer for this group.
2545 */
2546 inm->inm_scrv = ((igi->igi_flags & IGIF_LOOPBACK) ? 1 : igi->igi_rv);
2547 inm->inm_sctimer = 1;
2548 V_state_change_timers_running = 1;
2549
2550 return (0);
2551}
2552
2553/*
2554 * Perform the final leave for an IGMP group.
2555 *
2556 * When leaving a group:
2557 * IGMPv1 does nothing.
2558 * IGMPv2 sends a host leave message, if and only if we are the reporter.
2559 * IGMPv3 enqueues a state-change report containing a transition
2560 * to INCLUDE {} for immediate transmission.
2561 */
2562static void
2563igmp_final_leave(struct in_multi *inm, struct igmp_ifsoftc *igi)
2564{
2565 int syncstates;
2566
2567 syncstates = 1;
2568
2569 CTR4(KTR_IGMPV3, "%s: final leave 0x%08x on ifp %p(%s)",
2570 __func__, ntohl(inm->inm_addr.s_addr), inm->inm_ifp,
2571 inm->inm_ifp->if_xname);
2572
2573 IN_MULTI_LIST_LOCK_ASSERT();
2574 IGMP_LOCK_ASSERT();
2575
2576 switch (inm->inm_state) {
2577 case IGMP_NOT_MEMBER:
2578 case IGMP_SILENT_MEMBER:
2579 case IGMP_LEAVING_MEMBER:
2580 /* Already leaving or left; do nothing. */
2581 CTR1(KTR_IGMPV3,
2582"%s: not kicking state machine for silent group", __func__);
2583 break;
2584 case IGMP_REPORTING_MEMBER:
2585 case IGMP_IDLE_MEMBER:
2586 case IGMP_G_QUERY_PENDING_MEMBER:
2587 case IGMP_SG_QUERY_PENDING_MEMBER:
2588 if (igi->igi_version == IGMP_VERSION_2) {
2589#ifdef INVARIANTS
2590 if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER ||
2591 inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER)
2592 panic("%s: IGMPv3 state reached, not IGMPv3 mode",
2593 __func__);
2594#endif
2595 igmp_v1v2_queue_report(inm, IGMP_HOST_LEAVE_MESSAGE);
2596 inm->inm_state = IGMP_NOT_MEMBER;
2597 } else if (igi->igi_version == IGMP_VERSION_3) {
2598 /*
2599 * Stop group timer and all pending reports.
2600 * Immediately enqueue a state-change report
2601 * TO_IN {} to be sent on the next fast timeout,
2602 * giving us an opportunity to merge reports.
2603 */
2604 mbufq_drain(&inm->inm_scq);
2605 inm->inm_timer = 0;
2606 if (igi->igi_flags & IGIF_LOOPBACK) {
2607 inm->inm_scrv = 1;
2608 } else {
2609 inm->inm_scrv = igi->igi_rv;
2610 }
2611 CTR4(KTR_IGMPV3, "%s: Leaving 0x%08x/%s with %d "
2612 "pending retransmissions.", __func__,
2613 ntohl(inm->inm_addr.s_addr),
2614 inm->inm_ifp->if_xname, inm->inm_scrv);
2615 if (inm->inm_scrv == 0) {
2616 inm->inm_state = IGMP_NOT_MEMBER;
2617 inm->inm_sctimer = 0;
2618 } else {
2619 int retval __unused;
2620
2621 inm_acquire_locked(inm);
2622
2623 retval = igmp_v3_enqueue_group_record(
2624 &inm->inm_scq, inm, 1, 0, 0);
2625 KASSERT(retval != 0,
2626 ("%s: enqueue record = %d", __func__,
2627 retval));
2628
2629 inm->inm_state = IGMP_LEAVING_MEMBER;
2630 inm->inm_sctimer = 1;
2631 V_state_change_timers_running = 1;
2632 syncstates = 0;
2633 }
2634 break;
2635 }
2636 break;
2637 case IGMP_LAZY_MEMBER:
2638 case IGMP_SLEEPING_MEMBER:
2639 case IGMP_AWAKENING_MEMBER:
2640 /* Our reports are suppressed; do nothing. */
2641 break;
2642 }
2643
2644 if (syncstates) {
2645 inm_commit(inm);
2646 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for 0x%08x/%s", __func__,
2647 ntohl(inm->inm_addr.s_addr), inm->inm_ifp->if_xname);
2648 inm->inm_st[1].iss_fmode = MCAST_UNDEFINED;
2649 CTR3(KTR_IGMPV3, "%s: T1 now MCAST_UNDEFINED for 0x%08x/%s",
2650 __func__, ntohl(inm->inm_addr.s_addr),
2651 inm->inm_ifp->if_xname);
2652 }
2653}
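A hedged sketch of the IGMPv3 branch above for a non-loopback link with the default robustness of 2; the sequencing is read off the code rather than measured.

/*
 * Illustrative only; not part of igmp.c.
 *
 *   final leave          inm_scq drained; inm_scrv = igi_rv (2);
 *                        one TO_IN {} record enqueued; inm_sctimer = 1;
 *                        state = IGMP_LEAVING_MEMBER; syncstates = 0
 *   next fasttimo        1st copy of the state change transmitted
 *   following fasttimo   2nd (last) copy sent; the extra reference
 *                        taken by inm_acquire_locked() is released
 */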
2654
2655/*
2656 * Enqueue an IGMPv3 group record to the given output queue.
2657 *
2658 * XXX This function could do with having the allocation code
2659 * split out, and the multiple-tree-walks coalesced into a single
2660 * routine as has been done in igmp_v3_enqueue_filter_change().
2661 *
2662 * If is_state_change is zero, a current-state record is appended.
2663 * If is_state_change is non-zero, a state-change report is appended.
2664 *
2665 * If is_group_query is non-zero, an mbuf packet chain is allocated.
2666 * If is_group_query is zero, and if there is a packet with free space
2667 * at the tail of the queue, it will be appended to, provided there
2668 * is enough free space.
2669 * Otherwise a new mbuf packet chain is allocated.
2670 *
2671 * If is_source_query is non-zero, each source is checked to see if
2672 * it was recorded for a Group-Source query, and will be omitted if
2673 * it is not both in-mode and recorded.
2674 *
2675 * The function will attempt to allocate leading space in the packet
2676 * for the IP/IGMP header to be prepended without fragmenting the chain.
2677 *
2678 * If successful the size of all data appended to the queue is returned,
2679 * otherwise an error code less than zero is returned, or zero if
2680 * no record(s) were appended.
2681 */
2682static int
2683igmp_v3_enqueue_group_record(struct mbufq *mq, struct in_multi *inm,
2684 const int is_state_change, const int is_group_query,
2685 const int is_source_query)
2686{
2687 struct igmp_grouprec ig;
2688 struct igmp_grouprec *pig;
2689 struct ifnet *ifp;
2690 struct ip_msource *ims, *nims;
2691 struct mbuf *m0, *m, *md;
2692 int is_filter_list_change;
2693 int minrec0len, m0srcs, msrcs, nbytes, off;
2694 int record_has_sources;
2695 int now;
2696 int type;
2697 in_addr_t naddr;
2698 uint8_t mode;
2699
2700 IN_MULTI_LIST_LOCK_ASSERT();
2701
2702 ifp = inm->inm_ifp;
2703 is_filter_list_change = 0;
2704 m = NULL;
2705 m0 = NULL;
2706 m0srcs = 0;
2707 msrcs = 0;
2708 nbytes = 0;
2709 nims = NULL;
2710 record_has_sources = 1;
2711 pig = NULL;
2712 type = IGMP_DO_NOTHING;
2713 mode = inm->inm_st[1].iss_fmode;
2714
2715 /*
2716 * If we did not transition out of ASM mode during t0->t1,
2717 * and there are no source nodes to process, we can skip
2718 * the generation of source records.
2719 */
2720 if (inm->inm_st[0].iss_asm > 0 && inm->inm_st[1].iss_asm > 0 &&
2721 inm->inm_nsrc == 0)
2722 record_has_sources = 0;
2723
2724 if (is_state_change) {
2725 /*
2726 * Queue a state change record.
2727 * If the mode did not change, and there are non-ASM
2728 * listeners or source filters present,
2729 * we potentially need to issue two records for the group.
2730 * If we are transitioning to MCAST_UNDEFINED, we need
2731 * not send any sources.
2732 * If there are ASM listeners, and there was no filter
2733 * mode transition of any kind, do nothing.
2734 */
2735 if (mode != inm->inm_st[0].iss_fmode) {
2736 if (mode == MCAST_EXCLUDE) {
2737 CTR1(KTR_IGMPV3, "%s: change to EXCLUDE",
2738 __func__);
2739 type = IGMP_CHANGE_TO_EXCLUDE_MODE;
2740 } else {
2741 CTR1(KTR_IGMPV3, "%s: change to INCLUDE",
2742 __func__);
2743 type = IGMP_CHANGE_TO_INCLUDE_MODE;
2744 if (mode == MCAST_UNDEFINED)
2745 record_has_sources = 0;
2746 }
2747 } else {
2748 if (record_has_sources) {
2749 is_filter_list_change = 1;
2750 } else {
2751 type = IGMP_DO_NOTHING;
2752 }
2753 }
2754 } else {
2755 /*
2756 * Queue a current state record.
2757 */
2758 if (mode == MCAST_EXCLUDE) {
2759 type = IGMP_MODE_IS_EXCLUDE;
2760 } else if (mode == MCAST_INCLUDE) {
2761 type = IGMP_MODE_IS_INCLUDE;
2762 KASSERT(inm->inm_st[1].iss_asm == 0,
2763 ("%s: inm %p is INCLUDE but ASM count is %d",
2764 __func__, inm, inm->inm_st[1].iss_asm));
2765 }
2766 }
2767
2768 /*
2769 * Generate the filter list changes using a separate function.
2770 */
2771 if (is_filter_list_change)
2772 return (igmp_v3_enqueue_filter_change(mq, inm));
2773
2774 if (type == IGMP_DO_NOTHING) {
2775 CTR3(KTR_IGMPV3, "%s: nothing to do for 0x%08x/%s", __func__,
2776 ntohl(inm->inm_addr.s_addr), inm->inm_ifp->if_xname);
2777 return (0);
2778 }
2779
2780 /*
2781 * If any sources are present, we must be able to fit at least
2782 * one in the trailing space of the tail packet's mbuf,
2783 * ideally more.
2784 */
2785 minrec0len = sizeof(struct igmp_grouprec);
2786 if (record_has_sources)
2787 minrec0len += sizeof(in_addr_t);
2788
2789 CTR4(KTR_IGMPV3, "%s: queueing %s for 0x%08x/%s", __func__,
2790 igmp_rec_type_to_str(type), ntohl(inm->inm_addr.s_addr),
2791 inm->inm_ifp->if_xname);
2792
2793 /*
2794 * Check if we have a packet in the tail of the queue for this
2795 * group into which the first group record for this group will fit.
2796 * Otherwise allocate a new packet.
2797 * Always allocate leading space for IP+RA_OPT+IGMP+REPORT.
2798 * Note: Group records for G/GSR query responses MUST be sent
2799 * in their own packet.
2800 */
2801 m0 = mbufq_last(mq);
2802 if (!is_group_query &&
2803 m0 != NULL &&
2804 (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <= IGMP_V3_REPORT_MAXRECS) &&
2805 (m0->m_pkthdr.len + minrec0len) <
2806 (ifp->if_mtu - IGMP_LEADINGSPACE)) {
2807 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
2808 sizeof(struct igmp_grouprec)) / sizeof(in_addr_t);
2809 m = m0;
2810 CTR1(KTR_IGMPV3, "%s: use existing packet", __func__);
2811 } else {
2812 if (mbufq_full(mq)) {
2813 CTR1(KTR_IGMPV3, "%s: outbound queue full", __func__);
2814 return (-ENOMEM);
2815 }
2816 m = NULL;
2817 m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE -
2818 sizeof(struct igmp_grouprec)) / sizeof(in_addr_t);
2819 if (!is_state_change && !is_group_query) {
2820 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2821 if (m)
2822 m->m_data += IGMP_LEADINGSPACE;
2823 }
2824 if (m == NULL) {
2825 m = m_gethdr(M_NOWAIT, MT_DATA);
2826 if (m)
2827 M_ALIGN(m, IGMP_LEADINGSPACE);
2828 }
2829 if (m == NULL)
2830 return (-ENOMEM);
2831
2832 igmp_save_context(m, ifp);
2833
2834 CTR1(KTR_IGMPV3, "%s: allocated first packet", __func__);
2835 }
2836
2837 /*
2838 * Append group record.
2839 * If we have sources, we don't know how many yet.
2840 */
2841 ig.ig_type = type;
2842 ig.ig_datalen = 0;
2843 ig.ig_numsrc = 0;
2844 ig.ig_group = inm->inm_addr;
2845 if (!m_append(m, sizeof(struct igmp_grouprec), (void *)&ig)) {
2846 if (m != m0)
2847 m_freem(m);
2848 CTR1(KTR_IGMPV3, "%s: m_append() failed.", __func__);
2849 return (-ENOMEM);
2850 }
2851 nbytes += sizeof(struct igmp_grouprec);
2852
2853 /*
2854 * Append as many sources as will fit in the first packet.
2855 * If we are appending to a new packet, the chain allocation
2856 * may potentially use clusters; use m_getptr() in this case.
2857 * If we are appending to an existing packet, we need to obtain
2858 * a pointer to the group record after m_append(), in case a new
2859 * mbuf was allocated.
2860 * Only append sources which are in-mode at t1. If we are
2861 * transitioning to MCAST_UNDEFINED state on the group, do not
2862 * include source entries.
2863 * Only report recorded sources in our filter set when responding
2864 * to a group-source query.
2865 */
2866 if (record_has_sources) {
2867 if (m == m0) {
2868 md = m_last(m);
2869 pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) +
2870 md->m_len - nbytes);
2871 } else {
2872 md = m_getptr(m, 0, &off);
2873 pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) +
2874 off);
2875 }
2876 msrcs = 0;
2877 RB_FOREACH_SAFE(ims, ip_msource_tree, &inm->inm_srcs, nims) {
2878 CTR2(KTR_IGMPV3, "%s: visit node 0x%08x", __func__,
2879 ims->ims_haddr);
2880 now = ims_get_mode(inm, ims, 1);
2881 CTR2(KTR_IGMPV3, "%s: node is %d", __func__, now);
2882 if ((now != mode) ||
2883 (now == mode && mode == MCAST_UNDEFINED)) {
2884 CTR1(KTR_IGMPV3, "%s: skip node", __func__);
2885 continue;
2886 }
2887 if (is_source_query && ims->ims_stp == 0) {
2888 CTR1(KTR_IGMPV3, "%s: skip unrecorded node",
2889 __func__);
2890 continue;
2891 }
2892 CTR1(KTR_IGMPV3, "%s: append node", __func__);
2893 naddr = htonl(ims->ims_haddr);
2894 if (!m_append(m, sizeof(in_addr_t), (void *)&naddr)) {
2895 if (m != m0)
2896 m_freem(m);
2897 CTR1(KTR_IGMPV3, "%s: m_append() failed.",
2898 __func__);
2899 return (-ENOMEM);
2900 }
2901 nbytes += sizeof(in_addr_t);
2902 ++msrcs;
2903 if (msrcs == m0srcs)
2904 break;
2905 }
2906 CTR2(KTR_IGMPV3, "%s: msrcs is %d this packet", __func__,
2907 msrcs);
2908 pig->ig_numsrc = htons(msrcs);
2909 nbytes += (msrcs * sizeof(in_addr_t));
2910 }
2911
2912 if (is_source_query && msrcs == 0) {
2913 CTR1(KTR_IGMPV3, "%s: no recorded sources to report", __func__);
2914 if (m != m0)
2915 m_freem(m);
2916 return (0);
2917 }
2918
2919 /*
2920 * We are good to go with first packet.
2921 */
2922 if (m != m0) {
2923 CTR1(KTR_IGMPV3, "%s: enqueueing first packet", __func__);
2924 m->m_pkthdr.PH_vt.vt_nrecs = 1;
2925 mbufq_enqueue(mq, m);
2926 } else
2927 m->m_pkthdr.PH_vt.vt_nrecs++;
2928
2929 /*
2930 * No further work needed if no source list in packet(s).
2931 */
2932 if (!record_has_sources)
2933 return (nbytes);
2934
2935 /*
2936 * Whilst sources remain to be announced, we need to allocate
2937 * a new packet and fill out as many sources as will fit.
2938 * Always try for a cluster first.
2939 */
2940 while (nims != NULL) {
2941 if (mbufq_full(mq)) {
2942 CTR1(KTR_IGMPV3, "%s: outbound queue full", __func__);
2943 return (-ENOMEM);
2944 }
2945 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2946 if (m)
2947 m->m_data += IGMP_LEADINGSPACE;
2948 if (m == NULL) {
2949 m = m_gethdr(M_NOWAIT, MT_DATA);
2950 if (m)
2951 M_ALIGN(m, IGMP_LEADINGSPACE);
2952 }
2953 if (m == NULL)
2954 return (-ENOMEM);
2955 igmp_save_context(m, ifp);
2956 md = m_getptr(m, 0, &off);
2957 pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) + off);
2958 CTR1(KTR_IGMPV3, "%s: allocated next packet", __func__);
2959
2960 if (!m_append(m, sizeof(struct igmp_grouprec), (void *)&ig)) {
2961 if (m != m0)
2962 m_freem(m);
2963 CTR1(KTR_IGMPV3, "%s: m_append() failed.", __func__);
2964 return (-ENOMEM);
2965 }
2966 m->m_pkthdr.PH_vt.vt_nrecs = 1;
2967 nbytes += sizeof(struct igmp_grouprec);
2968
2969 m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE -
2970 sizeof(struct igmp_grouprec)) / sizeof(in_addr_t);
2971
2972 msrcs = 0;
2973 RB_FOREACH_FROM(ims, ip_msource_tree, nims) {
2974 CTR2(KTR_IGMPV3, "%s: visit node 0x%08x", __func__,
2975 ims->ims_haddr);
2976 now = ims_get_mode(inm, ims, 1);
2977 if ((now != mode) ||
2978 (now == mode && mode == MCAST_UNDEFINED)) {
2979 CTR1(KTR_IGMPV3, "%s: skip node", __func__);
2980 continue;
2981 }
2982 if (is_source_query && ims->ims_stp == 0) {
2983 CTR1(KTR_IGMPV3, "%s: skip unrecorded node",
2984 __func__);
2985 continue;
2986 }
2987 CTR1(KTR_IGMPV3, "%s: append node", __func__);
2988 naddr = htonl(ims->ims_haddr);
2989 if (!m_append(m, sizeof(in_addr_t), (void *)&naddr)) {
2990 if (m != m0)
2991 m_freem(m);
2992 CTR1(KTR_IGMPV3, "%s: m_append() failed.",
2993 __func__);
2994 return (-ENOMEM);
2995 }
2996 ++msrcs;
2997 if (msrcs == m0srcs)
2998 break;
2999 }
3000 pig->ig_numsrc = htons(msrcs);
3001 nbytes += (msrcs * sizeof(in_addr_t));
3002
3003 CTR1(KTR_IGMPV3, "%s: enqueueing next packet", __func__);
3004 mbufq_enqueue(mq, m);
3005 }
3006
3007 return (nbytes);
3008}
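To make the m0srcs sizing in this routine concrete: a hedged calculation for a fresh packet on a 1500-byte-MTU link, assuming IGMP_LEADINGSPACE is IP header (20) + Router Alert option (4) + IGMPv3 report header (8) = 32 bytes; the MTU and the breakdown are illustrative assumptions, not values taken from igmp.c.

/* Illustrative arithmetic only; not part of igmp.c. */
#include <stdio.h>

int
main(void)
{
	const int mtu = 1500;		/* assumed link MTU */
	const int leading = 20 + 4 + 8;	/* assumed IGMP_LEADINGSPACE */
	const int grec = 8;		/* sizeof(struct igmp_grouprec) */
	const int src = 4;		/* sizeof(in_addr_t) */

	/* Sources that fit after the first group record of a new packet. */
	printf("m0srcs = %d\n", (mtu - leading - grec) / src);	/* 365 */
	return (0);
}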
3009
3010/*
3011 * Type used to mark record pass completion.
3012 * We exploit the fact we can cast to this easily from the
3013 * current filter modes on each ip_msource node.
3014 */
3015typedef enum {
3016 REC_NONE = 0x00, /* MCAST_UNDEFINED */
3017 REC_ALLOW = 0x01, /* MCAST_INCLUDE */
3018 REC_BLOCK = 0x02, /* MCAST_EXCLUDE */
3019 REC_FULL = REC_ALLOW | REC_BLOCK
3020} rectype_t;
3021
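The value REC_FULL = REC_ALLOW | REC_BLOCK lets the filter-change walk below toggle between the two record passes with pure bit operations; a hedged, self-contained check of that toggle (local names carry a trailing underscore to mark them as stand-ins, not the kernel definitions):

/* Illustrative only; not part of igmp.c. */
#include <assert.h>

int
main(void)
{
	const int REC_ALLOW_ = 0x01, REC_BLOCK_ = 0x02, REC_FULL_ = 0x03;
	int crt = REC_ALLOW_, drt = 0x00;

	drt |= crt;			/* ALLOW_NEW pass complete */
	crt = (~crt & REC_FULL_);	/* flip: next pass is BLOCK_OLD */
	assert(crt == REC_BLOCK_);
	drt |= crt;			/* BLOCK_OLD pass complete */
	assert(drt == REC_FULL_);	/* while (drt != REC_FULL) exits */
	return (0);
}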
3022/*
3023 * Enqueue an IGMPv3 filter list change to the given output queue.
3024 *
3025 * Source list filter state is held in an RB-tree. When the filter list
3026 * for a group is changed without changing its mode, we need to compute
3027 * the deltas between T0 and T1 for each source in the filter set,
3028 * and enqueue the appropriate ALLOW_NEW/BLOCK_OLD records.
3029 *
3030 * As we may potentially queue two record types, and the entire R-B tree
3031 * needs to be walked at once, we break this out into its own function
3032 * so we can generate a tightly packed queue of packets.
3033 *
3034 * XXX This could be written to only use one tree walk, although that makes
3035 * serializing into the mbuf chains a bit harder. For now we do two walks
3036 * which makes things easier on us, and it may or may not be harder on
3037 * the L2 cache.
3038 *
3039 * If successful the size of all data appended to the queue is returned,
3040 * otherwise an error code less than zero is returned, or zero if
3041 * no record(s) were appended.
3042 */
3043static int
3044igmp_v3_enqueue_filter_change(struct mbufq *mq, struct in_multi *inm)
3045{
3046 static const int MINRECLEN =
3047 sizeof(struct igmp_grouprec) + sizeof(in_addr_t);
3048 struct ifnet *ifp;
3049 struct igmp_grouprec ig;
3050 struct igmp_grouprec *pig;
3051 struct ip_msource *ims, *nims;
3052 struct mbuf *m, *m0, *md;
3053 in_addr_t naddr;
3054 int m0srcs, nbytes, npbytes, off, rsrcs, schanged;
3055 int nallow, nblock;
3056 uint8_t mode, now, then;
3057 rectype_t crt, drt, nrt;
3058
3059 IN_MULTI_LIST_LOCK_ASSERT();
3060
3061 if (inm->inm_nsrc == 0 ||
3062 (inm->inm_st[0].iss_asm > 0 && inm->inm_st[1].iss_asm > 0))
3063 return (0);
3064
3065 ifp = inm->inm_ifp; /* interface */
3066 mode = inm->inm_st[1].iss_fmode; /* filter mode at t1 */
3067 crt = REC_NONE; /* current group record type */
3068 drt = REC_NONE; /* mask of completed group record types */
3069 nrt = REC_NONE; /* record type for current node */
3070 m0srcs = 0; /* # sources which will fit in current mbuf chain */
3071 nbytes = 0; /* # of bytes appended to group's state-change queue */
3072 npbytes = 0; /* # of bytes appended this packet */
3073 rsrcs = 0; /* # sources encoded in current record */
3074 schanged = 0; /* # nodes encoded in overall filter change */
3075 nallow = 0; /* # of source entries in ALLOW_NEW */
3076 nblock = 0; /* # of source entries in BLOCK_OLD */
3077 nims = NULL; /* next tree node pointer */
3078
3079 /*
3080 * For each possible filter record mode.
3081 * The first kind of source we encounter tells us which
3082 * is the first kind of record we start appending.
3083 * If a node transitioned to UNDEFINED at t1, its mode is treated
3084 * as the inverse of the group's filter mode.
3085 */
3086 while (drt != REC_FULL) {
3087 do {
3088 m0 = mbufq_last(mq);
3089 if (m0 != NULL &&
3090 (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <=
3091 IGMP_V3_REPORT_MAXRECS) &&
3092 (m0->m_pkthdr.len + MINRECLEN) <
3093 (ifp->if_mtu - IGMP_LEADINGSPACE)) {
3094 m = m0;
3095 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
3096 sizeof(struct igmp_grouprec)) /
3097 sizeof(in_addr_t);
3098 CTR1(KTR_IGMPV3,
3099 "%s: use previous packet", __func__);
3100 } else {
3101 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
3102 if (m)
3103 m->m_data += IGMP_LEADINGSPACE;
3104 if (m == NULL) {
3105 m = m_gethdr(M_NOWAIT, MT_DATA);
3106 if (m)
3107 M_ALIGN(m, IGMP_LEADINGSPACE);
3108 }
3109 if (m == NULL) {
3110 CTR1(KTR_IGMPV3,
3111 "%s: m_get*() failed", __func__);
3112 return (-ENOMEM);
3113 }
3114 m->m_pkthdr.PH_vt.vt_nrecs = 0;
3115 igmp_save_context(m, ifp);
3116 m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE -
3117 sizeof(struct igmp_grouprec)) /
3118 sizeof(in_addr_t);
3119 npbytes = 0;
3120 CTR1(KTR_IGMPV3,
3121 "%s: allocated new packet", __func__);
3122 }
3123 /*
3124 * Append the IGMP group record header to the
3125 * current packet's data area.
3126 * Recalculate pointer to free space for next
3127 * group record, in case m_append() allocated
3128 * a new mbuf or cluster.
3129 */
3130 memset(&ig, 0, sizeof(ig));
3131 ig.ig_group = inm->inm_addr;
3132 if (!m_append(m, sizeof(ig), (void *)&ig)) {
3133 if (m != m0)
3134 m_freem(m);
3135 CTR1(KTR_IGMPV3,
3136 "%s: m_append() failed", __func__);
3137 return (-ENOMEM);
3138 }
3139 npbytes += sizeof(struct igmp_grouprec);
3140 if (m != m0) {
3141 /* new packet; offset in chain */
3142 md = m_getptr(m, npbytes -
3143 sizeof(struct igmp_grouprec), &off);
3144 pig = (struct igmp_grouprec *)(mtod(md,
3145 uint8_t *) + off);
3146 } else {
3147 /* current packet; offset from last append */
3148 md = m_last(m);
3149 pig = (struct igmp_grouprec *)(mtod(md,
3150 uint8_t *) + md->m_len -
3151 sizeof(struct igmp_grouprec));
3152 }
3153 /*
3154 * Begin walking the tree for this record type
3155 * pass, or continue from where we left off
3156 * previously if we had to allocate a new packet.
3157 * Only report deltas in-mode at t1.
3158 * We need not report included sources as allowed
3159 * if we are in inclusive mode on the group,
3160 * however the converse is not true.
3161 */
3162 rsrcs = 0;
3163 if (nims == NULL)
3164 nims = RB_MIN(ip_msource_tree, &inm->inm_srcs);
3165 RB_FOREACH_FROM(ims, ip_msource_tree, nims) {
3166 CTR2(KTR_IGMPV3, "%s: visit node 0x%08x",
3167 __func__, ims->ims_haddr);
3168 now = ims_get_mode(inm, ims, 1);
3169 then = ims_get_mode(inm, ims, 0);
3170 CTR3(KTR_IGMPV3, "%s: mode: t0 %d, t1 %d",
3171 __func__, then, now);
3172 if (now == then) {
3173 CTR1(KTR_IGMPV3,
3174 "%s: skip unchanged", __func__);
3175 continue;
3176 }
3177 if (mode == MCAST_EXCLUDE &&
3178 now == MCAST_INCLUDE) {
3179 CTR1(KTR_IGMPV3,
3180 "%s: skip IN src on EX group",
3181 __func__);
3182 continue;
3183 }
3184 nrt = (rectype_t)now;
3185 if (nrt == REC_NONE)
3186 nrt = (rectype_t)(~mode & REC_FULL);
3187 if (schanged++ == 0) {
3188 crt = nrt;
3189 } else if (crt != nrt)
3190 continue;
3191 naddr = htonl(ims->ims_haddr);
3192 if (!m_append(m, sizeof(in_addr_t),
3193 (void *)&naddr)) {
3194 if (m != m0)
3195 m_freem(m);
3196 CTR1(KTR_IGMPV3,
3197 "%s: m_append() failed", __func__);
3198 return (-ENOMEM);
3199 }
3200 nallow += !!(crt == REC_ALLOW);
3201 nblock += !!(crt == REC_BLOCK);
3202 if (++rsrcs == m0srcs)
3203 break;
3204 }
3205 /*
3206 * If we did not append any tree nodes on this
3207 * pass, back out of allocations.
3208 */
3209 if (rsrcs == 0) {
3210 npbytes -= sizeof(struct igmp_grouprec);
3211 if (m != m0) {
3212 CTR1(KTR_IGMPV3,
3213 "%s: m_free(m)", __func__);
3214 m_freem(m);
3215 } else {
3216 CTR1(KTR_IGMPV3,
3217 "%s: m_adj(m, -ig)", __func__);
3218 m_adj(m, -((int)sizeof(
3219 struct igmp_grouprec)));
3220 }
3221 continue;
3222 }
3223 npbytes += (rsrcs * sizeof(in_addr_t));
3224 if (crt == REC_ALLOW)
3225 pig->ig_type = IGMP_ALLOW_NEW_SOURCES;
3226 else if (crt == REC_BLOCK)
3227 pig->ig_type = IGMP_BLOCK_OLD_SOURCES;
3228 pig->ig_numsrc = htons(rsrcs);
3229 /*
3230 * Count the new group record, and enqueue this
3231 * packet if it wasn't already queued.
3232 */
3233 m->m_pkthdr.PH_vt.vt_nrecs++;
3234 if (m != m0)
3235 mbufq_enqueue(mq, m);
3236 nbytes += npbytes;
3237 } while (nims != NULL);
3238 drt |= crt;
3239 crt = (~crt & REC_FULL);
3240 }
3241
3242 CTR3(KTR_IGMPV3, "%s: queued %d ALLOW_NEW, %d BLOCK_OLD", __func__,
3243 nallow, nblock);
3244
3245 return (nbytes);
3246}
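A hedged worked example of the t0/t1 deltas this routine serializes, for a group whose mode stays MCAST_EXCLUDE while its source filter changes:

/*
 * Illustrative only; the addresses are made up.
 *
 *   10.0.0.1 newly blocked:     t0 undefined -> t1 EXCLUDE => BLOCK_OLD
 *   10.0.0.2 no longer blocked: t0 EXCLUDE -> t1 undefined;
 *            nrt = (~mode & REC_FULL) = REC_ALLOW          => ALLOW_NEW
 *
 * Two record types, hence the two passes gated by drt != REC_FULL.
 */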
3247
3248static int
3249igmp_v3_merge_state_changes(struct in_multi *inm, struct mbufq *scq)
3250{
3251 struct mbufq *gq;
3252 struct mbuf *m; /* pending state-change */
3253 struct mbuf *m0; /* copy of pending state-change */
3254 struct mbuf *mt; /* last state-change in packet */
3255 int docopy, domerge;
3256 u_int recslen;
3257
3258 docopy = 0;
3259 domerge = 0;
3260 recslen = 0;
3261
3262 IN_MULTI_LIST_LOCK_ASSERT();
3263 IGMP_LOCK_ASSERT();
3264
3265 /*
3266 * If there are further pending retransmissions, make a writable
3267 * copy of each queued state-change message before merging.
3268 */
3269 if (inm->inm_scrv > 0)
3270 docopy = 1;
3271
3272 gq = &inm->inm_scq;
3273#ifdef KTR
3274 if (mbufq_first(gq) == NULL) {
3275 CTR2(KTR_IGMPV3, "%s: WARNING: queue for inm %p is empty",
3276 __func__, inm);
3277 }
3278#endif
3279
3280 m = mbufq_first(gq);
3281 while (m != NULL) {
3282 /*
3283 * Only merge the report into the current packet if
3284 * there is sufficient space to do so; an IGMPv3 report
3285 * packet may only contain 65,535 group records.
3286 * Always use a simple mbuf chain concatenation to do this,
3287 * as large state changes for single groups may have
3288 * allocated clusters.
3289 */
3290 domerge = 0;
3291 mt = mbufq_last(scq);
3292 if (mt != NULL) {
3293 recslen = m_length(m, NULL);
3294
3295 if ((mt->m_pkthdr.PH_vt.vt_nrecs +
3296 m->m_pkthdr.PH_vt.vt_nrecs <=
3297 IGMP_V3_REPORT_MAXRECS) &&
3298 (mt->m_pkthdr.len + recslen <=
3299 (inm->inm_ifp->if_mtu - IGMP_LEADINGSPACE)))
3300 domerge = 1;
3301 }
3302
3303 if (!domerge && mbufq_full(gq)) {
3304 CTR2(KTR_IGMPV3,
3305 "%s: outbound queue full, skipping whole packet %p",
3306 __func__, m);
3307 mt = m->m_nextpkt;
3308 if (!docopy)
3309 m_freem(m);
3310 m = mt;
3311 continue;
3312 }
3313
3314 if (!docopy) {
3315 CTR2(KTR_IGMPV3, "%s: dequeueing %p", __func__, m);
3316 m0 = mbufq_dequeue(gq);
3317 m = m0->m_nextpkt;
3318 } else {
3319 CTR2(KTR_IGMPV3, "%s: copying %p", __func__, m);
3320 m0 = m_dup(m, M_NOWAIT);
3321 if (m0 == NULL)
3322 return (ENOMEM);
3323 m0->m_nextpkt = NULL;
3324 m = m->m_nextpkt;
3325 }
3326
3327 if (!domerge) {
3328 CTR3(KTR_IGMPV3, "%s: queueing %p to scq %p",
3329 __func__, m0, scq);
3330 mbufq_enqueue(scq, m0);
3331 } else {
3332 struct mbuf *mtl; /* last mbuf of packet mt */
3333
3334 CTR3(KTR_IGMPV3, "%s: merging %p with scq tail %p",
3335 __func__, m0, mt);
3336
3337 mtl = m_last(mt);
3338 m0->m_flags &= ~M_PKTHDR;
3339 mt->m_pkthdr.len += recslen;
3340 mt->m_pkthdr.PH_vt.vt_nrecs +=
3341 m0->m_pkthdr.PH_vt.vt_nrecs;
3342
3343 mtl->m_next = m0;
3344 }
3345 }
3346
3347 return (0);
3348}
3349
3350/*
3351 * Respond to a pending IGMPv3 General Query.
3352 */
3353static void
3354igmp_v3_dispatch_general_query(struct igmp_ifsoftc *igi)
3355{
3356 struct ifmultiaddr *ifma;
3357 struct ifnet *ifp;
3358 struct in_multi *inm;
3359 int retval __unused, loop;
3360
3361 IN_MULTI_LIST_LOCK_ASSERT();
3362 IGMP_LOCK_ASSERT();
3363 NET_EPOCH_ASSERT();
3364
3365 KASSERT(igi->igi_version == IGMP_VERSION_3,
3366 ("%s: called when version %d", __func__, igi->igi_version));
3367
3368 /*
3369 * Check that there are some packets queued. If so, send them first.
3370 * For a large number of groups the reply to a general query can take
3371 * many packets; we should finish sending them before we start
3372 * queuing the new reply.
3373 */
3374 if (mbufq_len(&igi->igi_gq) != 0)
3375 goto send;
3376
3377 ifp = igi->igi_ifp;
3378
3379 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3380 if (ifma->ifma_addr->sa_family != AF_INET ||
3381 ifma->ifma_protospec == NULL)
3382 continue;
3383
3384 inm = (struct in_multi *)ifma->ifma_protospec;
3385 KASSERT(ifp == inm->inm_ifp,
3386 ("%s: inconsistent ifp", __func__));
3387
3388 switch (inm->inm_state) {
3389 case IGMP_NOT_MEMBER:
3390 case IGMP_SILENT_MEMBER:
3391 break;
3392 case IGMP_REPORTING_MEMBER:
3393 case IGMP_IDLE_MEMBER:
3394 case IGMP_LAZY_MEMBER:
3395 case IGMP_SLEEPING_MEMBER:
3396 case IGMP_AWAKENING_MEMBER:
3397 inm->inm_state = IGMP_REPORTING_MEMBER;
3398 retval = igmp_v3_enqueue_group_record(&igi->igi_gq,
3399 inm, 0, 0, 0);
3400 CTR2(KTR_IGMPV3, "%s: enqueue record = %d",
3401 __func__, retval);
3402 break;
3403 case IGMP_G_QUERY_PENDING_MEMBER:
3404 case IGMP_SG_QUERY_PENDING_MEMBER:
3405 case IGMP_LEAVING_MEMBER:
3406 break;
3407 }
3408 }
3409
3410send:
3411 loop = (igi->igi_flags & IGIF_LOOPBACK) ? 1 : 0;
3412 igmp_dispatch_queue(&igi->igi_gq, IGMP_MAX_RESPONSE_BURST, loop);
3413
3414 /*
3415 * Slew transmission of bursts over 500ms intervals.
3416 */
3417 if (mbufq_first(&igi->igi_gq) != NULL) {
3418 igi->igi_v3_timer = 1 + IGMP_RANDOM_DELAY(
3419 IGMP_RESPONSE_BURST_INTERVAL);
3420 V_interface_timers_running = 1;
3421 }
3422}
3423
3424/*
3425 * Transmit the next pending IGMP message in the output queue.
3426 *
3427 * We get called from netisr_processqueue(). A mutex private to igmpoq
3428 * will be acquired and released around this routine.
3429 *
3430 * VIMAGE: Needs to store/restore vnet pointer on a per-mbuf-chain basis.
3431 * MRT: Nothing needs to be done, as IGMP traffic is always local to
3432 * a link and uses a link-scope multicast address.
3433 */
3434static void
3435igmp_intr(struct mbuf *m)
3436{
3437 struct ip_moptions imo;
3438 struct ifnet *ifp;
3439 struct mbuf *ipopts, *m0;
3440 int error;
3441 uint32_t ifindex;
3442
3443 CTR2(KTR_IGMPV3, "%s: transmit %p", __func__, m);
3444
3445 /*
3446 * Set VNET image pointer from enqueued mbuf chain
3447 * before doing anything else. Whilst we use interface
3448 * indexes to guard against interface detach, they are
3449 * unique to each VIMAGE and must be retrieved.
3450 */
3451 CURVNET_SET((struct vnet *)(m->m_pkthdr.PH_loc.ptr));
3452 ifindex = igmp_restore_context(m);
3453
3454 /*
3455 * Check if the ifnet still exists. This limits the scope of
3456 * any race in the absence of a global ifp lock for low cost
3457 * (an array lookup).
3458 */
3459 ifp = ifnet_byindex(ifindex);
3460 if (ifp == NULL) {
3461 CTR3(KTR_IGMPV3, "%s: dropped %p as ifindex %u went away.",
3462 __func__, m, ifindex);
3463 m_freem(m);
3464 IPSTAT_INC(ips_noroute);
3465 goto out;
3466 }
3467
3468 ipopts = V_igmp_sendra ? m_raopt : NULL;
3469
3470 imo.imo_multicast_ttl = 1;
3471 imo.imo_multicast_vif = -1;
3472 imo.imo_multicast_loop = (V_ip_mrouter != NULL);
3473
3474 /*
3475 * If the user requested that IGMP traffic be explicitly
3476 * redirected to the loopback interface (e.g. they are running a
3477 * MANET interface and the routing protocol needs to see the
3478 * updates), handle this now.
3479 */
3480 if (m->m_flags & M_IGMP_LOOP)
3481 imo.imo_multicast_ifp = V_loif;
3482 else
3483 imo.imo_multicast_ifp = ifp;
3484
3485 if (m->m_flags & M_IGMPV2) {
3486 m0 = m;
3487 } else {
3488 m0 = igmp_v3_encap_report(ifp, m);
3489 if (m0 == NULL) {
3490 CTR2(KTR_IGMPV3, "%s: dropped %p", __func__, m);
3491 m_freem(m);
3492 IPSTAT_INC(ips_odropped);
3493 goto out;
3494 }
3495 }
3496
3497 igmp_scrub_context(m0);
3498 m_clrprotoflags(m);
3499 m0->m_pkthdr.rcvif = V_loif;
3500#ifdef MAC
3501 mac_netinet_igmp_send(ifp, m0);
3502#endif
3503 error = ip_output(m0, ipopts, NULL, 0, &imo, NULL);
3504 if (error) {
3505 CTR3(KTR_IGMPV3, "%s: ip_output(%p) = %d", __func__, m0, error);
3506 goto out;
3507 }
3508
3509 IGMPSTAT_INC(igps_snd_reports);
3510
3511out:
3512 /*
3513 * We must restore the existing vnet pointer before
3514 * continuing as we are run from netisr context.
3515 */
3516 CURVNET_RESTORE();
3517}
3518
3519/*
3520 * Encapsulate an IGMPv3 report.
3521 *
3522 * The internal mbuf flag M_IGMPV3_HDR is used to indicate that the mbuf
3523 * chain has already had its IP/IGMPv3 header prepended. In this case
3524 * the function will not attempt to prepend; the lengths and checksums
3525 * will however be re-computed.
3526 *
3527 * Returns a pointer to the new mbuf chain head, or NULL if the
3528 * allocation failed.
3529 */
3530static struct mbuf *
3531igmp_v3_encap_report(struct ifnet *ifp, struct mbuf *m)
3532{
3533 struct igmp_report *igmp;
3534 struct ip *ip;
3535 int hdrlen, igmpreclen;
3536
3537 KASSERT((m->m_flags & M_PKTHDR),
3538 ("%s: mbuf chain %p is !M_PKTHDR", __func__, m));
3539
3540 igmpreclen = m_length(m, NULL);
3541 hdrlen = sizeof(struct ip) + sizeof(struct igmp_report);
3542
3543 if (m->m_flags & M_IGMPV3_HDR) {
3544 igmpreclen -= hdrlen;
3545 } else {
3546 M_PREPEND(m, hdrlen, M_NOWAIT);
3547 if (m == NULL)
3548 return (NULL);
3549 m->m_flags |= M_IGMPV3_HDR;
3550 }
3551
3552 CTR2(KTR_IGMPV3, "%s: igmpreclen is %d", __func__, igmpreclen);
3553
3554 m->m_data += sizeof(struct ip);
3555 m->m_len -= sizeof(struct ip);
3556
3557 igmp = mtod(m, struct igmp_report *);
3558 igmp->ir_type = IGMP_v3_HOST_MEMBERSHIP_REPORT;
3559 igmp->ir_rsv1 = 0;
3560 igmp->ir_rsv2 = 0;
3561 igmp->ir_numgrps = htons(m->m_pkthdr.PH_vt.vt_nrecs);
3562 igmp->ir_cksum = 0;
3563 igmp->ir_cksum = in_cksum(m, sizeof(struct igmp_report) + igmpreclen);
3564 m->m_pkthdr.PH_vt.vt_nrecs = 0;
3565
3566 m->m_data -= sizeof(struct ip);
3567 m->m_len += sizeof(struct ip);
3568
3569 ip = mtod(m, struct ip *);
3570 ip->ip_tos = IPTOS_PREC_INTERNETCONTROL;
3571 ip->ip_len = htons(hdrlen + igmpreclen);
3572 ip->ip_off = htons(IP_DF);
3573 ip->ip_p = IPPROTO_IGMP;
3574 ip->ip_sum = 0;
3575
3576 ip->ip_src.s_addr = INADDR_ANY;
3577
3578 if (m->m_flags & M_IGMP_LOOP) {
3579 struct in_ifaddr *ia;
3580
3581 IFP_TO_IA(ifp, ia);
3582 if (ia != NULL)
3583 ip->ip_src = ia->ia_addr.sin_addr;
3584 }
3585
3586 ip->ip_dst.s_addr = htonl(INADDR_ALLRPTS_GROUP);
3587
3588 return (m);
3589}
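The header prepended above follows RFC 3376, Section 4.2; a hedged sketch annotated with the struct igmp_report fields that igmp_v3_encap_report() fills in:

/*
 * Illustrative layout only; not part of igmp.c.
 *
 *  0                   1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |  Type = 0x22  |   Reserved    |       Checksum (ir_cksum)     |
 * |   (ir_type)   |   (ir_rsv1)   |                               |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |      Reserved (ir_rsv2)       | Number of Records (ir_numgrps)|
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                     Group Record [1..M] ...                   |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 */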
3590
3591#ifdef KTR
3592static char *
3593igmp_rec_type_to_str(const int type)
3594{
3595
3596 switch (type) {
3597 case IGMP_CHANGE_TO_EXCLUDE_MODE:
3598 return "TO_EX";
3599 break;
3600 case IGMP_CHANGE_TO_INCLUDE_MODE:
3601 return "TO_IN";
3602 break;
3603 case IGMP_MODE_IS_EXCLUDE:
3604 return "MODE_EX";
3605 break;
3606 case IGMP_MODE_IS_INCLUDE:
3607 return "MODE_IN";
3608 break;
3609 case IGMP_ALLOW_NEW_SOURCES:
3610 return "ALLOW_NEW";
3611 break;
3612 case IGMP_BLOCK_OLD_SOURCES:
3613 return "BLOCK_OLD";
3614 break;
3615 default:
3616 break;
3617 }
3618 return "unknown";
3619}
3620#endif
3621
3622#ifdef VIMAGE
3623static void
3624vnet_igmp_init(const void *unused __unused)
3625{
3626
3627 netisr_register_vnet(&igmp_nh);
3628}
3629VNET_SYSINIT(vnet_igmp_init, SI_SUB_PROTO_MC, SI_ORDER_ANY,
3630 vnet_igmp_init, NULL);
3631
3632static void
3633vnet_igmp_uninit(const void *unused __unused)
3634{
3635
3636 /* This can happen when we shut down the entire network stack. */
3637 CTR1(KTR_IGMPV3, "%s: tearing down", __func__);
3638
3639 netisr_unregister_vnet(&igmp_nh);
3640}
3641VNET_SYSUNINIT(vnet_igmp_uninit, SI_SUB_PROTO_MC, SI_ORDER_ANY,
3642 vnet_igmp_uninit, NULL);
3643#endif
3644
3645#ifdef DDB
3646DB_SHOW_COMMAND(igi_list, db_show_igi_list)
3647{
3648 struct igmp_ifsoftc *igi, *tigi;
3649 LIST_HEAD(_igi_list, igmp_ifsoftc) *igi_head;
3650
3651 if (!have_addr) {
3652 db_printf("usage: show igi_list <addr>\n");
3653 return;
3654 }
3655 igi_head = (struct _igi_list *)addr;
3656
3657 LIST_FOREACH_SAFE(igi, igi_head, igi_link, tigi) {
3658 db_printf("igmp_ifsoftc %p:\n", igi);
3659 db_printf(" ifp %p\n", igi->igi_ifp);
3660 db_printf(" version %u\n", igi->igi_version);
3661 db_printf(" v1_timer %u\n", igi->igi_v1_timer);
3662 db_printf(" v2_timer %u\n", igi->igi_v2_timer);
3663 db_printf(" v3_timer %u\n", igi->igi_v3_timer);
3664 db_printf(" flags %#x\n", igi->igi_flags);
3665 db_printf(" rv %u\n", igi->igi_rv);
3666 db_printf(" qi %u\n", igi->igi_qi);
3667 db_printf(" qri %u\n", igi->igi_qri);
3668 db_printf(" uri %u\n", igi->igi_uri);
3669 /* struct mbufq igi_gq; */
3670 db_printf("\n");
3671 }
3672}
3673#endif
3674
3675static int
3676igmp_modevent(module_t mod, int type, void *unused __unused)
3677{
3678
3679 switch (type) {
3680 case MOD_LOAD:
3681 CTR1(KTR_IGMPV3, "%s: initializing", __func__);
3682 IGMP_LOCK_INIT();
3683 m_raopt = igmp_ra_alloc();
3684 netisr_register(&igmp_nh);
3685 break;
3686 case MOD_UNLOAD:
3687 CTR1(KTR_IGMPV3, "%s: tearing down", __func__);
3688 netisr_unregister(&igmp_nh);
3689 m_free(m_raopt);
3690 m_raopt = NULL;
3691 IGMP_LOCK_DESTROY();
3692 break;
3693 default:
3694 return (EOPNOTSUPP);
3695 }
3696 return (0);
3697}
3698
3699static moduledata_t igmp_mod = {
3700 "igmp",
3701 igmp_modevent,
3702 0
3703};
3704DECLARE_MODULE(igmp, igmp_mod, SI_SUB_PROTO_MC, SI_ORDER_MIDDLE);