1170613Sbms/*- 2189592Sbms * Copyright (c) 2007-2009 Bruce Simpson. 3170613Sbms * Copyright (c) 2005 Robert N. M. Watson. 4170613Sbms * All rights reserved. 5170613Sbms * 6170613Sbms * Redistribution and use in source and binary forms, with or without 7170613Sbms * modification, are permitted provided that the following conditions 8170613Sbms * are met: 9170613Sbms * 1. Redistributions of source code must retain the above copyright 10170613Sbms * notice, this list of conditions and the following disclaimer. 11170613Sbms * 2. Redistributions in binary form must reproduce the above copyright 12170613Sbms * notice, this list of conditions and the following disclaimer in the 13170613Sbms * documentation and/or other materials provided with the distribution. 14170613Sbms * 3. The name of the author may not be used to endorse or promote 15170613Sbms * products derived from this software without specific prior written 16170613Sbms * permission. 17170613Sbms * 18170613Sbms * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19170613Sbms * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20170613Sbms * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21170613Sbms * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 22170613Sbms * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23170613Sbms * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24170613Sbms * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25170613Sbms * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26170613Sbms * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27170613Sbms * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28170613Sbms * SUCH DAMAGE. 29170613Sbms */ 30170613Sbms 31170613Sbms/* 32170613Sbms * IPv4 multicast socket, group, and socket option processing module. 
33170613Sbms */ 34170613Sbms 35170613Sbms#include <sys/cdefs.h> 36170613Sbms__FBSDID("$FreeBSD$"); 37170613Sbms 38170613Sbms#include <sys/param.h> 39170613Sbms#include <sys/systm.h> 40170613Sbms#include <sys/kernel.h> 41170613Sbms#include <sys/malloc.h> 42170613Sbms#include <sys/mbuf.h> 43171746Scsjp#include <sys/protosw.h> 44170613Sbms#include <sys/socket.h> 45170613Sbms#include <sys/socketvar.h> 46189592Sbms#include <sys/protosw.h> 47170613Sbms#include <sys/sysctl.h> 48189592Sbms#include <sys/ktr.h> 49228969Sjhb#include <sys/taskqueue.h> 50189592Sbms#include <sys/tree.h> 51170613Sbms 52170613Sbms#include <net/if.h> 53170613Sbms#include <net/if_dl.h> 54170613Sbms#include <net/route.h> 55185571Sbz#include <net/vnet.h> 56170613Sbms 57170613Sbms#include <netinet/in.h> 58170613Sbms#include <netinet/in_systm.h> 59170613Sbms#include <netinet/in_pcb.h> 60170613Sbms#include <netinet/in_var.h> 61170613Sbms#include <netinet/ip_var.h> 62170613Sbms#include <netinet/igmp_var.h> 63170613Sbms 64189592Sbms#ifndef KTR_IGMPV3 65191659Sbms#define KTR_IGMPV3 KTR_INET 66189592Sbms#endif 67189592Sbms 68170613Sbms#ifndef __SOCKUNION_DECLARED 69170613Sbmsunion sockunion { 70170613Sbms struct sockaddr_storage ss; 71170613Sbms struct sockaddr sa; 72170613Sbms struct sockaddr_dl sdl; 73170613Sbms struct sockaddr_in sin; 74170613Sbms}; 75170613Sbmstypedef union sockunion sockunion_t; 76170613Sbms#define __SOCKUNION_DECLARED 77170613Sbms#endif /* __SOCKUNION_DECLARED */ 78170613Sbms 79189592Sbmsstatic MALLOC_DEFINE(M_INMFILTER, "in_mfilter", 80189592Sbms "IPv4 multicast PCB-layer source filter"); 81170613Sbmsstatic MALLOC_DEFINE(M_IPMADDR, "in_multi", "IPv4 multicast group"); 82170613Sbmsstatic MALLOC_DEFINE(M_IPMOPTS, "ip_moptions", "IPv4 multicast options"); 83189592Sbmsstatic MALLOC_DEFINE(M_IPMSOURCE, "ip_msource", 84189592Sbms "IPv4 multicast IGMP-layer source filter"); 85170613Sbms 86170613Sbms/* 87189592Sbms * Locking: 88189592Sbms * - Lock order is: Giant, INP_WLOCK, IN_MULTI_LOCK, 
IGMP_LOCK, IF_ADDR_LOCK. 89189592Sbms * - The IF_ADDR_LOCK is implicitly taken by inm_lookup() earlier, however 90189592Sbms * it can be taken by code in net/if.c also. 91189592Sbms * - ip_moptions and in_mfilter are covered by the INP_WLOCK. 92189592Sbms * 93189592Sbms * struct in_multi is covered by IN_MULTI_LOCK. There isn't strictly 94189592Sbms * any need for in_multi itself to be virtualized -- it is bound to an ifp 95189592Sbms * anyway no matter what happens. 96170613Sbms */ 97170613Sbmsstruct mtx in_multi_mtx; 98189592SbmsMTX_SYSINIT(in_multi_mtx, &in_multi_mtx, "in_multi_mtx", MTX_DEF); 99170613Sbms 100170613Sbms/* 101170613Sbms * Functions with non-static linkage defined in this file should be 102170613Sbms * declared in in_var.h: 103189592Sbms * imo_multi_filter() 104170613Sbms * in_addmulti() 105170613Sbms * in_delmulti() 106189592Sbms * in_joingroup() 107189592Sbms * in_joingroup_locked() 108189592Sbms * in_leavegroup() 109189592Sbms * in_leavegroup_locked() 110170613Sbms * and ip_var.h: 111170613Sbms * inp_freemoptions() 112170613Sbms * inp_getmoptions() 113170613Sbms * inp_setmoptions() 114189592Sbms * 115189592Sbms * XXX: Both carp and pf need to use the legacy (*,G) KPIs in_addmulti() 116189592Sbms * and in_delmulti(). 
117170613Sbms */ 118189592Sbmsstatic void imf_commit(struct in_mfilter *); 119189592Sbmsstatic int imf_get_source(struct in_mfilter *imf, 120189592Sbms const struct sockaddr_in *psin, 121189592Sbms struct in_msource **); 122189592Sbmsstatic struct in_msource * 123189592Sbms imf_graft(struct in_mfilter *, const uint8_t, 124189592Sbms const struct sockaddr_in *); 125189592Sbmsstatic void imf_leave(struct in_mfilter *); 126189592Sbmsstatic int imf_prune(struct in_mfilter *, const struct sockaddr_in *); 127189592Sbmsstatic void imf_purge(struct in_mfilter *); 128189592Sbmsstatic void imf_rollback(struct in_mfilter *); 129189592Sbmsstatic void imf_reap(struct in_mfilter *); 130170613Sbmsstatic int imo_grow(struct ip_moptions *); 131189592Sbmsstatic size_t imo_match_group(const struct ip_moptions *, 132189592Sbms const struct ifnet *, const struct sockaddr *); 133189592Sbmsstatic struct in_msource * 134189592Sbms imo_match_source(const struct ip_moptions *, const size_t, 135189592Sbms const struct sockaddr *); 136189592Sbmsstatic void ims_merge(struct ip_msource *ims, 137189592Sbms const struct in_msource *lims, const int rollback); 138189592Sbmsstatic int in_getmulti(struct ifnet *, const struct in_addr *, 139189592Sbms struct in_multi **); 140189592Sbmsstatic int inm_get_source(struct in_multi *inm, const in_addr_t haddr, 141189592Sbms const int noalloc, struct ip_msource **pims); 142259982Sdim#ifdef KTR 143189592Sbmsstatic int inm_is_ifp_detached(const struct in_multi *); 144259982Sdim#endif 145189592Sbmsstatic int inm_merge(struct in_multi *, /*const*/ struct in_mfilter *); 146189592Sbmsstatic void inm_purge(struct in_multi *); 147189592Sbmsstatic void inm_reap(struct in_multi *); 148170613Sbmsstatic struct ip_moptions * 149170613Sbms inp_findmoptions(struct inpcb *); 150228969Sjhbstatic void inp_freemoptions_internal(struct ip_moptions *); 151228969Sjhbstatic void inp_gcmoptions(void *, int); 152170613Sbmsstatic int inp_get_source_filters(struct inpcb *, struct 
sockopt *); 153170613Sbmsstatic int inp_join_group(struct inpcb *, struct sockopt *); 154170613Sbmsstatic int inp_leave_group(struct inpcb *, struct sockopt *); 155189592Sbmsstatic struct ifnet * 156189592Sbms inp_lookup_mcast_ifp(const struct inpcb *, 157189592Sbms const struct sockaddr_in *, const struct in_addr); 158189592Sbmsstatic int inp_block_unblock_source(struct inpcb *, struct sockopt *); 159170613Sbmsstatic int inp_set_multicast_if(struct inpcb *, struct sockopt *); 160170613Sbmsstatic int inp_set_source_filters(struct inpcb *, struct sockopt *); 161189592Sbmsstatic int sysctl_ip_mcast_filters(SYSCTL_HANDLER_ARGS); 162170613Sbms 163227309Sedstatic SYSCTL_NODE(_net_inet_ip, OID_AUTO, mcast, CTLFLAG_RW, 0, 164227309Sed "IPv4 multicast"); 165189357Sbms 166189592Sbmsstatic u_long in_mcast_maxgrpsrc = IP_MAX_GROUP_SRC_FILTER; 167189592SbmsSYSCTL_ULONG(_net_inet_ip_mcast, OID_AUTO, maxgrpsrc, 168189592Sbms CTLFLAG_RW | CTLFLAG_TUN, &in_mcast_maxgrpsrc, 0, 169189592Sbms "Max source filters per group"); 170189592SbmsTUNABLE_ULONG("net.inet.ip.mcast.maxgrpsrc", &in_mcast_maxgrpsrc); 171189592Sbms 172189592Sbmsstatic u_long in_mcast_maxsocksrc = IP_MAX_SOCK_SRC_FILTER; 173189592SbmsSYSCTL_ULONG(_net_inet_ip_mcast, OID_AUTO, maxsocksrc, 174189592Sbms CTLFLAG_RW | CTLFLAG_TUN, &in_mcast_maxsocksrc, 0, 175189592Sbms "Max source filters per socket"); 176189592SbmsTUNABLE_ULONG("net.inet.ip.mcast.maxsocksrc", &in_mcast_maxsocksrc); 177189592Sbms 178189357Sbmsint in_mcast_loop = IP_DEFAULT_MULTICAST_LOOP; 179189357SbmsSYSCTL_INT(_net_inet_ip_mcast, OID_AUTO, loop, CTLFLAG_RW | CTLFLAG_TUN, 180189357Sbms &in_mcast_loop, 0, "Loopback multicast datagrams by default"); 181189357SbmsTUNABLE_INT("net.inet.ip.mcast.loop", &in_mcast_loop); 182189357Sbms 183227309Sedstatic SYSCTL_NODE(_net_inet_ip_mcast, OID_AUTO, filters, 184189592Sbms CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_ip_mcast_filters, 185189592Sbms "Per-interface stack-wide source filters"); 186189592Sbms 
187228969Sjhbstatic STAILQ_HEAD(, ip_moptions) imo_gc_list = 188228969Sjhb STAILQ_HEAD_INITIALIZER(imo_gc_list); 189228969Sjhbstatic struct task imo_gc_task = TASK_INITIALIZER(0, inp_gcmoptions, NULL); 190228969Sjhb 191259982Sdim#ifdef KTR 192170613Sbms/* 193189592Sbms * Inline function which wraps assertions for a valid ifp. 194189592Sbms * The ifnet layer will set the ifma's ifp pointer to NULL if the ifp 195189592Sbms * is detached. 196189592Sbms */ 197189592Sbmsstatic int __inline 198189592Sbmsinm_is_ifp_detached(const struct in_multi *inm) 199189592Sbms{ 200189592Sbms struct ifnet *ifp; 201189592Sbms 202189592Sbms KASSERT(inm->inm_ifma != NULL, ("%s: no ifma", __func__)); 203189592Sbms ifp = inm->inm_ifma->ifma_ifp; 204189592Sbms if (ifp != NULL) { 205189592Sbms /* 206189592Sbms * Sanity check that netinet's notion of ifp is the 207189592Sbms * same as net's. 208189592Sbms */ 209189592Sbms KASSERT(inm->inm_ifp == ifp, ("%s: bad ifp", __func__)); 210189592Sbms } 211189592Sbms 212189592Sbms return (ifp == NULL); 213189592Sbms} 214259982Sdim#endif 215189592Sbms 216189592Sbms/* 217189592Sbms * Initialize an in_mfilter structure to a known state at t0, t1 218189592Sbms * with an empty source filter list. 219189592Sbms */ 220189592Sbmsstatic __inline void 221189592Sbmsimf_init(struct in_mfilter *imf, const int st0, const int st1) 222189592Sbms{ 223189592Sbms memset(imf, 0, sizeof(struct in_mfilter)); 224189592Sbms RB_INIT(&imf->imf_sources); 225189592Sbms imf->imf_st[0] = st0; 226189592Sbms imf->imf_st[1] = st1; 227189592Sbms} 228189592Sbms 229189592Sbms/* 230170613Sbms * Resize the ip_moptions vector to the next power-of-two minus 1. 231170613Sbms * May be called with locks held; do not sleep. 
232170613Sbms */ 233170613Sbmsstatic int 234170613Sbmsimo_grow(struct ip_moptions *imo) 235170613Sbms{ 236170613Sbms struct in_multi **nmships; 237170613Sbms struct in_multi **omships; 238170613Sbms struct in_mfilter *nmfilters; 239170613Sbms struct in_mfilter *omfilters; 240170613Sbms size_t idx; 241170613Sbms size_t newmax; 242170613Sbms size_t oldmax; 243170613Sbms 244170613Sbms nmships = NULL; 245170613Sbms nmfilters = NULL; 246170613Sbms omships = imo->imo_membership; 247170613Sbms omfilters = imo->imo_mfilters; 248170613Sbms oldmax = imo->imo_max_memberships; 249170613Sbms newmax = ((oldmax + 1) * 2) - 1; 250170613Sbms 251170613Sbms if (newmax <= IP_MAX_MEMBERSHIPS) { 252170613Sbms nmships = (struct in_multi **)realloc(omships, 253170613Sbms sizeof(struct in_multi *) * newmax, M_IPMOPTS, M_NOWAIT); 254170613Sbms nmfilters = (struct in_mfilter *)realloc(omfilters, 255189592Sbms sizeof(struct in_mfilter) * newmax, M_INMFILTER, M_NOWAIT); 256170613Sbms if (nmships != NULL && nmfilters != NULL) { 257170613Sbms /* Initialize newly allocated source filter heads. */ 258170613Sbms for (idx = oldmax; idx < newmax; idx++) { 259189592Sbms imf_init(&nmfilters[idx], MCAST_UNDEFINED, 260189592Sbms MCAST_EXCLUDE); 261170613Sbms } 262170613Sbms imo->imo_max_memberships = newmax; 263170613Sbms imo->imo_membership = nmships; 264170613Sbms imo->imo_mfilters = nmfilters; 265170613Sbms } 266170613Sbms } 267170613Sbms 268170613Sbms if (nmships == NULL || nmfilters == NULL) { 269170613Sbms if (nmships != NULL) 270170613Sbms free(nmships, M_IPMOPTS); 271170613Sbms if (nmfilters != NULL) 272189592Sbms free(nmfilters, M_INMFILTER); 273170613Sbms return (ETOOMANYREFS); 274170613Sbms } 275170613Sbms 276170613Sbms return (0); 277170613Sbms} 278170613Sbms 279170613Sbms/* 280170613Sbms * Find an IPv4 multicast group entry for this ip_moptions instance 281170613Sbms * which matches the specified group, and optionally an interface. 
282170613Sbms * Return its index into the array, or -1 if not found. 283170613Sbms */ 284189592Sbmsstatic size_t 285189592Sbmsimo_match_group(const struct ip_moptions *imo, const struct ifnet *ifp, 286189592Sbms const struct sockaddr *group) 287170613Sbms{ 288189592Sbms const struct sockaddr_in *gsin; 289170613Sbms struct in_multi **pinm; 290170613Sbms int idx; 291170613Sbms int nmships; 292170613Sbms 293189592Sbms gsin = (const struct sockaddr_in *)group; 294170613Sbms 295170613Sbms /* The imo_membership array may be lazy allocated. */ 296170613Sbms if (imo->imo_membership == NULL || imo->imo_num_memberships == 0) 297170613Sbms return (-1); 298170613Sbms 299170613Sbms nmships = imo->imo_num_memberships; 300170613Sbms pinm = &imo->imo_membership[0]; 301170613Sbms for (idx = 0; idx < nmships; idx++, pinm++) { 302170613Sbms if (*pinm == NULL) 303170613Sbms continue; 304170613Sbms if ((ifp == NULL || ((*pinm)->inm_ifp == ifp)) && 305189592Sbms in_hosteq((*pinm)->inm_addr, gsin->sin_addr)) { 306170613Sbms break; 307170613Sbms } 308170613Sbms } 309170613Sbms if (idx >= nmships) 310170613Sbms idx = -1; 311170613Sbms 312170613Sbms return (idx); 313170613Sbms} 314170613Sbms 315170613Sbms/* 316189592Sbms * Find an IPv4 multicast source entry for this imo which matches 317170613Sbms * the given group index for this socket, and source address. 318189592Sbms * 319189592Sbms * NOTE: This does not check if the entry is in-mode, merely if 320189592Sbms * it exists, which may not be the desired behaviour. 
321170613Sbms */ 322189592Sbmsstatic struct in_msource * 323189592Sbmsimo_match_source(const struct ip_moptions *imo, const size_t gidx, 324189592Sbms const struct sockaddr *src) 325170613Sbms{ 326189592Sbms struct ip_msource find; 327170613Sbms struct in_mfilter *imf; 328189592Sbms struct ip_msource *ims; 329189592Sbms const sockunion_t *psa; 330170613Sbms 331170613Sbms KASSERT(src->sa_family == AF_INET, ("%s: !AF_INET", __func__)); 332170613Sbms KASSERT(gidx != -1 && gidx < imo->imo_num_memberships, 333170613Sbms ("%s: invalid index %d\n", __func__, (int)gidx)); 334170613Sbms 335170613Sbms /* The imo_mfilters array may be lazy allocated. */ 336170613Sbms if (imo->imo_mfilters == NULL) 337170613Sbms return (NULL); 338170613Sbms imf = &imo->imo_mfilters[gidx]; 339170613Sbms 340189592Sbms /* Source trees are keyed in host byte order. */ 341189592Sbms psa = (const sockunion_t *)src; 342189592Sbms find.ims_haddr = ntohl(psa->sin.sin_addr.s_addr); 343189592Sbms ims = RB_FIND(ip_msource_tree, &imf->imf_sources, &find); 344189592Sbms 345189592Sbms return ((struct in_msource *)ims); 346170613Sbms} 347170613Sbms 348170613Sbms/* 349189592Sbms * Perform filtering for multicast datagrams on a socket by group and source. 350189592Sbms * 351189592Sbms * Returns 0 if a datagram should be allowed through, or various error codes 352189592Sbms * if the socket was not a member of the group, or the source was muted, etc. 
353170613Sbms */ 354189592Sbmsint 355189592Sbmsimo_multi_filter(const struct ip_moptions *imo, const struct ifnet *ifp, 356189592Sbms const struct sockaddr *group, const struct sockaddr *src) 357170613Sbms{ 358189592Sbms size_t gidx; 359189592Sbms struct in_msource *ims; 360189592Sbms int mode; 361189592Sbms 362189592Sbms KASSERT(ifp != NULL, ("%s: null ifp", __func__)); 363189592Sbms 364189592Sbms gidx = imo_match_group(imo, ifp, group); 365189592Sbms if (gidx == -1) 366189592Sbms return (MCAST_NOTGMEMBER); 367189592Sbms 368189592Sbms /* 369189592Sbms * Check if the source was included in an (S,G) join. 370189592Sbms * Allow reception on exclusive memberships by default, 371189592Sbms * reject reception on inclusive memberships by default. 372189592Sbms * Exclude source only if an in-mode exclude filter exists. 373189592Sbms * Include source only if an in-mode include filter exists. 374189592Sbms * NOTE: We are comparing group state here at IGMP t1 (now) 375189592Sbms * with socket-layer t0 (since last downcall). 376189592Sbms */ 377189592Sbms mode = imo->imo_mfilters[gidx].imf_st[1]; 378189592Sbms ims = imo_match_source(imo, gidx, src); 379189592Sbms 380189592Sbms if ((ims == NULL && mode == MCAST_INCLUDE) || 381189592Sbms (ims != NULL && ims->imsl_st[0] != mode)) 382189592Sbms return (MCAST_NOTSMEMBER); 383189592Sbms 384189592Sbms return (MCAST_PASS); 385189592Sbms} 386189592Sbms 387189592Sbms/* 388189592Sbms * Find and return a reference to an in_multi record for (ifp, group), 389189592Sbms * and bump its reference count. 390189592Sbms * If one does not exist, try to allocate it, and update link-layer multicast 391189592Sbms * filters on ifp to listen for group. 392189592Sbms * Assumes the IN_MULTI lock is held across the call. 393189592Sbms * Return 0 if successful, otherwise return an appropriate error code. 
394189592Sbms */ 395189592Sbmsstatic int 396189592Sbmsin_getmulti(struct ifnet *ifp, const struct in_addr *group, 397189592Sbms struct in_multi **pinm) 398189592Sbms{ 399189592Sbms struct sockaddr_in gsin; 400189592Sbms struct ifmultiaddr *ifma; 401189592Sbms struct in_ifinfo *ii; 402189592Sbms struct in_multi *inm; 403189592Sbms int error; 404170613Sbms 405189592Sbms IN_MULTI_LOCK_ASSERT(); 406170613Sbms 407189592Sbms ii = (struct in_ifinfo *)ifp->if_afdata[AF_INET]; 408170613Sbms 409189592Sbms inm = inm_lookup(ifp, *group); 410170613Sbms if (inm != NULL) { 411170613Sbms /* 412170613Sbms * If we already joined this group, just bump the 413170613Sbms * refcount and return it. 414170613Sbms */ 415170613Sbms KASSERT(inm->inm_refcount >= 1, 416170613Sbms ("%s: bad refcount %d", __func__, inm->inm_refcount)); 417170613Sbms ++inm->inm_refcount; 418189592Sbms *pinm = inm; 419189592Sbms return (0); 420189592Sbms } 421170613Sbms 422189592Sbms memset(&gsin, 0, sizeof(gsin)); 423189592Sbms gsin.sin_family = AF_INET; 424189592Sbms gsin.sin_len = sizeof(struct sockaddr_in); 425189592Sbms gsin.sin_addr = *group; 426170613Sbms 427189592Sbms /* 428189592Sbms * Check if a link-layer group is already associated 429189592Sbms * with this network-layer group on the given ifnet. 430189592Sbms */ 431189592Sbms error = if_addmulti(ifp, (struct sockaddr *)&gsin, &ifma); 432189592Sbms if (error != 0) 433189592Sbms return (error); 434189592Sbms 435189931Sbms /* XXX ifma_protospec must be covered by IF_ADDR_LOCK */ 436229621Sjhb IF_ADDR_WLOCK(ifp); 437189931Sbms 438189592Sbms /* 439189592Sbms * If something other than netinet is occupying the link-layer 440189592Sbms * group, print a meaningful error message and back out of 441189592Sbms * the allocation. 442189592Sbms * Otherwise, bump the refcount on the existing network-layer 443189592Sbms * group association and return it. 
444189592Sbms */ 445189592Sbms if (ifma->ifma_protospec != NULL) { 446189592Sbms inm = (struct in_multi *)ifma->ifma_protospec; 447170613Sbms#ifdef INVARIANTS 448189592Sbms KASSERT(ifma->ifma_addr != NULL, ("%s: no ifma_addr", 449189592Sbms __func__)); 450189592Sbms KASSERT(ifma->ifma_addr->sa_family == AF_INET, 451189592Sbms ("%s: ifma not AF_INET", __func__)); 452189592Sbms KASSERT(inm != NULL, ("%s: no ifma_protospec", __func__)); 453189592Sbms if (inm->inm_ifma != ifma || inm->inm_ifp != ifp || 454189592Sbms !in_hosteq(inm->inm_addr, *group)) 455189592Sbms panic("%s: ifma %p is inconsistent with %p (%s)", 456189592Sbms __func__, ifma, inm, inet_ntoa(*group)); 457170613Sbms#endif 458189592Sbms ++inm->inm_refcount; 459189592Sbms *pinm = inm; 460229621Sjhb IF_ADDR_WUNLOCK(ifp); 461189592Sbms return (0); 462189592Sbms } 463189592Sbms 464229621Sjhb IF_ADDR_WLOCK_ASSERT(ifp); 465189931Sbms 466189592Sbms /* 467189592Sbms * A new in_multi record is needed; allocate and initialize it. 468189592Sbms * We DO NOT perform an IGMP join as the in_ layer may need to 469189592Sbms * push an initial source list down to IGMP to support SSM. 470189592Sbms * 471189592Sbms * The initial source filter state is INCLUDE, {} as per the RFC. 472189592Sbms */ 473189592Sbms inm = malloc(sizeof(*inm), M_IPMADDR, M_NOWAIT | M_ZERO); 474189592Sbms if (inm == NULL) { 475189592Sbms if_delmulti_ifma(ifma); 476229621Sjhb IF_ADDR_WUNLOCK(ifp); 477189592Sbms return (ENOMEM); 478189592Sbms } 479189592Sbms inm->inm_addr = *group; 480189592Sbms inm->inm_ifp = ifp; 481189592Sbms inm->inm_igi = ii->ii_igmp; 482189592Sbms inm->inm_ifma = ifma; 483189592Sbms inm->inm_refcount = 1; 484189592Sbms inm->inm_state = IGMP_NOT_MEMBER; 485189592Sbms 486189592Sbms /* 487189592Sbms * Pending state-changes per group are subject to a bounds check. 
488189592Sbms */ 489189592Sbms IFQ_SET_MAXLEN(&inm->inm_scq, IGMP_MAX_STATE_CHANGES); 490189592Sbms 491189592Sbms inm->inm_st[0].iss_fmode = MCAST_UNDEFINED; 492189592Sbms inm->inm_st[1].iss_fmode = MCAST_UNDEFINED; 493189592Sbms RB_INIT(&inm->inm_srcs); 494189592Sbms 495189592Sbms ifma->ifma_protospec = inm; 496189592Sbms 497189592Sbms *pinm = inm; 498189592Sbms 499229621Sjhb IF_ADDR_WUNLOCK(ifp); 500189592Sbms return (0); 501189592Sbms} 502189592Sbms 503189592Sbms/* 504189592Sbms * Drop a reference to an in_multi record. 505189592Sbms * 506189592Sbms * If the refcount drops to 0, free the in_multi record and 507189592Sbms * delete the underlying link-layer membership. 508189592Sbms */ 509189592Sbmsvoid 510189592Sbmsinm_release_locked(struct in_multi *inm) 511189592Sbms{ 512189592Sbms struct ifmultiaddr *ifma; 513189592Sbms 514189592Sbms IN_MULTI_LOCK_ASSERT(); 515189592Sbms 516189592Sbms CTR2(KTR_IGMPV3, "%s: refcount is %d", __func__, inm->inm_refcount); 517189592Sbms 518189592Sbms if (--inm->inm_refcount > 0) { 519189592Sbms CTR2(KTR_IGMPV3, "%s: refcount is now %d", __func__, 520189592Sbms inm->inm_refcount); 521189592Sbms return; 522189592Sbms } 523189592Sbms 524189592Sbms CTR2(KTR_IGMPV3, "%s: freeing inm %p", __func__, inm); 525189592Sbms 526189592Sbms ifma = inm->inm_ifma; 527189592Sbms 528189931Sbms /* XXX this access is not covered by IF_ADDR_LOCK */ 529189592Sbms CTR2(KTR_IGMPV3, "%s: purging ifma %p", __func__, ifma); 530189592Sbms KASSERT(ifma->ifma_protospec == inm, 531189592Sbms ("%s: ifma_protospec != inm", __func__)); 532189592Sbms ifma->ifma_protospec = NULL; 533189592Sbms 534189592Sbms inm_purge(inm); 535189592Sbms 536189592Sbms free(inm, M_IPMADDR); 537189592Sbms 538189592Sbms if_delmulti_ifma(ifma); 539189592Sbms} 540189592Sbms 541189592Sbms/* 542189592Sbms * Clear recorded source entries for a group. 543189592Sbms * Used by the IGMP code. Caller must hold the IN_MULTI lock. 544189592Sbms * FIXME: Should reap. 
545189592Sbms */ 546189592Sbmsvoid 547189592Sbmsinm_clear_recorded(struct in_multi *inm) 548189592Sbms{ 549189592Sbms struct ip_msource *ims; 550189592Sbms 551189592Sbms IN_MULTI_LOCK_ASSERT(); 552189592Sbms 553189592Sbms RB_FOREACH(ims, ip_msource_tree, &inm->inm_srcs) { 554189592Sbms if (ims->ims_stp) { 555189592Sbms ims->ims_stp = 0; 556189592Sbms --inm->inm_st[1].iss_rec; 557170613Sbms } 558189592Sbms } 559189592Sbms KASSERT(inm->inm_st[1].iss_rec == 0, 560189592Sbms ("%s: iss_rec %d not 0", __func__, inm->inm_st[1].iss_rec)); 561189592Sbms} 562170613Sbms 563189592Sbms/* 564189592Sbms * Record a source as pending for a Source-Group IGMPv3 query. 565189592Sbms * This lives here as it modifies the shared tree. 566189592Sbms * 567189592Sbms * inm is the group descriptor. 568189592Sbms * naddr is the address of the source to record in network-byte order. 569189592Sbms * 570189592Sbms * If the net.inet.igmp.sgalloc sysctl is non-zero, we will 571189592Sbms * lazy-allocate a source node in response to an SG query. 572189592Sbms * Otherwise, no allocation is performed. This saves some memory 573189592Sbms * with the trade-off that the source will not be reported to the 574189592Sbms * router if joined in the window between the query response and 575189592Sbms * the group actually being joined on the local host. 576189592Sbms * 577189592Sbms * VIMAGE: XXX: Currently the igmp_sgalloc feature has been removed. 578189592Sbms * This turns off the allocation of a recorded source entry if 579189592Sbms * the group has not been joined. 580189592Sbms * 581189592Sbms * Return 0 if the source didn't exist or was already marked as recorded. 582189592Sbms * Return 1 if the source was marked as recorded by this function. 583189592Sbms * Return <0 if any error occured (negated errno code). 
584189592Sbms */ 585189592Sbmsint 586189592Sbmsinm_record_source(struct in_multi *inm, const in_addr_t naddr) 587189592Sbms{ 588189592Sbms struct ip_msource find; 589189592Sbms struct ip_msource *ims, *nims; 590189592Sbms 591189592Sbms IN_MULTI_LOCK_ASSERT(); 592189592Sbms 593189592Sbms find.ims_haddr = ntohl(naddr); 594189592Sbms ims = RB_FIND(ip_msource_tree, &inm->inm_srcs, &find); 595189592Sbms if (ims && ims->ims_stp) 596189592Sbms return (0); 597189592Sbms if (ims == NULL) { 598189592Sbms if (inm->inm_nsrc == in_mcast_maxgrpsrc) 599189592Sbms return (-ENOSPC); 600189592Sbms nims = malloc(sizeof(struct ip_msource), M_IPMSOURCE, 601189592Sbms M_NOWAIT | M_ZERO); 602189592Sbms if (nims == NULL) 603189592Sbms return (-ENOMEM); 604189592Sbms nims->ims_haddr = find.ims_haddr; 605189592Sbms RB_INSERT(ip_msource_tree, &inm->inm_srcs, nims); 606189592Sbms ++inm->inm_nsrc; 607189592Sbms ims = nims; 608189592Sbms } 609189592Sbms 610189592Sbms /* 611189592Sbms * Mark the source as recorded and update the recorded 612189592Sbms * source count. 613189592Sbms */ 614189592Sbms ++ims->ims_stp; 615189592Sbms ++inm->inm_st[1].iss_rec; 616189592Sbms 617189592Sbms return (1); 618189592Sbms} 619189592Sbms 620189592Sbms/* 621189592Sbms * Return a pointer to an in_msource owned by an in_mfilter, 622189592Sbms * given its source address. 623189592Sbms * Lazy-allocate if needed. If this is a new entry its filter state is 624189592Sbms * undefined at t0. 625189592Sbms * 626189592Sbms * imf is the filter set being modified. 627189592Sbms * haddr is the source address in *host* byte-order. 628189592Sbms * 629189592Sbms * SMPng: May be called with locks held; malloc must not block. 
630189592Sbms */ 631189592Sbmsstatic int 632189592Sbmsimf_get_source(struct in_mfilter *imf, const struct sockaddr_in *psin, 633189592Sbms struct in_msource **plims) 634189592Sbms{ 635189592Sbms struct ip_msource find; 636189592Sbms struct ip_msource *ims, *nims; 637189592Sbms struct in_msource *lims; 638189592Sbms int error; 639189592Sbms 640189592Sbms error = 0; 641189592Sbms ims = NULL; 642189592Sbms lims = NULL; 643189592Sbms 644189592Sbms /* key is host byte order */ 645189592Sbms find.ims_haddr = ntohl(psin->sin_addr.s_addr); 646189592Sbms ims = RB_FIND(ip_msource_tree, &imf->imf_sources, &find); 647189592Sbms lims = (struct in_msource *)ims; 648189592Sbms if (lims == NULL) { 649189592Sbms if (imf->imf_nsrc == in_mcast_maxsocksrc) 650189592Sbms return (ENOSPC); 651189592Sbms nims = malloc(sizeof(struct in_msource), M_INMFILTER, 652189592Sbms M_NOWAIT | M_ZERO); 653189592Sbms if (nims == NULL) 654189592Sbms return (ENOMEM); 655189592Sbms lims = (struct in_msource *)nims; 656189592Sbms lims->ims_haddr = find.ims_haddr; 657189592Sbms lims->imsl_st[0] = MCAST_UNDEFINED; 658189592Sbms RB_INSERT(ip_msource_tree, &imf->imf_sources, nims); 659189592Sbms ++imf->imf_nsrc; 660189592Sbms } 661189592Sbms 662189592Sbms *plims = lims; 663189592Sbms 664189592Sbms return (error); 665189592Sbms} 666189592Sbms 667189592Sbms/* 668189592Sbms * Graft a source entry into an existing socket-layer filter set, 669189592Sbms * maintaining any required invariants and checking allocations. 670189592Sbms * 671189592Sbms * The source is marked as being in the new filter mode at t1. 672189592Sbms * 673189592Sbms * Return the pointer to the new node, otherwise return NULL. 
674189592Sbms */ 675189592Sbmsstatic struct in_msource * 676189592Sbmsimf_graft(struct in_mfilter *imf, const uint8_t st1, 677189592Sbms const struct sockaddr_in *psin) 678189592Sbms{ 679189592Sbms struct ip_msource *nims; 680189592Sbms struct in_msource *lims; 681189592Sbms 682189592Sbms nims = malloc(sizeof(struct in_msource), M_INMFILTER, 683189592Sbms M_NOWAIT | M_ZERO); 684189592Sbms if (nims == NULL) 685189592Sbms return (NULL); 686189592Sbms lims = (struct in_msource *)nims; 687189592Sbms lims->ims_haddr = ntohl(psin->sin_addr.s_addr); 688189592Sbms lims->imsl_st[0] = MCAST_UNDEFINED; 689189592Sbms lims->imsl_st[1] = st1; 690189592Sbms RB_INSERT(ip_msource_tree, &imf->imf_sources, nims); 691189592Sbms ++imf->imf_nsrc; 692189592Sbms 693189592Sbms return (lims); 694189592Sbms} 695189592Sbms 696189592Sbms/* 697189592Sbms * Prune a source entry from an existing socket-layer filter set, 698189592Sbms * maintaining any required invariants and checking allocations. 699189592Sbms * 700189592Sbms * The source is marked as being left at t1, it is not freed. 701189592Sbms * 702189592Sbms * Return 0 if no error occurred, otherwise return an errno value. 703189592Sbms */ 704189592Sbmsstatic int 705189592Sbmsimf_prune(struct in_mfilter *imf, const struct sockaddr_in *psin) 706189592Sbms{ 707189592Sbms struct ip_msource find; 708189592Sbms struct ip_msource *ims; 709189592Sbms struct in_msource *lims; 710189592Sbms 711189592Sbms /* key is host byte order */ 712189592Sbms find.ims_haddr = ntohl(psin->sin_addr.s_addr); 713189592Sbms ims = RB_FIND(ip_msource_tree, &imf->imf_sources, &find); 714189592Sbms if (ims == NULL) 715189592Sbms return (ENOENT); 716189592Sbms lims = (struct in_msource *)ims; 717189592Sbms lims->imsl_st[1] = MCAST_UNDEFINED; 718189592Sbms return (0); 719189592Sbms} 720189592Sbms 721189592Sbms/* 722189592Sbms * Revert socket-layer filter set deltas at t1 to t0 state. 
723189592Sbms */ 724189592Sbmsstatic void 725189592Sbmsimf_rollback(struct in_mfilter *imf) 726189592Sbms{ 727189592Sbms struct ip_msource *ims, *tims; 728189592Sbms struct in_msource *lims; 729189592Sbms 730189592Sbms RB_FOREACH_SAFE(ims, ip_msource_tree, &imf->imf_sources, tims) { 731189592Sbms lims = (struct in_msource *)ims; 732189592Sbms if (lims->imsl_st[0] == lims->imsl_st[1]) { 733189592Sbms /* no change at t1 */ 734189592Sbms continue; 735189592Sbms } else if (lims->imsl_st[0] != MCAST_UNDEFINED) { 736189592Sbms /* revert change to existing source at t1 */ 737189592Sbms lims->imsl_st[1] = lims->imsl_st[0]; 738189592Sbms } else { 739189592Sbms /* revert source added t1 */ 740189592Sbms CTR2(KTR_IGMPV3, "%s: free ims %p", __func__, ims); 741189592Sbms RB_REMOVE(ip_msource_tree, &imf->imf_sources, ims); 742189592Sbms free(ims, M_INMFILTER); 743189592Sbms imf->imf_nsrc--; 744189592Sbms } 745189592Sbms } 746189592Sbms imf->imf_st[1] = imf->imf_st[0]; 747189592Sbms} 748189592Sbms 749189592Sbms/* 750189592Sbms * Mark socket-layer filter set as INCLUDE {} at t1. 751189592Sbms */ 752189592Sbmsstatic void 753189592Sbmsimf_leave(struct in_mfilter *imf) 754189592Sbms{ 755189592Sbms struct ip_msource *ims; 756189592Sbms struct in_msource *lims; 757189592Sbms 758189592Sbms RB_FOREACH(ims, ip_msource_tree, &imf->imf_sources) { 759189592Sbms lims = (struct in_msource *)ims; 760189592Sbms lims->imsl_st[1] = MCAST_UNDEFINED; 761189592Sbms } 762189592Sbms imf->imf_st[1] = MCAST_INCLUDE; 763189592Sbms} 764189592Sbms 765189592Sbms/* 766189592Sbms * Mark socket-layer filter set deltas as committed. 
767189592Sbms */ 768189592Sbmsstatic void 769189592Sbmsimf_commit(struct in_mfilter *imf) 770189592Sbms{ 771189592Sbms struct ip_msource *ims; 772189592Sbms struct in_msource *lims; 773189592Sbms 774189592Sbms RB_FOREACH(ims, ip_msource_tree, &imf->imf_sources) { 775189592Sbms lims = (struct in_msource *)ims; 776189592Sbms lims->imsl_st[0] = lims->imsl_st[1]; 777189592Sbms } 778189592Sbms imf->imf_st[0] = imf->imf_st[1]; 779189592Sbms} 780189592Sbms 781189592Sbms/* 782189592Sbms * Reap unreferenced sources from socket-layer filter set. 783189592Sbms */ 784189592Sbmsstatic void 785189592Sbmsimf_reap(struct in_mfilter *imf) 786189592Sbms{ 787189592Sbms struct ip_msource *ims, *tims; 788189592Sbms struct in_msource *lims; 789189592Sbms 790189592Sbms RB_FOREACH_SAFE(ims, ip_msource_tree, &imf->imf_sources, tims) { 791189592Sbms lims = (struct in_msource *)ims; 792189592Sbms if ((lims->imsl_st[0] == MCAST_UNDEFINED) && 793189592Sbms (lims->imsl_st[1] == MCAST_UNDEFINED)) { 794189592Sbms CTR2(KTR_IGMPV3, "%s: free lims %p", __func__, ims); 795189592Sbms RB_REMOVE(ip_msource_tree, &imf->imf_sources, ims); 796189592Sbms free(ims, M_INMFILTER); 797189592Sbms imf->imf_nsrc--; 798189592Sbms } 799189592Sbms } 800189592Sbms} 801189592Sbms 802189592Sbms/* 803189592Sbms * Purge socket-layer filter set. 
804189592Sbms */ 805189592Sbmsstatic void 806189592Sbmsimf_purge(struct in_mfilter *imf) 807189592Sbms{ 808189592Sbms struct ip_msource *ims, *tims; 809189592Sbms 810189592Sbms RB_FOREACH_SAFE(ims, ip_msource_tree, &imf->imf_sources, tims) { 811189592Sbms CTR2(KTR_IGMPV3, "%s: free ims %p", __func__, ims); 812189592Sbms RB_REMOVE(ip_msource_tree, &imf->imf_sources, ims); 813189592Sbms free(ims, M_INMFILTER); 814189592Sbms imf->imf_nsrc--; 815189592Sbms } 816189592Sbms imf->imf_st[0] = imf->imf_st[1] = MCAST_UNDEFINED; 817189592Sbms KASSERT(RB_EMPTY(&imf->imf_sources), 818189592Sbms ("%s: imf_sources not empty", __func__)); 819189592Sbms} 820189592Sbms 821189592Sbms/* 822189592Sbms * Look up a source filter entry for a multicast group. 823189592Sbms * 824189592Sbms * inm is the group descriptor to work with. 825189592Sbms * haddr is the host-byte-order IPv4 address to look up. 826189592Sbms * noalloc may be non-zero to suppress allocation of sources. 827189592Sbms * *pims will be set to the address of the retrieved or allocated source. 828189592Sbms * 829189592Sbms * SMPng: NOTE: may be called with locks held. 830189592Sbms * Return 0 if successful, otherwise return a non-zero error code. 
831189592Sbms */ 832189592Sbmsstatic int 833189592Sbmsinm_get_source(struct in_multi *inm, const in_addr_t haddr, 834189592Sbms const int noalloc, struct ip_msource **pims) 835189592Sbms{ 836189592Sbms struct ip_msource find; 837189592Sbms struct ip_msource *ims, *nims; 838189592Sbms#ifdef KTR 839189592Sbms struct in_addr ia; 840189592Sbms#endif 841189592Sbms 842189592Sbms find.ims_haddr = haddr; 843189592Sbms ims = RB_FIND(ip_msource_tree, &inm->inm_srcs, &find); 844189592Sbms if (ims == NULL && !noalloc) { 845189592Sbms if (inm->inm_nsrc == in_mcast_maxgrpsrc) 846189592Sbms return (ENOSPC); 847189592Sbms nims = malloc(sizeof(struct ip_msource), M_IPMSOURCE, 848189592Sbms M_NOWAIT | M_ZERO); 849189592Sbms if (nims == NULL) 850189592Sbms return (ENOMEM); 851189592Sbms nims->ims_haddr = haddr; 852189592Sbms RB_INSERT(ip_msource_tree, &inm->inm_srcs, nims); 853189592Sbms ++inm->inm_nsrc; 854189592Sbms ims = nims; 855189592Sbms#ifdef KTR 856189592Sbms ia.s_addr = htonl(haddr); 857189592Sbms CTR3(KTR_IGMPV3, "%s: allocated %s as %p", __func__, 858189592Sbms inet_ntoa(ia), ims); 859189592Sbms#endif 860189592Sbms } 861189592Sbms 862189592Sbms *pims = ims; 863189592Sbms return (0); 864189592Sbms} 865189592Sbms 866189592Sbms/* 867189592Sbms * Merge socket-layer source into IGMP-layer source. 868189592Sbms * If rollback is non-zero, perform the inverse of the merge. 869189592Sbms */ 870189592Sbmsstatic void 871189592Sbmsims_merge(struct ip_msource *ims, const struct in_msource *lims, 872189592Sbms const int rollback) 873189592Sbms{ 874189592Sbms int n = rollback ? 
-1 : 1; 875189592Sbms#ifdef KTR 876189592Sbms struct in_addr ia; 877189592Sbms 878189592Sbms ia.s_addr = htonl(ims->ims_haddr); 879189592Sbms#endif 880189592Sbms 881189592Sbms if (lims->imsl_st[0] == MCAST_EXCLUDE) { 882189592Sbms CTR3(KTR_IGMPV3, "%s: t1 ex -= %d on %s", 883189592Sbms __func__, n, inet_ntoa(ia)); 884189592Sbms ims->ims_st[1].ex -= n; 885189592Sbms } else if (lims->imsl_st[0] == MCAST_INCLUDE) { 886189592Sbms CTR3(KTR_IGMPV3, "%s: t1 in -= %d on %s", 887189592Sbms __func__, n, inet_ntoa(ia)); 888189592Sbms ims->ims_st[1].in -= n; 889189592Sbms } 890189592Sbms 891189592Sbms if (lims->imsl_st[1] == MCAST_EXCLUDE) { 892189592Sbms CTR3(KTR_IGMPV3, "%s: t1 ex += %d on %s", 893189592Sbms __func__, n, inet_ntoa(ia)); 894189592Sbms ims->ims_st[1].ex += n; 895189592Sbms } else if (lims->imsl_st[1] == MCAST_INCLUDE) { 896189592Sbms CTR3(KTR_IGMPV3, "%s: t1 in += %d on %s", 897189592Sbms __func__, n, inet_ntoa(ia)); 898189592Sbms ims->ims_st[1].in += n; 899189592Sbms } 900189592Sbms} 901189592Sbms 902189592Sbms/* 903189592Sbms * Atomically update the global in_multi state, when a membership's 904189592Sbms * filter list is being updated in any way. 905189592Sbms * 906189592Sbms * imf is the per-inpcb-membership group filter pointer. 907189592Sbms * A fake imf may be passed for in-kernel consumers. 908189592Sbms * 909189592Sbms * XXX This is a candidate for a set-symmetric-difference style loop 910189592Sbms * which would eliminate the repeated lookup from root of ims nodes, 911189592Sbms * as they share the same key space. 912189592Sbms * 913189592Sbms * If any error occurred this function will back out of refcounts 914189592Sbms * and return a non-zero value. 
915189592Sbms */ 916189592Sbmsstatic int 917189592Sbmsinm_merge(struct in_multi *inm, /*const*/ struct in_mfilter *imf) 918189592Sbms{ 919189592Sbms struct ip_msource *ims, *nims; 920189592Sbms struct in_msource *lims; 921189592Sbms int schanged, error; 922189592Sbms int nsrc0, nsrc1; 923189592Sbms 924189592Sbms schanged = 0; 925189592Sbms error = 0; 926189592Sbms nsrc1 = nsrc0 = 0; 927189592Sbms 928189592Sbms /* 929189592Sbms * Update the source filters first, as this may fail. 930189592Sbms * Maintain count of in-mode filters at t0, t1. These are 931189592Sbms * used to work out if we transition into ASM mode or not. 932189592Sbms * Maintain a count of source filters whose state was 933189592Sbms * actually modified by this operation. 934189592Sbms */ 935189592Sbms RB_FOREACH(ims, ip_msource_tree, &imf->imf_sources) { 936189592Sbms lims = (struct in_msource *)ims; 937189592Sbms if (lims->imsl_st[0] == imf->imf_st[0]) nsrc0++; 938189592Sbms if (lims->imsl_st[1] == imf->imf_st[1]) nsrc1++; 939189592Sbms if (lims->imsl_st[0] == lims->imsl_st[1]) continue; 940189592Sbms error = inm_get_source(inm, lims->ims_haddr, 0, &nims); 941189592Sbms ++schanged; 942189592Sbms if (error) 943170613Sbms break; 944189592Sbms ims_merge(nims, lims, 0); 945189592Sbms } 946189592Sbms if (error) { 947189592Sbms struct ip_msource *bims; 948189592Sbms 949189592Sbms RB_FOREACH_REVERSE_FROM(ims, ip_msource_tree, nims) { 950189592Sbms lims = (struct in_msource *)ims; 951189592Sbms if (lims->imsl_st[0] == lims->imsl_st[1]) 952189592Sbms continue; 953189592Sbms (void)inm_get_source(inm, lims->ims_haddr, 1, &bims); 954189592Sbms if (bims == NULL) 955189592Sbms continue; 956189592Sbms ims_merge(bims, lims, 1); 957170613Sbms } 958189592Sbms goto out_reap; 959189592Sbms } 960170613Sbms 961189592Sbms CTR3(KTR_IGMPV3, "%s: imf filters in-mode: %d at t0, %d at t1", 962189592Sbms __func__, nsrc0, nsrc1); 963170613Sbms 964189592Sbms /* Handle transition between INCLUDE {n} and INCLUDE {} on socket. 
*/ 965189592Sbms if (imf->imf_st[0] == imf->imf_st[1] && 966189592Sbms imf->imf_st[1] == MCAST_INCLUDE) { 967189592Sbms if (nsrc1 == 0) { 968189592Sbms CTR1(KTR_IGMPV3, "%s: --in on inm at t1", __func__); 969189592Sbms --inm->inm_st[1].iss_in; 970189592Sbms } 971189592Sbms } 972170613Sbms 973189592Sbms /* Handle filter mode transition on socket. */ 974189592Sbms if (imf->imf_st[0] != imf->imf_st[1]) { 975189592Sbms CTR3(KTR_IGMPV3, "%s: imf transition %d to %d", 976189592Sbms __func__, imf->imf_st[0], imf->imf_st[1]); 977189592Sbms 978189592Sbms if (imf->imf_st[0] == MCAST_EXCLUDE) { 979189592Sbms CTR1(KTR_IGMPV3, "%s: --ex on inm at t1", __func__); 980189592Sbms --inm->inm_st[1].iss_ex; 981189592Sbms } else if (imf->imf_st[0] == MCAST_INCLUDE) { 982189592Sbms CTR1(KTR_IGMPV3, "%s: --in on inm at t1", __func__); 983189592Sbms --inm->inm_st[1].iss_in; 984189592Sbms } 985189592Sbms 986189592Sbms if (imf->imf_st[1] == MCAST_EXCLUDE) { 987189592Sbms CTR1(KTR_IGMPV3, "%s: ex++ on inm at t1", __func__); 988189592Sbms inm->inm_st[1].iss_ex++; 989189592Sbms } else if (imf->imf_st[1] == MCAST_INCLUDE && nsrc1 > 0) { 990189592Sbms CTR1(KTR_IGMPV3, "%s: in++ on inm at t1", __func__); 991189592Sbms inm->inm_st[1].iss_in++; 992189592Sbms } 993189592Sbms } 994189592Sbms 995189592Sbms /* 996189592Sbms * Track inm filter state in terms of listener counts. 997189592Sbms * If there are any exclusive listeners, stack-wide 998189592Sbms * membership is exclusive. 999189592Sbms * Otherwise, if only inclusive listeners, stack-wide is inclusive. 1000189592Sbms * If no listeners remain, state is undefined at t1, 1001189592Sbms * and the IGMP lifecycle for this group should finish. 
1002189592Sbms */ 1003189592Sbms if (inm->inm_st[1].iss_ex > 0) { 1004189592Sbms CTR1(KTR_IGMPV3, "%s: transition to EX", __func__); 1005189592Sbms inm->inm_st[1].iss_fmode = MCAST_EXCLUDE; 1006189592Sbms } else if (inm->inm_st[1].iss_in > 0) { 1007189592Sbms CTR1(KTR_IGMPV3, "%s: transition to IN", __func__); 1008189592Sbms inm->inm_st[1].iss_fmode = MCAST_INCLUDE; 1009189592Sbms } else { 1010189592Sbms CTR1(KTR_IGMPV3, "%s: transition to UNDEF", __func__); 1011189592Sbms inm->inm_st[1].iss_fmode = MCAST_UNDEFINED; 1012189592Sbms } 1013189592Sbms 1014189592Sbms /* Decrement ASM listener count on transition out of ASM mode. */ 1015189592Sbms if (imf->imf_st[0] == MCAST_EXCLUDE && nsrc0 == 0) { 1016189592Sbms if ((imf->imf_st[1] != MCAST_EXCLUDE) || 1017189592Sbms (imf->imf_st[1] == MCAST_EXCLUDE && nsrc1 > 0)) 1018189592Sbms CTR1(KTR_IGMPV3, "%s: --asm on inm at t1", __func__); 1019189592Sbms --inm->inm_st[1].iss_asm; 1020189592Sbms } 1021189592Sbms 1022189592Sbms /* Increment ASM listener count on transition to ASM mode. */ 1023189592Sbms if (imf->imf_st[1] == MCAST_EXCLUDE && nsrc1 == 0) { 1024189592Sbms CTR1(KTR_IGMPV3, "%s: asm++ on inm at t1", __func__); 1025189592Sbms inm->inm_st[1].iss_asm++; 1026189592Sbms } 1027189592Sbms 1028189592Sbms CTR3(KTR_IGMPV3, "%s: merged imf %p to inm %p", __func__, imf, inm); 1029189592Sbms inm_print(inm); 1030189592Sbms 1031189592Sbmsout_reap: 1032189592Sbms if (schanged > 0) { 1033189592Sbms CTR1(KTR_IGMPV3, "%s: sources changed; reaping", __func__); 1034189592Sbms inm_reap(inm); 1035189592Sbms } 1036189592Sbms return (error); 1037189592Sbms} 1038189592Sbms 1039189592Sbms/* 1040189592Sbms * Mark an in_multi's filter set deltas as committed. 1041189592Sbms * Called by IGMP after a state change has been enqueued. 
1042189592Sbms */ 1043189592Sbmsvoid 1044189592Sbmsinm_commit(struct in_multi *inm) 1045189592Sbms{ 1046189592Sbms struct ip_msource *ims; 1047189592Sbms 1048189592Sbms CTR2(KTR_IGMPV3, "%s: commit inm %p", __func__, inm); 1049189592Sbms CTR1(KTR_IGMPV3, "%s: pre commit:", __func__); 1050189592Sbms inm_print(inm); 1051189592Sbms 1052189592Sbms RB_FOREACH(ims, ip_msource_tree, &inm->inm_srcs) { 1053189592Sbms ims->ims_st[0] = ims->ims_st[1]; 1054189592Sbms } 1055189592Sbms inm->inm_st[0] = inm->inm_st[1]; 1056189592Sbms} 1057189592Sbms 1058189592Sbms/* 1059189592Sbms * Reap unreferenced nodes from an in_multi's filter set. 1060189592Sbms */ 1061189592Sbmsstatic void 1062189592Sbmsinm_reap(struct in_multi *inm) 1063189592Sbms{ 1064189592Sbms struct ip_msource *ims, *tims; 1065189592Sbms 1066189592Sbms RB_FOREACH_SAFE(ims, ip_msource_tree, &inm->inm_srcs, tims) { 1067189592Sbms if (ims->ims_st[0].ex > 0 || ims->ims_st[0].in > 0 || 1068189592Sbms ims->ims_st[1].ex > 0 || ims->ims_st[1].in > 0 || 1069189592Sbms ims->ims_stp != 0) 1070189592Sbms continue; 1071189592Sbms CTR2(KTR_IGMPV3, "%s: free ims %p", __func__, ims); 1072189592Sbms RB_REMOVE(ip_msource_tree, &inm->inm_srcs, ims); 1073189592Sbms free(ims, M_IPMSOURCE); 1074189592Sbms inm->inm_nsrc--; 1075189592Sbms } 1076189592Sbms} 1077189592Sbms 1078189592Sbms/* 1079189592Sbms * Purge all source nodes from an in_multi's filter set. 1080189592Sbms */ 1081189592Sbmsstatic void 1082189592Sbmsinm_purge(struct in_multi *inm) 1083189592Sbms{ 1084189592Sbms struct ip_msource *ims, *tims; 1085189592Sbms 1086189592Sbms RB_FOREACH_SAFE(ims, ip_msource_tree, &inm->inm_srcs, tims) { 1087189592Sbms CTR2(KTR_IGMPV3, "%s: free ims %p", __func__, ims); 1088189592Sbms RB_REMOVE(ip_msource_tree, &inm->inm_srcs, ims); 1089189592Sbms free(ims, M_IPMSOURCE); 1090189592Sbms inm->inm_nsrc--; 1091189592Sbms } 1092189592Sbms} 1093189592Sbms 1094189592Sbms/* 1095189592Sbms * Join a multicast group; unlocked entry point. 
1096189592Sbms * 1097189592Sbms * SMPng: XXX: in_joingroup() is called from in_control() when Giant 1098189592Sbms * is not held. Fortunately, ifp is unlikely to have been detached 1099189592Sbms * at this point, so we assume it's OK to recurse. 1100189592Sbms */ 1101189592Sbmsint 1102189592Sbmsin_joingroup(struct ifnet *ifp, const struct in_addr *gina, 1103189592Sbms /*const*/ struct in_mfilter *imf, struct in_multi **pinm) 1104189592Sbms{ 1105189592Sbms int error; 1106189592Sbms 1107189592Sbms IN_MULTI_LOCK(); 1108189592Sbms error = in_joingroup_locked(ifp, gina, imf, pinm); 1109170613Sbms IN_MULTI_UNLOCK(); 1110170613Sbms 1111189592Sbms return (error); 1112170613Sbms} 1113170613Sbms 1114170613Sbms/* 1115189592Sbms * Join a multicast group; real entry point. 1116170613Sbms * 1117189592Sbms * Only preserves atomicity at inm level. 1118189592Sbms * NOTE: imf argument cannot be const due to sys/tree.h limitations. 1119170613Sbms * 1120189592Sbms * If the IGMP downcall fails, the group is not joined, and an error 1121189592Sbms * code is returned. 1122170613Sbms */ 1123189592Sbmsint 1124189592Sbmsin_joingroup_locked(struct ifnet *ifp, const struct in_addr *gina, 1125189592Sbms /*const*/ struct in_mfilter *imf, struct in_multi **pinm) 1126170613Sbms{ 1127189592Sbms struct in_mfilter timf; 1128189592Sbms struct in_multi *inm; 1129189592Sbms int error; 1130170613Sbms 1131189592Sbms IN_MULTI_LOCK_ASSERT(); 1132170613Sbms 1133189592Sbms CTR4(KTR_IGMPV3, "%s: join %s on %p(%s))", __func__, 1134189592Sbms inet_ntoa(*gina), ifp, ifp->if_xname); 1135189592Sbms 1136189592Sbms error = 0; 1137189592Sbms inm = NULL; 1138189592Sbms 1139189592Sbms /* 1140189592Sbms * If no imf was specified (i.e. kernel consumer), 1141189592Sbms * fake one up and assume it is an ASM join. 
1142189592Sbms */ 1143189592Sbms if (imf == NULL) { 1144189592Sbms imf_init(&timf, MCAST_UNDEFINED, MCAST_EXCLUDE); 1145189592Sbms imf = &timf; 1146170613Sbms } 1147170613Sbms 1148189592Sbms error = in_getmulti(ifp, gina, &inm); 1149189592Sbms if (error) { 1150189592Sbms CTR1(KTR_IGMPV3, "%s: in_getmulti() failure", __func__); 1151189592Sbms return (error); 1152189592Sbms } 1153189592Sbms 1154189592Sbms CTR1(KTR_IGMPV3, "%s: merge inm state", __func__); 1155189592Sbms error = inm_merge(inm, imf); 1156189592Sbms if (error) { 1157189592Sbms CTR1(KTR_IGMPV3, "%s: failed to merge inm state", __func__); 1158189592Sbms goto out_inm_release; 1159189592Sbms } 1160189592Sbms 1161189592Sbms CTR1(KTR_IGMPV3, "%s: doing igmp downcall", __func__); 1162189592Sbms error = igmp_change_state(inm); 1163189592Sbms if (error) { 1164189592Sbms CTR1(KTR_IGMPV3, "%s: failed to update source", __func__); 1165189592Sbms goto out_inm_release; 1166189592Sbms } 1167189592Sbms 1168189592Sbmsout_inm_release: 1169189592Sbms if (error) { 1170189592Sbms CTR2(KTR_IGMPV3, "%s: dropping ref on %p", __func__, inm); 1171189592Sbms inm_release_locked(inm); 1172189592Sbms } else { 1173189592Sbms *pinm = inm; 1174189592Sbms } 1175189592Sbms 1176189592Sbms return (error); 1177189592Sbms} 1178189592Sbms 1179189592Sbms/* 1180189592Sbms * Leave a multicast group; unlocked entry point. 1181189592Sbms */ 1182189592Sbmsint 1183189592Sbmsin_leavegroup(struct in_multi *inm, /*const*/ struct in_mfilter *imf) 1184189592Sbms{ 1185189851Srwatson int error; 1186189592Sbms 1187170613Sbms IN_MULTI_LOCK(); 1188189592Sbms error = in_leavegroup_locked(inm, imf); 1189170613Sbms IN_MULTI_UNLOCK(); 1190170613Sbms 1191189592Sbms return (error); 1192170613Sbms} 1193170613Sbms 1194170613Sbms/* 1195189592Sbms * Leave a multicast group; real entry point. 1196189592Sbms * All source filters will be expunged. 1197170613Sbms * 1198189592Sbms * Only preserves atomicity at inm level. 
1199189592Sbms * 1200189592Sbms * Holding the write lock for the INP which contains imf 1201189592Sbms * is highly advisable. We can't assert for it as imf does not 1202189592Sbms * contain a back-pointer to the owning inp. 1203189592Sbms * 1204189592Sbms * Note: This is not the same as inm_release(*) as this function also 1205189592Sbms * makes a state change downcall into IGMP. 1206170613Sbms */ 1207189592Sbmsint 1208189592Sbmsin_leavegroup_locked(struct in_multi *inm, /*const*/ struct in_mfilter *imf) 1209170613Sbms{ 1210189592Sbms struct in_mfilter timf; 1211189592Sbms int error; 1212170613Sbms 1213189592Sbms error = 0; 1214189592Sbms 1215170613Sbms IN_MULTI_LOCK_ASSERT(); 1216170613Sbms 1217189592Sbms CTR5(KTR_IGMPV3, "%s: leave inm %p, %s/%s, imf %p", __func__, 1218189592Sbms inm, inet_ntoa(inm->inm_addr), 1219189592Sbms (inm_is_ifp_detached(inm) ? "null" : inm->inm_ifp->if_xname), 1220189592Sbms imf); 1221170613Sbms 1222189592Sbms /* 1223189592Sbms * If no imf was specified (i.e. kernel consumer), 1224189592Sbms * fake one up and assume it is an ASM join. 1225189592Sbms */ 1226189592Sbms if (imf == NULL) { 1227189592Sbms imf_init(&timf, MCAST_EXCLUDE, MCAST_UNDEFINED); 1228189592Sbms imf = &timf; 1229189592Sbms } 1230170613Sbms 1231189592Sbms /* 1232189592Sbms * Begin state merge transaction at IGMP layer. 1233189592Sbms * 1234189592Sbms * As this particular invocation should not cause any memory 1235189592Sbms * to be allocated, and there is no opportunity to roll back 1236189592Sbms * the transaction, it MUST NOT fail. 
1237189592Sbms */ 1238189592Sbms CTR1(KTR_IGMPV3, "%s: merge inm state", __func__); 1239189592Sbms error = inm_merge(inm, imf); 1240189592Sbms KASSERT(error == 0, ("%s: failed to merge inm state", __func__)); 1241170613Sbms 1242189592Sbms CTR1(KTR_IGMPV3, "%s: doing igmp downcall", __func__); 1243252510Shrs CURVNET_SET(inm->inm_ifp->if_vnet); 1244189592Sbms error = igmp_change_state(inm); 1245252510Shrs CURVNET_RESTORE(); 1246189592Sbms if (error) 1247189592Sbms CTR1(KTR_IGMPV3, "%s: failed igmp downcall", __func__); 1248189592Sbms 1249189592Sbms CTR2(KTR_IGMPV3, "%s: dropping ref on %p", __func__, inm); 1250189592Sbms inm_release_locked(inm); 1251189592Sbms 1252189592Sbms return (error); 1253170613Sbms} 1254170613Sbms 1255189592Sbms/*#ifndef BURN_BRIDGES*/ 1256170613Sbms/* 1257189592Sbms * Join an IPv4 multicast group in (*,G) exclusive mode. 1258189592Sbms * The group must be a 224.0.0.0/24 link-scope group. 1259189592Sbms * This KPI is for legacy kernel consumers only. 1260170613Sbms */ 1261189592Sbmsstruct in_multi * 1262189592Sbmsin_addmulti(struct in_addr *ap, struct ifnet *ifp) 1263189592Sbms{ 1264189592Sbms struct in_multi *pinm; 1265189592Sbms int error; 1266189592Sbms 1267189592Sbms KASSERT(IN_LOCAL_GROUP(ntohl(ap->s_addr)), 1268189592Sbms ("%s: %s not in 224.0.0.0/24", __func__, inet_ntoa(*ap))); 1269189592Sbms 1270189592Sbms error = in_joingroup(ifp, ap, NULL, &pinm); 1271189592Sbms if (error != 0) 1272189592Sbms pinm = NULL; 1273189592Sbms 1274189592Sbms return (pinm); 1275189592Sbms} 1276189592Sbms 1277189592Sbms/* 1278189592Sbms * Leave an IPv4 multicast group, assumed to be in exclusive (*,G) mode. 1279189592Sbms * This KPI is for legacy kernel consumers only. 
1280189592Sbms */ 1281189592Sbmsvoid 1282189592Sbmsin_delmulti(struct in_multi *inm) 1283189592Sbms{ 1284189592Sbms 1285189592Sbms (void)in_leavegroup(inm, NULL); 1286189592Sbms} 1287189592Sbms/*#endif*/ 1288189592Sbms 1289189592Sbms/* 1290189592Sbms * Block or unblock an ASM multicast source on an inpcb. 1291189592Sbms * This implements the delta-based API described in RFC 3678. 1292189592Sbms * 1293189592Sbms * The delta-based API applies only to exclusive-mode memberships. 1294189592Sbms * An IGMP downcall will be performed. 1295189592Sbms * 1296189592Sbms * SMPng: NOTE: Must take Giant as a join may create a new ifma. 1297189592Sbms * 1298189592Sbms * Return 0 if successful, otherwise return an appropriate error code. 1299189592Sbms */ 1300170613Sbmsstatic int 1301189592Sbmsinp_block_unblock_source(struct inpcb *inp, struct sockopt *sopt) 1302170613Sbms{ 1303170613Sbms struct group_source_req gsr; 1304170613Sbms sockunion_t *gsa, *ssa; 1305170613Sbms struct ifnet *ifp; 1306170613Sbms struct in_mfilter *imf; 1307170613Sbms struct ip_moptions *imo; 1308170613Sbms struct in_msource *ims; 1309189592Sbms struct in_multi *inm; 1310170613Sbms size_t idx; 1311189592Sbms uint16_t fmode; 1312189592Sbms int error, doblock; 1313170613Sbms 1314170613Sbms ifp = NULL; 1315170613Sbms error = 0; 1316189592Sbms doblock = 0; 1317170613Sbms 1318170613Sbms memset(&gsr, 0, sizeof(struct group_source_req)); 1319170613Sbms gsa = (sockunion_t *)&gsr.gsr_group; 1320170613Sbms ssa = (sockunion_t *)&gsr.gsr_source; 1321170613Sbms 1322170613Sbms switch (sopt->sopt_name) { 1323170613Sbms case IP_BLOCK_SOURCE: 1324170613Sbms case IP_UNBLOCK_SOURCE: { 1325170613Sbms struct ip_mreq_source mreqs; 1326170613Sbms 1327170613Sbms error = sooptcopyin(sopt, &mreqs, 1328170613Sbms sizeof(struct ip_mreq_source), 1329170613Sbms sizeof(struct ip_mreq_source)); 1330170613Sbms if (error) 1331170613Sbms return (error); 1332170613Sbms 1333170613Sbms gsa->sin.sin_family = AF_INET; 1334170613Sbms 
gsa->sin.sin_len = sizeof(struct sockaddr_in); 1335170613Sbms gsa->sin.sin_addr = mreqs.imr_multiaddr; 1336170613Sbms 1337170613Sbms ssa->sin.sin_family = AF_INET; 1338170613Sbms ssa->sin.sin_len = sizeof(struct sockaddr_in); 1339170613Sbms ssa->sin.sin_addr = mreqs.imr_sourceaddr; 1340170613Sbms 1341189592Sbms if (!in_nullhost(mreqs.imr_interface)) 1342170613Sbms INADDR_TO_IFP(mreqs.imr_interface, ifp); 1343170613Sbms 1344170613Sbms if (sopt->sopt_name == IP_BLOCK_SOURCE) 1345189592Sbms doblock = 1; 1346170613Sbms 1347189592Sbms CTR3(KTR_IGMPV3, "%s: imr_interface = %s, ifp = %p", 1348189592Sbms __func__, inet_ntoa(mreqs.imr_interface), ifp); 1349170613Sbms break; 1350170613Sbms } 1351170613Sbms 1352170613Sbms case MCAST_BLOCK_SOURCE: 1353170613Sbms case MCAST_UNBLOCK_SOURCE: 1354170613Sbms error = sooptcopyin(sopt, &gsr, 1355170613Sbms sizeof(struct group_source_req), 1356170613Sbms sizeof(struct group_source_req)); 1357170613Sbms if (error) 1358170613Sbms return (error); 1359170613Sbms 1360170613Sbms if (gsa->sin.sin_family != AF_INET || 1361170613Sbms gsa->sin.sin_len != sizeof(struct sockaddr_in)) 1362170613Sbms return (EINVAL); 1363170613Sbms 1364170613Sbms if (ssa->sin.sin_family != AF_INET || 1365170613Sbms ssa->sin.sin_len != sizeof(struct sockaddr_in)) 1366170613Sbms return (EINVAL); 1367170613Sbms 1368181803Sbz if (gsr.gsr_interface == 0 || V_if_index < gsr.gsr_interface) 1369170613Sbms return (EADDRNOTAVAIL); 1370170613Sbms 1371170613Sbms ifp = ifnet_byindex(gsr.gsr_interface); 1372170613Sbms 1373170613Sbms if (sopt->sopt_name == MCAST_BLOCK_SOURCE) 1374189592Sbms doblock = 1; 1375170613Sbms break; 1376170613Sbms 1377170613Sbms default: 1378189592Sbms CTR2(KTR_IGMPV3, "%s: unknown sopt_name %d", 1379189592Sbms __func__, sopt->sopt_name); 1380170613Sbms return (EOPNOTSUPP); 1381170613Sbms break; 1382170613Sbms } 1383170613Sbms 1384170613Sbms if (!IN_MULTICAST(ntohl(gsa->sin.sin_addr.s_addr))) 1385170613Sbms return (EINVAL); 1386170613Sbms 1387170613Sbms 
/* 1388170613Sbms * Check if we are actually a member of this group. 1389170613Sbms */ 1390170613Sbms imo = inp_findmoptions(inp); 1391170613Sbms idx = imo_match_group(imo, ifp, &gsa->sa); 1392170613Sbms if (idx == -1 || imo->imo_mfilters == NULL) { 1393170613Sbms error = EADDRNOTAVAIL; 1394189592Sbms goto out_inp_locked; 1395170613Sbms } 1396170613Sbms 1397170613Sbms KASSERT(imo->imo_mfilters != NULL, 1398170613Sbms ("%s: imo_mfilters not allocated", __func__)); 1399170613Sbms imf = &imo->imo_mfilters[idx]; 1400189592Sbms inm = imo->imo_membership[idx]; 1401170613Sbms 1402170613Sbms /* 1403189592Sbms * Attempting to use the delta-based API on an 1404189592Sbms * non exclusive-mode membership is an error. 1405170613Sbms */ 1406189592Sbms fmode = imf->imf_st[0]; 1407189592Sbms if (fmode != MCAST_EXCLUDE) { 1408189592Sbms error = EINVAL; 1409189592Sbms goto out_inp_locked; 1410170613Sbms } 1411189592Sbms 1412189592Sbms /* 1413189592Sbms * Deal with error cases up-front: 1414189592Sbms * Asked to block, but already blocked; or 1415189592Sbms * Asked to unblock, but nothing to unblock. 1416189592Sbms * If adding a new block entry, allocate it. 1417189592Sbms */ 1418170613Sbms ims = imo_match_source(imo, idx, &ssa->sa); 1419189592Sbms if ((ims != NULL && doblock) || (ims == NULL && !doblock)) { 1420189592Sbms CTR3(KTR_IGMPV3, "%s: source %s %spresent", __func__, 1421189592Sbms inet_ntoa(ssa->sin.sin_addr), doblock ? "" : "not "); 1422189592Sbms error = EADDRNOTAVAIL; 1423189592Sbms goto out_inp_locked; 1424189592Sbms } 1425189592Sbms 1426189592Sbms INP_WLOCK_ASSERT(inp); 1427189592Sbms 1428189592Sbms /* 1429189592Sbms * Begin state merge transaction at socket layer. 
1430189592Sbms */ 1431189592Sbms if (doblock) { 1432189592Sbms CTR2(KTR_IGMPV3, "%s: %s source", __func__, "block"); 1433189592Sbms ims = imf_graft(imf, fmode, &ssa->sin); 1434189592Sbms if (ims == NULL) 1435189592Sbms error = ENOMEM; 1436170613Sbms } else { 1437189592Sbms CTR2(KTR_IGMPV3, "%s: %s source", __func__, "allow"); 1438189592Sbms error = imf_prune(imf, &ssa->sin); 1439170613Sbms } 1440170613Sbms 1441189592Sbms if (error) { 1442189592Sbms CTR1(KTR_IGMPV3, "%s: merge imf state failed", __func__); 1443189592Sbms goto out_imf_rollback; 1444189592Sbms } 1445189592Sbms 1446189592Sbms /* 1447189592Sbms * Begin state merge transaction at IGMP layer. 1448189592Sbms */ 1449189592Sbms IN_MULTI_LOCK(); 1450189592Sbms 1451189592Sbms CTR1(KTR_IGMPV3, "%s: merge inm state", __func__); 1452189592Sbms error = inm_merge(inm, imf); 1453189592Sbms if (error) { 1454189592Sbms CTR1(KTR_IGMPV3, "%s: failed to merge inm state", __func__); 1455261425Sgnn goto out_in_multi_locked; 1456189592Sbms } 1457189592Sbms 1458189592Sbms CTR1(KTR_IGMPV3, "%s: doing igmp downcall", __func__); 1459189592Sbms error = igmp_change_state(inm); 1460189592Sbms if (error) 1461189592Sbms CTR1(KTR_IGMPV3, "%s: failed igmp downcall", __func__); 1462189592Sbms 1463261425Sgnnout_in_multi_locked: 1464261425Sgnn 1465189592Sbms IN_MULTI_UNLOCK(); 1466189592Sbms 1467189592Sbmsout_imf_rollback: 1468189592Sbms if (error) 1469189592Sbms imf_rollback(imf); 1470189592Sbms else 1471189592Sbms imf_commit(imf); 1472189592Sbms 1473189592Sbms imf_reap(imf); 1474189592Sbms 1475189592Sbmsout_inp_locked: 1476178285Srwatson INP_WUNLOCK(inp); 1477170613Sbms return (error); 1478170613Sbms} 1479170613Sbms 1480170613Sbms/* 1481170613Sbms * Given an inpcb, return its multicast options structure pointer. Accepts 1482170613Sbms * an unlocked inpcb pointer, but will return it locked. May sleep. 1483189592Sbms * 1484189592Sbms * SMPng: NOTE: Potentially calls malloc(M_WAITOK) with Giant held. 
1485189592Sbms * SMPng: NOTE: Returns with the INP write lock held. 1486170613Sbms */ 1487170613Sbmsstatic struct ip_moptions * 1488170613Sbmsinp_findmoptions(struct inpcb *inp) 1489170613Sbms{ 1490170613Sbms struct ip_moptions *imo; 1491170613Sbms struct in_multi **immp; 1492170613Sbms struct in_mfilter *imfp; 1493170613Sbms size_t idx; 1494170613Sbms 1495178285Srwatson INP_WLOCK(inp); 1496170613Sbms if (inp->inp_moptions != NULL) 1497170613Sbms return (inp->inp_moptions); 1498170613Sbms 1499178285Srwatson INP_WUNLOCK(inp); 1500170613Sbms 1501189592Sbms imo = malloc(sizeof(*imo), M_IPMOPTS, M_WAITOK); 1502189592Sbms immp = malloc(sizeof(*immp) * IP_MIN_MEMBERSHIPS, M_IPMOPTS, 1503189592Sbms M_WAITOK | M_ZERO); 1504189592Sbms imfp = malloc(sizeof(struct in_mfilter) * IP_MIN_MEMBERSHIPS, 1505189592Sbms M_INMFILTER, M_WAITOK); 1506170613Sbms 1507170613Sbms imo->imo_multicast_ifp = NULL; 1508170613Sbms imo->imo_multicast_addr.s_addr = INADDR_ANY; 1509170613Sbms imo->imo_multicast_vif = -1; 1510170613Sbms imo->imo_multicast_ttl = IP_DEFAULT_MULTICAST_TTL; 1511189357Sbms imo->imo_multicast_loop = in_mcast_loop; 1512170613Sbms imo->imo_num_memberships = 0; 1513170613Sbms imo->imo_max_memberships = IP_MIN_MEMBERSHIPS; 1514170613Sbms imo->imo_membership = immp; 1515170613Sbms 1516170613Sbms /* Initialize per-group source filters. */ 1517189592Sbms for (idx = 0; idx < IP_MIN_MEMBERSHIPS; idx++) 1518189592Sbms imf_init(&imfp[idx], MCAST_UNDEFINED, MCAST_EXCLUDE); 1519170613Sbms imo->imo_mfilters = imfp; 1520170613Sbms 1521178285Srwatson INP_WLOCK(inp); 1522170613Sbms if (inp->inp_moptions != NULL) { 1523189592Sbms free(imfp, M_INMFILTER); 1524170613Sbms free(immp, M_IPMOPTS); 1525170613Sbms free(imo, M_IPMOPTS); 1526170613Sbms return (inp->inp_moptions); 1527170613Sbms } 1528170613Sbms inp->inp_moptions = imo; 1529170613Sbms return (imo); 1530170613Sbms} 1531170613Sbms 1532170613Sbms/* 1533228969Sjhb * Discard the IP multicast options (and source filters). 
To minimize 1534228969Sjhb * the amount of work done while holding locks such as the INP's 1535228969Sjhb * pcbinfo lock (which is used in the receive path), the free 1536228969Sjhb * operation is performed asynchronously in a separate task. 1537189592Sbms * 1538189592Sbms * SMPng: NOTE: assumes INP write lock is held. 1539170613Sbms */ 1540170613Sbmsvoid 1541170613Sbmsinp_freemoptions(struct ip_moptions *imo) 1542170613Sbms{ 1543228969Sjhb 1544228969Sjhb KASSERT(imo != NULL, ("%s: ip_moptions is NULL", __func__)); 1545228969Sjhb IN_MULTI_LOCK(); 1546228969Sjhb STAILQ_INSERT_TAIL(&imo_gc_list, imo, imo_link); 1547228969Sjhb IN_MULTI_UNLOCK(); 1548228969Sjhb taskqueue_enqueue(taskqueue_thread, &imo_gc_task); 1549228969Sjhb} 1550228969Sjhb 1551228969Sjhbstatic void 1552228969Sjhbinp_freemoptions_internal(struct ip_moptions *imo) 1553228969Sjhb{ 1554170613Sbms struct in_mfilter *imf; 1555170613Sbms size_t idx, nmships; 1556170613Sbms 1557170613Sbms nmships = imo->imo_num_memberships; 1558170613Sbms for (idx = 0; idx < nmships; ++idx) { 1559189592Sbms imf = imo->imo_mfilters ? 
&imo->imo_mfilters[idx] : NULL; 1560189592Sbms if (imf) 1561189592Sbms imf_leave(imf); 1562189592Sbms (void)in_leavegroup(imo->imo_membership[idx], imf); 1563189592Sbms if (imf) 1564189592Sbms imf_purge(imf); 1565170613Sbms } 1566170613Sbms 1567189592Sbms if (imo->imo_mfilters) 1568189592Sbms free(imo->imo_mfilters, M_INMFILTER); 1569170613Sbms free(imo->imo_membership, M_IPMOPTS); 1570170613Sbms free(imo, M_IPMOPTS); 1571170613Sbms} 1572170613Sbms 1573228969Sjhbstatic void 1574228969Sjhbinp_gcmoptions(void *context, int pending) 1575228969Sjhb{ 1576228969Sjhb struct ip_moptions *imo; 1577228969Sjhb 1578228969Sjhb IN_MULTI_LOCK(); 1579228969Sjhb while (!STAILQ_EMPTY(&imo_gc_list)) { 1580228969Sjhb imo = STAILQ_FIRST(&imo_gc_list); 1581228969Sjhb STAILQ_REMOVE_HEAD(&imo_gc_list, imo_link); 1582228969Sjhb IN_MULTI_UNLOCK(); 1583228969Sjhb inp_freemoptions_internal(imo); 1584228969Sjhb IN_MULTI_LOCK(); 1585228969Sjhb } 1586228969Sjhb IN_MULTI_UNLOCK(); 1587228969Sjhb} 1588228969Sjhb 1589170613Sbms/* 1590170613Sbms * Atomically get source filters on a socket for an IPv4 multicast group. 1591170613Sbms * Called with INP lock held; returns with lock released. 
 */
static int
inp_get_source_filters(struct inpcb *inp, struct sockopt *sopt)
{
	struct __msfilterreq	 msfr;
	sockunion_t		*gsa;
	struct ifnet		*ifp;
	struct ip_moptions	*imo;
	struct in_mfilter	*imf;
	struct ip_msource	*ims;
	struct in_msource	*lims;
	struct sockaddr_in	*psin;
	struct sockaddr_storage	*ptss;
	struct sockaddr_storage	*tss;
	int			 error;
	size_t			 idx, nsrcs, ncsrcs;

	INP_WLOCK_ASSERT(inp);

	imo = inp->inp_moptions;
	KASSERT(imo != NULL, ("%s: null ip_moptions", __func__));

	/*
	 * Drop the INP lock across the copyin and ifnet lookup;
	 * it is retaken below before the filter tree is walked.
	 */
	INP_WUNLOCK(inp);

	error = sooptcopyin(sopt, &msfr, sizeof(struct __msfilterreq),
	    sizeof(struct __msfilterreq));
	if (error)
		return (error);

	if (msfr.msfr_ifindex == 0 || V_if_index < msfr.msfr_ifindex)
		return (EINVAL);

	ifp = ifnet_byindex(msfr.msfr_ifindex);
	if (ifp == NULL)
		return (EINVAL);

	INP_WLOCK(inp);

	/*
	 * Lookup group on the socket.
	 */
	gsa = (sockunion_t *)&msfr.msfr_group;
	idx = imo_match_group(imo, ifp, &gsa->sa);
	if (idx == -1 || imo->imo_mfilters == NULL) {
		INP_WUNLOCK(inp);
		return (EADDRNOTAVAIL);
	}
	imf = &imo->imo_mfilters[idx];

	/*
	 * Ignore memberships which are in limbo, i.e. filter mode at t1
	 * is still UNDEFINED (a state-change transaction is in flight).
	 */
	if (imf->imf_st[1] == MCAST_UNDEFINED) {
		INP_WUNLOCK(inp);
		return (EAGAIN);
	}
	msfr.msfr_fmode = imf->imf_st[1];

	/*
	 * If the user specified a buffer, copy out the source filter
	 * entries to userland gracefully.
	 * We only copy out the number of entries which userland
	 * has asked for, but we always tell userland how big the
	 * buffer really needs to be.
	 */
	if (msfr.msfr_nsrcs > in_mcast_maxsocksrc)
		msfr.msfr_nsrcs = in_mcast_maxsocksrc;
	tss = NULL;
	if (msfr.msfr_srcs != NULL && msfr.msfr_nsrcs > 0) {
		/* M_NOWAIT: INP write lock is held; must not sleep. */
		tss = malloc(sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs,
		    M_TEMP, M_NOWAIT | M_ZERO);
		if (tss == NULL) {
			INP_WUNLOCK(inp);
			return (ENOBUFS);
		}
	}

	/*
	 * Count number of sources in-mode at t0.
	 * If buffer space exists and remains, copy out source entries.
	 */
	nsrcs = msfr.msfr_nsrcs;
	ncsrcs = 0;
	ptss = tss;
	RB_FOREACH(ims, ip_msource_tree, &imf->imf_sources) {
		lims = (struct in_msource *)ims;
		/* Skip entries not in the filter's current (t0) mode. */
		if (lims->imsl_st[0] == MCAST_UNDEFINED ||
		    lims->imsl_st[0] != imf->imf_st[0])
			continue;
		++ncsrcs;
		if (tss != NULL && nsrcs > 0) {
			psin = (struct sockaddr_in *)ptss;
			psin->sin_family = AF_INET;
			psin->sin_len = sizeof(struct sockaddr_in);
			/* ims_haddr is kept in host order. */
			psin->sin_addr.s_addr = htonl(lims->ims_haddr);
			psin->sin_port = 0;
			++ptss;
			--nsrcs;
		}
	}

	INP_WUNLOCK(inp);

	if (tss != NULL) {
		error = copyout(tss, msfr.msfr_srcs,
		    sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs);
		free(tss, M_TEMP);
		if (error)
			return (error);
	}

	/* Report the true number of in-mode sources to userland. */
	msfr.msfr_nsrcs = ncsrcs;
	error = sooptcopyout(sopt, &msfr, sizeof(struct __msfilterreq));

	return (error);
}

/*
 * Return the IP multicast options in response to user getsockopt().
 *
 * Every switch arm below is responsible for releasing the INP write
 * lock taken at entry (asserted by INP_UNLOCK_ASSERT() at the end);
 * IP_MSFILTER delegates the unlock to inp_get_source_filters().
 */
int
inp_getmoptions(struct inpcb *inp, struct sockopt *sopt)
{
	struct ip_mreqn		 mreqn;
	struct ip_moptions	*imo;
	struct ifnet		*ifp;
	struct in_ifaddr	*ia;
	int			 error, optval;
	u_char			 coptval;

	INP_WLOCK(inp);
	imo = inp->inp_moptions;
	/*
	 * If socket is neither of type SOCK_RAW or SOCK_DGRAM,
	 * or is a divert socket, reject it.
	 */
	if (inp->inp_socket->so_proto->pr_protocol == IPPROTO_DIVERT ||
	    (inp->inp_socket->so_proto->pr_type != SOCK_RAW &&
	    inp->inp_socket->so_proto->pr_type != SOCK_DGRAM)) {
		INP_WUNLOCK(inp);
		return (EOPNOTSUPP);
	}

	error = 0;
	switch (sopt->sopt_name) {
	case IP_MULTICAST_VIF:
		if (imo != NULL)
			optval = imo->imo_multicast_vif;
		else
			optval = -1;
		INP_WUNLOCK(inp);
		error = sooptcopyout(sopt, &optval, sizeof(int));
		break;

	case IP_MULTICAST_IF:
		memset(&mreqn, 0, sizeof(struct ip_mreqn));
		if (imo != NULL) {
			ifp = imo->imo_multicast_ifp;
			if (!in_nullhost(imo->imo_multicast_addr)) {
				mreqn.imr_address = imo->imo_multicast_addr;
			} else if (ifp != NULL) {
				mreqn.imr_ifindex = ifp->if_index;
				IFP_TO_IA(ifp, ia);
				if (ia != NULL) {
					mreqn.imr_address =
					    IA_SIN(ia)->sin_addr;
					/* IFP_TO_IA references ia. */
					ifa_free(&ia->ia_ifa);
				}
			}
		}
		INP_WUNLOCK(inp);
		/*
		 * Reply with either the full ip_mreqn or just the
		 * in_addr, depending on the size the caller passed.
		 */
		if (sopt->sopt_valsize == sizeof(struct ip_mreqn)) {
			error = sooptcopyout(sopt, &mreqn,
			    sizeof(struct ip_mreqn));
		} else {
			error = sooptcopyout(sopt, &mreqn.imr_address,
			    sizeof(struct in_addr));
		}
		break;

	case IP_MULTICAST_TTL:
		if (imo == 0)
			optval = coptval = IP_DEFAULT_MULTICAST_TTL;
		else
			optval = coptval = imo->imo_multicast_ttl;
		INP_WUNLOCK(inp);
		/* Accept either a u_char or an int from userland. */
		if (sopt->sopt_valsize == sizeof(u_char))
			error = sooptcopyout(sopt, &coptval, sizeof(u_char));
		else
			error = sooptcopyout(sopt, &optval, sizeof(int));
		break;

	case IP_MULTICAST_LOOP:
		if (imo == 0)
			optval = coptval = IP_DEFAULT_MULTICAST_LOOP;
		else
			optval = coptval = imo->imo_multicast_loop;
		INP_WUNLOCK(inp);
		if (sopt->sopt_valsize == sizeof(u_char))
			error = sooptcopyout(sopt, &coptval, sizeof(u_char));
		else
			error = sooptcopyout(sopt, &optval, sizeof(int));
		break;

	case IP_MSFILTER:
		if (imo == NULL) {
			error = EADDRNOTAVAIL;
			INP_WUNLOCK(inp);
		} else {
			/* Releases the INP lock on our behalf. */
			error = inp_get_source_filters(inp, sopt);
		}
		break;

	default:
		INP_WUNLOCK(inp);
		error = ENOPROTOOPT;
		break;
	}

	INP_UNLOCK_ASSERT(inp);

	return (error);
}

/*
 * Look up the ifnet to use for a multicast group membership,
 * given the IPv4 address of an interface, and the IPv4 group address.
 *
 * This routine exists to support legacy multicast applications
 * which do not understand that multicast memberships are scoped to
 * specific physical links in the networking stack, or which need
 * to join link-scope groups before IPv4 addresses are configured.
 *
 * If inp is non-NULL, use this socket's current FIB number for any
 * required FIB lookup.
 * If ina is INADDR_ANY, look up the group address in the unicast FIB,
 * and use its ifp; usually, this points to the default next-hop.
 *
 * If the FIB lookup fails, attempt to use the first non-loopback
 * interface with multicast capability in the system as a
 * last resort. The legacy IPv4 ASM API requires that we do
 * this in order to allow groups to be joined when the routing
 * table has not yet been populated during boot.
 *
 * Returns NULL if no ifp could be found.
 *
 * SMPng: TODO: Acquire the appropriate locks for INADDR_TO_IFP.
 * FUTURE: Implement IPv4 source-address selection.
 */
static struct ifnet *
inp_lookup_mcast_ifp(const struct inpcb *inp,
    const struct sockaddr_in *gsin, const struct in_addr ina)
{
	struct ifnet *ifp;

	KASSERT(gsin->sin_family == AF_INET, ("%s: not AF_INET", __func__));
	KASSERT(IN_MULTICAST(ntohl(gsin->sin_addr.s_addr)),
	    ("%s: not multicast", __func__));

	ifp = NULL;
	if (!in_nullhost(ina)) {
		/* Explicit interface address given: use its ifp. */
		INADDR_TO_IFP(ina, ifp);
	} else {
		struct route ro;

		/* Route the group address in the socket's FIB. */
		ro.ro_rt = NULL;
		memcpy(&ro.ro_dst, gsin, sizeof(struct sockaddr_in));
		in_rtalloc_ign(&ro, 0, inp ? inp->inp_inc.inc_fibnum : 0);
		if (ro.ro_rt != NULL) {
			ifp = ro.ro_rt->rt_ifp;
			KASSERT(ifp != NULL, ("%s: null ifp", __func__));
			RTFREE(ro.ro_rt);
		} else {
			struct in_ifaddr *ia;
			struct ifnet *mifp;

			/*
			 * Last resort: first non-loopback interface
			 * with multicast capability.
			 */
			mifp = NULL;
			IN_IFADDR_RLOCK();
			TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
				mifp = ia->ia_ifp;
				if (!(mifp->if_flags & IFF_LOOPBACK) &&
				     (mifp->if_flags & IFF_MULTICAST)) {
					ifp = mifp;
					break;
				}
			}
			IN_IFADDR_RUNLOCK();
		}
	}

	return (ifp);
}

/*
 * Join an IPv4 multicast group, possibly with a source.
 *
 * Handles both the legacy 4.4BSD API (IP_ADD_MEMBERSHIP,
 * IP_ADD_SOURCE_MEMBERSHIP) and the RFC 3678 API (MCAST_JOIN_GROUP,
 * MCAST_JOIN_SOURCE_GROUP). The join is performed as a two-layer
 * transaction: the socket-layer filter (imf) is staged first, then
 * the IGMP-layer state (inm) is merged; on failure the staged filter
 * state is rolled back via the out_* labels below.
 */
static int
inp_join_group(struct inpcb *inp, struct sockopt *sopt)
{
	struct group_source_req		 gsr;
	sockunion_t			*gsa, *ssa;
	struct ifnet			*ifp;
	struct in_mfilter		*imf;
	struct ip_moptions		*imo;
	struct in_multi			*inm;
	struct in_msource		*lims;
	size_t				 idx;
	int				 error, is_new;

	ifp = NULL;
	imf = NULL;
	lims = NULL;
	error = 0;
	is_new = 0;

	memset(&gsr, 0, sizeof(struct group_source_req));
	gsa = (sockunion_t *)&gsr.gsr_group;
	gsa->ss.ss_family = AF_UNSPEC;
	ssa = (sockunion_t *)&gsr.gsr_source;
	ssa->ss.ss_family = AF_UNSPEC;

	switch (sopt->sopt_name) {
	case IP_ADD_MEMBERSHIP:
	case IP_ADD_SOURCE_MEMBERSHIP: {
		struct ip_mreq_source	 mreqs;

		if (sopt->sopt_name == IP_ADD_MEMBERSHIP) {
			error = sooptcopyin(sopt, &mreqs,
			    sizeof(struct ip_mreq),
			    sizeof(struct ip_mreq));
			/*
			 * Do argument switcharoo from ip_mreq into
			 * ip_mreq_source to avoid using two instances.
			 */
			mreqs.imr_interface = mreqs.imr_sourceaddr;
			mreqs.imr_sourceaddr.s_addr = INADDR_ANY;
		} else if (sopt->sopt_name == IP_ADD_SOURCE_MEMBERSHIP) {
			error = sooptcopyin(sopt, &mreqs,
			    sizeof(struct ip_mreq_source),
			    sizeof(struct ip_mreq_source));
		}
		if (error)
			return (error);

		gsa->sin.sin_family = AF_INET;
		gsa->sin.sin_len = sizeof(struct sockaddr_in);
		gsa->sin.sin_addr = mreqs.imr_multiaddr;

		if (sopt->sopt_name == IP_ADD_SOURCE_MEMBERSHIP) {
			ssa->sin.sin_family = AF_INET;
			ssa->sin.sin_len = sizeof(struct sockaddr_in);
			ssa->sin.sin_addr = mreqs.imr_sourceaddr;
		}

		if (!IN_MULTICAST(ntohl(gsa->sin.sin_addr.s_addr)))
			return (EINVAL);

		ifp = inp_lookup_mcast_ifp(inp, &gsa->sin,
		    mreqs.imr_interface);
		CTR3(KTR_IGMPV3, "%s: imr_interface = %s, ifp = %p",
		    __func__, inet_ntoa(mreqs.imr_interface), ifp);
		break;
	}

	case MCAST_JOIN_GROUP:
	case MCAST_JOIN_SOURCE_GROUP:
		if (sopt->sopt_name == MCAST_JOIN_GROUP) {
			error = sooptcopyin(sopt, &gsr,
			    sizeof(struct group_req),
			    sizeof(struct group_req));
		} else if (sopt->sopt_name == MCAST_JOIN_SOURCE_GROUP) {
			error = sooptcopyin(sopt, &gsr,
			    sizeof(struct group_source_req),
			    sizeof(struct group_source_req));
		}
		if (error)
			return (error);

		if (gsa->sin.sin_family != AF_INET ||
		    gsa->sin.sin_len != sizeof(struct sockaddr_in))
			return (EINVAL);

		/*
		 * Overwrite the port field if present, as the sockaddr
		 * being copied in may be matched with a binary comparison.
		 */
		gsa->sin.sin_port = 0;
		if (sopt->sopt_name == MCAST_JOIN_SOURCE_GROUP) {
			if (ssa->sin.sin_family != AF_INET ||
			    ssa->sin.sin_len != sizeof(struct sockaddr_in))
				return (EINVAL);
			ssa->sin.sin_port = 0;
		}

		if (!IN_MULTICAST(ntohl(gsa->sin.sin_addr.s_addr)))
			return (EINVAL);

		if (gsr.gsr_interface == 0 || V_if_index < gsr.gsr_interface)
			return (EADDRNOTAVAIL);
		ifp = ifnet_byindex(gsr.gsr_interface);
		break;

	default:
		CTR2(KTR_IGMPV3, "%s: unknown sopt_name %d",
		    __func__, sopt->sopt_name);
		return (EOPNOTSUPP);
		break;
	}

	if (ifp == NULL || (ifp->if_flags & IFF_MULTICAST) == 0)
		return (EADDRNOTAVAIL);

	/* Returns with the INP write lock held (see assert below). */
	imo = inp_findmoptions(inp);
	idx = imo_match_group(imo, ifp, &gsa->sa);
	if (idx == -1) {
		is_new = 1;
	} else {
		inm = imo->imo_membership[idx];
		imf = &imo->imo_mfilters[idx];
		if (ssa->ss.ss_family != AF_UNSPEC) {
			/*
			 * MCAST_JOIN_SOURCE_GROUP on an exclusive membership
			 * is an error. On an existing inclusive membership,
			 * it just adds the source to the filter list.
			 */
			if (imf->imf_st[1] != MCAST_INCLUDE) {
				error = EINVAL;
				goto out_inp_locked;
			}
			/*
			 * Throw out duplicates.
			 *
			 * XXX FIXME: This makes a naive assumption that
			 * even if entries exist for *ssa in this imf,
			 * they will be rejected as dupes, even if they
			 * are not valid in the current mode (in-mode).
			 *
			 * in_msource is transactioned just as for anything
			 * else in SSM -- but note naive use of inm_graft()
			 * below for allocating new filter entries.
			 *
			 * This is only an issue if someone mixes the
			 * full-state SSM API with the delta-based API,
			 * which is discouraged in the relevant RFCs.
			 */
			lims = imo_match_source(imo, idx, &ssa->sa);
			if (lims != NULL /*&&
			    lims->imsl_st[1] == MCAST_INCLUDE*/) {
				error = EADDRNOTAVAIL;
				goto out_inp_locked;
			}
		} else {
			/*
			 * MCAST_JOIN_GROUP on an existing exclusive
			 * membership is an error; return EADDRINUSE
			 * to preserve 4.4BSD API idempotence, and
			 * avoid tedious detour to code below.
			 * NOTE: This is bending RFC 3678 a bit.
			 *
			 * On an existing inclusive membership, this is also
			 * an error; if you want to change filter mode,
			 * you must use the userland API setsourcefilter().
			 * XXX We don't reject this for imf in UNDEFINED
			 * state at t1, because allocation of a filter
			 * is atomic with allocation of a membership.
			 */
			error = EINVAL;
			if (imf->imf_st[1] == MCAST_EXCLUDE)
				error = EADDRINUSE;
			goto out_inp_locked;
		}
	}

	/*
	 * Begin state merge transaction at socket layer.
	 */
	INP_WLOCK_ASSERT(inp);

	if (is_new) {
		if (imo->imo_num_memberships == imo->imo_max_memberships) {
			error = imo_grow(imo);
			if (error)
				goto out_inp_locked;
		}
		/*
		 * Allocate the new slot upfront so we can deal with
		 * grafting the new source filter in same code path
		 * as for join-source on existing membership.
		 */
		idx = imo->imo_num_memberships;
		imo->imo_membership[idx] = NULL;
		imo->imo_num_memberships++;
		KASSERT(imo->imo_mfilters != NULL,
		    ("%s: imf_mfilters vector was not allocated", __func__));
		imf = &imo->imo_mfilters[idx];
		KASSERT(RB_EMPTY(&imf->imf_sources),
		    ("%s: imf_sources not empty", __func__));
	}

	/*
	 * Graft new source into filter list for this inpcb's
	 * membership of the group. The in_multi may not have
	 * been allocated yet if this is a new membership, however,
	 * the in_mfilter slot will be allocated and must be initialized.
	 *
	 * Note: Grafting of exclusive mode filters doesn't happen
	 * in this path.
	 * XXX: Should check for non-NULL lims (node exists but may
	 * not be in-mode) for interop with full-state API.
	 */
	if (ssa->ss.ss_family != AF_UNSPEC) {
		/* Membership starts in IN mode */
		if (is_new) {
			CTR1(KTR_IGMPV3, "%s: new join w/source", __func__);
			imf_init(imf, MCAST_UNDEFINED, MCAST_INCLUDE);
		} else {
			CTR2(KTR_IGMPV3, "%s: %s source", __func__, "allow");
		}
		lims = imf_graft(imf, MCAST_INCLUDE, &ssa->sin);
		if (lims == NULL) {
			CTR1(KTR_IGMPV3, "%s: merge imf state failed",
			    __func__);
			error = ENOMEM;
			goto out_imo_free;
		}
	} else {
		/* No address specified; Membership starts in EX mode */
		if (is_new) {
			CTR1(KTR_IGMPV3, "%s: new join w/o source", __func__);
			imf_init(imf, MCAST_UNDEFINED, MCAST_EXCLUDE);
		}
	}

	/*
	 * Begin state merge transaction at IGMP layer.
	 */
	IN_MULTI_LOCK();

	if (is_new) {
		error = in_joingroup_locked(ifp, &gsa->sin.sin_addr, imf,
		    &inm);
		if (error) {
			CTR1(KTR_IGMPV3, "%s: in_joingroup_locked failed",
			    __func__);
			/*
			 * Unlock here: this path jumps past the
			 * out_in_multi_locked unlock below.
			 */
			IN_MULTI_UNLOCK();
			goto out_imo_free;
		}
		imo->imo_membership[idx] = inm;
	} else {
		CTR1(KTR_IGMPV3, "%s: merge inm state", __func__);
		error = inm_merge(inm, imf);
		if (error) {
			CTR1(KTR_IGMPV3, "%s: failed to merge inm state",
			    __func__);
			goto out_in_multi_locked;
		}
		CTR1(KTR_IGMPV3, "%s: doing igmp downcall", __func__);
		error = igmp_change_state(inm);
		if (error) {
			CTR1(KTR_IGMPV3, "%s: failed igmp downcall",
			    __func__);
			goto out_in_multi_locked;
		}
	}

out_in_multi_locked:

	IN_MULTI_UNLOCK();

	INP_WLOCK_ASSERT(inp);
	if (error) {
		/* Undo the staged filter changes. */
		imf_rollback(imf);
		if (is_new)
			imf_purge(imf);
		else
			imf_reap(imf);
	} else {
		imf_commit(imf);
	}

out_imo_free:
	if (error && is_new) {
		/* Give back the membership slot allocated above. */
		imo->imo_membership[idx] = NULL;
		--imo->imo_num_memberships;
	}

out_inp_locked:
	INP_WUNLOCK(inp);
	return (error);
}

/*
 * Leave an IPv4 multicast group on an inpcb, possibly with a source.
 *
 * Handles both the legacy 4.4BSD API (IP_DROP_MEMBERSHIP,
 * IP_DROP_SOURCE_MEMBERSHIP) and the RFC 3678 API (MCAST_LEAVE_GROUP,
 * MCAST_LEAVE_SOURCE_GROUP). A full leave (is_final) tears down the
 * whole membership; a source-specific leave only prunes that source
 * from an inclusive-mode filter and pushes the change down to IGMP.
 */
static int
inp_leave_group(struct inpcb *inp, struct sockopt *sopt)
{
	struct group_source_req		 gsr;
	struct ip_mreq_source		 mreqs;
	sockunion_t			*gsa, *ssa;
	struct ifnet			*ifp;
	struct in_mfilter		*imf;
	struct ip_moptions		*imo;
	struct in_msource		*ims;
	struct in_multi			*inm;
	size_t				 idx;
	int				 error, is_final;

	ifp = NULL;
	error = 0;
	is_final = 1;

	memset(&gsr, 0, sizeof(struct group_source_req));
	gsa = (sockunion_t *)&gsr.gsr_group;
	gsa->ss.ss_family = AF_UNSPEC;
	ssa = (sockunion_t *)&gsr.gsr_source;
	ssa->ss.ss_family = AF_UNSPEC;

	switch (sopt->sopt_name) {
	case IP_DROP_MEMBERSHIP:
	case IP_DROP_SOURCE_MEMBERSHIP:
		if (sopt->sopt_name == IP_DROP_MEMBERSHIP) {
			error = sooptcopyin(sopt, &mreqs,
			    sizeof(struct ip_mreq),
			    sizeof(struct ip_mreq));
			/*
			 * Swap interface and sourceaddr arguments,
			 * as ip_mreq and ip_mreq_source are laid
			 * out differently.
			 */
			mreqs.imr_interface = mreqs.imr_sourceaddr;
			mreqs.imr_sourceaddr.s_addr = INADDR_ANY;
		} else if (sopt->sopt_name == IP_DROP_SOURCE_MEMBERSHIP) {
			error = sooptcopyin(sopt, &mreqs,
			    sizeof(struct ip_mreq_source),
			    sizeof(struct ip_mreq_source));
		}
		if (error)
			return (error);

		gsa->sin.sin_family = AF_INET;
		gsa->sin.sin_len = sizeof(struct sockaddr_in);
		gsa->sin.sin_addr = mreqs.imr_multiaddr;

		if (sopt->sopt_name == IP_DROP_SOURCE_MEMBERSHIP) {
			ssa->sin.sin_family = AF_INET;
			ssa->sin.sin_len = sizeof(struct sockaddr_in);
			ssa->sin.sin_addr = mreqs.imr_sourceaddr;
		}

		/*
		 * Attempt to look up hinted ifp from interface address.
		 * Fallthrough with null ifp iff lookup fails, to
		 * preserve 4.4BSD mcast API idempotence.
		 * XXX NOTE WELL: The RFC 3678 API is preferred because
		 * using an IPv4 address as a key is racy.
		 */
		if (!in_nullhost(mreqs.imr_interface))
			INADDR_TO_IFP(mreqs.imr_interface, ifp);

		CTR3(KTR_IGMPV3, "%s: imr_interface = %s, ifp = %p",
		    __func__, inet_ntoa(mreqs.imr_interface), ifp);

		break;

	case MCAST_LEAVE_GROUP:
	case MCAST_LEAVE_SOURCE_GROUP:
		if (sopt->sopt_name == MCAST_LEAVE_GROUP) {
			error = sooptcopyin(sopt, &gsr,
			    sizeof(struct group_req),
			    sizeof(struct group_req));
		} else if (sopt->sopt_name == MCAST_LEAVE_SOURCE_GROUP) {
			error = sooptcopyin(sopt, &gsr,
			    sizeof(struct group_source_req),
			    sizeof(struct group_source_req));
		}
		if (error)
			return (error);

		if (gsa->sin.sin_family != AF_INET ||
		    gsa->sin.sin_len != sizeof(struct sockaddr_in))
			return (EINVAL);

		if (sopt->sopt_name == MCAST_LEAVE_SOURCE_GROUP) {
			if (ssa->sin.sin_family != AF_INET ||
			    ssa->sin.sin_len != sizeof(struct sockaddr_in))
				return (EINVAL);
		}

		if (gsr.gsr_interface == 0 || V_if_index < gsr.gsr_interface)
			return (EADDRNOTAVAIL);

		ifp = ifnet_byindex(gsr.gsr_interface);

		if (ifp == NULL)
			return (EADDRNOTAVAIL);
		break;

	default:
		CTR2(KTR_IGMPV3, "%s: unknown sopt_name %d",
		    __func__, sopt->sopt_name);
		return (EOPNOTSUPP);
		break;
	}

	if (!IN_MULTICAST(ntohl(gsa->sin.sin_addr.s_addr)))
		return (EINVAL);

	/*
	 * Find the membership in the membership array.
	 * inp_findmoptions() returns with the INP write lock held
	 * (asserted below).
	 */
	imo = inp_findmoptions(inp);
	idx = imo_match_group(imo, ifp, &gsa->sa);
	if (idx == -1) {
		error = EADDRNOTAVAIL;
		goto out_inp_locked;
	}
	inm = imo->imo_membership[idx];
	imf = &imo->imo_mfilters[idx];

	/* A source address means a source-specific leave, not a full one. */
	if (ssa->ss.ss_family != AF_UNSPEC)
		is_final = 0;

	/*
	 * Begin state merge transaction at socket layer.
	 */
	INP_WLOCK_ASSERT(inp);

	/*
	 * If we were instructed only to leave a given source, do so.
	 * MCAST_LEAVE_SOURCE_GROUP is only valid for inclusive memberships.
	 */
	if (is_final) {
		imf_leave(imf);
	} else {
		if (imf->imf_st[0] == MCAST_EXCLUDE) {
			error = EADDRNOTAVAIL;
			goto out_inp_locked;
		}
		ims = imo_match_source(imo, idx, &ssa->sa);
		if (ims == NULL) {
			CTR3(KTR_IGMPV3, "%s: source %s %spresent", __func__,
			    inet_ntoa(ssa->sin.sin_addr), "not ");
			error = EADDRNOTAVAIL;
			goto out_inp_locked;
		}
		CTR2(KTR_IGMPV3, "%s: %s source", __func__, "block");
		error = imf_prune(imf, &ssa->sin);
		if (error) {
			CTR1(KTR_IGMPV3, "%s: merge imf state failed",
			    __func__);
			goto out_inp_locked;
		}
	}

	/*
	 * Begin state merge transaction at IGMP layer.
	 */
	IN_MULTI_LOCK();

	if (is_final) {
		/*
		 * Give up the multicast address record to which
		 * the membership points.
		 */
		(void)in_leavegroup_locked(inm, imf);
	} else {
		CTR1(KTR_IGMPV3, "%s: merge inm state", __func__);
		error = inm_merge(inm, imf);
		if (error) {
			CTR1(KTR_IGMPV3, "%s: failed to merge inm state",
			    __func__);
			goto out_in_multi_locked;
		}

		CTR1(KTR_IGMPV3, "%s: doing igmp downcall", __func__);
		error = igmp_change_state(inm);
		if (error) {
			CTR1(KTR_IGMPV3, "%s: failed igmp downcall",
			    __func__);
		}
	}

out_in_multi_locked:

	IN_MULTI_UNLOCK();

	if (error)
		imf_rollback(imf);
	else
		imf_commit(imf);

	imf_reap(imf);

	if (is_final) {
		/* Remove the gap in the membership and filter array. */
		for (++idx; idx < imo->imo_num_memberships; ++idx) {
			imo->imo_membership[idx-1] = imo->imo_membership[idx];
			imo->imo_mfilters[idx-1] = imo->imo_mfilters[idx];
		}
		imo->imo_num_memberships--;
	}

out_inp_locked:
	INP_WUNLOCK(inp);
	return (error);
}

/*
 * Select the interface for transmitting IPv4 multicast datagrams.
 *
 * Either an instance of struct in_addr or an instance of struct ip_mreqn
 * may be passed to this socket option. An address of INADDR_ANY or an
 * interface index of 0 is used to remove a previous selection.
 * When no interface is selected, one is chosen for every send.
 */
static int
inp_set_multicast_if(struct inpcb *inp, struct sockopt *sopt)
{
	struct in_addr		 addr;
	struct ip_mreqn		 mreqn;
	struct ifnet		*ifp;
	struct ip_moptions	*imo;
	int			 error;

	/*
	 * The argument is either a struct ip_mreqn or a struct in_addr;
	 * the two are distinguished by the size supplied by the caller.
	 */
	if (sopt->sopt_valsize == sizeof(struct ip_mreqn)) {
		/*
		 * An interface index was specified using the
		 * Linux-derived ip_mreqn structure.
		 */
		error = sooptcopyin(sopt, &mreqn, sizeof(struct ip_mreqn),
		    sizeof(struct ip_mreqn));
		if (error)
			return (error);

		if (mreqn.imr_ifindex < 0 || V_if_index < mreqn.imr_ifindex)
			return (EINVAL);

		/* Index 0 clears any previous interface selection. */
		if (mreqn.imr_ifindex == 0) {
			ifp = NULL;
		} else {
			ifp = ifnet_byindex(mreqn.imr_ifindex);
			if (ifp == NULL)
				return (EADDRNOTAVAIL);
		}
	} else {
		/*
		 * An interface was specified by IPv4 address.
		 * This is the traditional BSD usage.
		 */
		error = sooptcopyin(sopt, &addr, sizeof(struct in_addr),
		    sizeof(struct in_addr));
		if (error)
			return (error);
		/* INADDR_ANY clears any previous interface selection. */
		if (in_nullhost(addr)) {
			ifp = NULL;
		} else {
			INADDR_TO_IFP(addr, ifp);
			if (ifp == NULL)
				return (EADDRNOTAVAIL);
		}
		CTR3(KTR_IGMPV3, "%s: ifp = %p, addr = %s", __func__, ifp,
		    inet_ntoa(addr));
	}

	/* Reject interfaces which do not support multicast. */
	if (ifp != NULL && (ifp->if_flags & IFF_MULTICAST) == 0)
		return (EOPNOTSUPP);

	/* inp_findmoptions() returns with the INP write lock held. */
	imo = inp_findmoptions(inp);
	imo->imo_multicast_ifp = ifp;
	imo->imo_multicast_addr.s_addr = INADDR_ANY;
	INP_WUNLOCK(inp);

	return (0);
}

/*
 * Atomically set source filters on a socket for an IPv4 multicast group.
 *
 * SMPng: NOTE: Potentially calls malloc(M_WAITOK) with Giant held.
 */
static int
inp_set_source_filters(struct inpcb *inp, struct sockopt *sopt)
{
	struct __msfilterreq	 msfr;
	sockunion_t		*gsa;
	struct ifnet		*ifp;
	struct in_mfilter	*imf;
	struct ip_moptions	*imo;
	struct in_multi		*inm;
	size_t			 idx;
	int			 error;

	error = sooptcopyin(sopt, &msfr, sizeof(struct __msfilterreq),
	    sizeof(struct __msfilterreq));
	if (error)
		return (error);

	/* Cap the number of filter sources accepted per socket. */
	if (msfr.msfr_nsrcs > in_mcast_maxsocksrc)
		return (ENOBUFS);

	if ((msfr.msfr_fmode != MCAST_EXCLUDE &&
	     msfr.msfr_fmode != MCAST_INCLUDE))
		return (EINVAL);

	if (msfr.msfr_group.ss_family != AF_INET ||
	    msfr.msfr_group.ss_len != sizeof(struct sockaddr_in))
		return (EINVAL);

	gsa = (sockunion_t *)&msfr.msfr_group;
	if (!IN_MULTICAST(ntohl(gsa->sin.sin_addr.s_addr)))
		return (EINVAL);

	gsa->sin.sin_port = 0;	/* ignore port */

	if (msfr.msfr_ifindex == 0 || V_if_index < msfr.msfr_ifindex)
		return (EADDRNOTAVAIL);

	ifp = ifnet_byindex(msfr.msfr_ifindex);
	if (ifp == NULL)
		return (EADDRNOTAVAIL);

	/*
	 * Take the INP write lock (inp_findmoptions() returns with it
	 * held).  Check if this socket is a member of this group.
	 */
	imo = inp_findmoptions(inp);
	idx = imo_match_group(imo, ifp, &gsa->sa);
	if (idx == -1 || imo->imo_mfilters == NULL) {
		error = EADDRNOTAVAIL;
		goto out_inp_locked;
	}
	inm = imo->imo_membership[idx];
	imf = &imo->imo_mfilters[idx];

	/*
	 * Begin state merge transaction at socket layer.
	 * The new mode is staged at t1; imf_commit()/imf_rollback()
	 * below decide whether it becomes the committed state.
	 */
	INP_WLOCK_ASSERT(inp);

	imf->imf_st[1] = msfr.msfr_fmode;

	/*
	 * Apply any new source filters, if present.
	 * Make a copy of the user-space source vector so
	 * that we may copy them with a single copyin. This
	 * allows us to deal with page faults up-front.
	 */
	if (msfr.msfr_nsrcs > 0) {
		struct in_msource	*lims;
		struct sockaddr_in	*psin;
		struct sockaddr_storage	*kss, *pkss;
		int			 i;

		/*
		 * NOTE(review): the INP lock is dropped across the
		 * M_WAITOK malloc and the copyin; the imo/imf/inm
		 * pointers found above are re-used after relocking --
		 * presumably safe because setsockopt() is serialized
		 * per socket, but worth confirming.
		 */
		INP_WUNLOCK(inp);

		CTR2(KTR_IGMPV3, "%s: loading %lu source list entries",
		    __func__, (unsigned long)msfr.msfr_nsrcs);
		kss = malloc(sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs,
		    M_TEMP, M_WAITOK);
		error = copyin(msfr.msfr_srcs, kss,
		    sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs);
		if (error) {
			free(kss, M_TEMP);
			return (error);
		}

		INP_WLOCK(inp);

		/*
		 * Mark all source filters as UNDEFINED at t1.
		 * Restore new group filter mode, as imf_leave()
		 * will set it to INCLUDE.
		 */
		imf_leave(imf);
		imf->imf_st[1] = msfr.msfr_fmode;

		/*
		 * Update socket layer filters at t1, lazy-allocating
		 * new entries. This saves a bunch of memory at the
		 * cost of one RB_FIND() per source entry; duplicate
		 * entries in the msfr_nsrcs vector are ignored.
		 * If we encounter an error, rollback transaction.
		 *
		 * XXX This too could be replaced with a set-symmetric
		 * difference like loop to avoid walking from root
		 * every time, as the key space is common.
		 */
		for (i = 0, pkss = kss; i < msfr.msfr_nsrcs; i++, pkss++) {
			psin = (struct sockaddr_in *)pkss;
			if (psin->sin_family != AF_INET) {
				error = EAFNOSUPPORT;
				break;
			}
			if (psin->sin_len != sizeof(struct sockaddr_in)) {
				error = EINVAL;
				break;
			}
			error = imf_get_source(imf, psin, &lims);
			if (error)
				break;
			lims->imsl_st[1] = imf->imf_st[1];
		}
		free(kss, M_TEMP);
	}

	if (error)
		goto out_imf_rollback;

	INP_WLOCK_ASSERT(inp);
	IN_MULTI_LOCK();

	/*
	 * Begin state merge transaction at IGMP layer.
	 */
	CTR1(KTR_IGMPV3, "%s: merge inm state", __func__);
	error = inm_merge(inm, imf);
	if (error) {
		CTR1(KTR_IGMPV3, "%s: failed to merge inm state", __func__);
		goto out_in_multi_locked;
	}

	CTR1(KTR_IGMPV3, "%s: doing igmp downcall", __func__);
	error = igmp_change_state(inm);
	if (error)
		CTR1(KTR_IGMPV3, "%s: failed igmp downcall", __func__);

out_in_multi_locked:

	IN_MULTI_UNLOCK();

out_imf_rollback:
	/* Commit or roll back the t1 state staged above. */
	if (error)
		imf_rollback(imf);
	else
		imf_commit(imf);

	imf_reap(imf);

out_inp_locked:
	INP_WUNLOCK(inp);
	return (error);
}

/*
 * Set the IP multicast options in response to user setsockopt().
2639170613Sbms * 2640170613Sbms * Many of the socket options handled in this function duplicate the 2641170613Sbms * functionality of socket options in the regular unicast API. However, 2642170613Sbms * it is not possible to merge the duplicate code, because the idempotence 2643170613Sbms * of the IPv4 multicast part of the BSD Sockets API must be preserved; 2644170613Sbms * the effects of these options must be treated as separate and distinct. 2645189592Sbms * 2646189592Sbms * SMPng: XXX: Unlocked read of inp_socket believed OK. 2647189592Sbms * FUTURE: The IP_MULTICAST_VIF option may be eliminated if MROUTING 2648189592Sbms * is refactored to no longer use vifs. 2649170613Sbms */ 2650170613Sbmsint 2651170613Sbmsinp_setmoptions(struct inpcb *inp, struct sockopt *sopt) 2652170613Sbms{ 2653170613Sbms struct ip_moptions *imo; 2654170613Sbms int error; 2655170613Sbms 2656170613Sbms error = 0; 2657170613Sbms 2658171746Scsjp /* 2659171746Scsjp * If socket is neither of type SOCK_RAW or SOCK_DGRAM, 2660171746Scsjp * or is a divert socket, reject it. 2661171746Scsjp */ 2662171746Scsjp if (inp->inp_socket->so_proto->pr_protocol == IPPROTO_DIVERT || 2663171746Scsjp (inp->inp_socket->so_proto->pr_type != SOCK_RAW && 2664189592Sbms inp->inp_socket->so_proto->pr_type != SOCK_DGRAM)) 2665171746Scsjp return (EOPNOTSUPP); 2666171746Scsjp 2667170613Sbms switch (sopt->sopt_name) { 2668170613Sbms case IP_MULTICAST_VIF: { 2669170613Sbms int vifi; 2670170613Sbms /* 2671170613Sbms * Select a multicast VIF for transmission. 2672170613Sbms * Only useful if multicast forwarding is active. 
2673170613Sbms */ 2674170613Sbms if (legal_vif_num == NULL) { 2675170613Sbms error = EOPNOTSUPP; 2676170613Sbms break; 2677170613Sbms } 2678170613Sbms error = sooptcopyin(sopt, &vifi, sizeof(int), sizeof(int)); 2679170613Sbms if (error) 2680170613Sbms break; 2681170613Sbms if (!legal_vif_num(vifi) && (vifi != -1)) { 2682170613Sbms error = EINVAL; 2683170613Sbms break; 2684170613Sbms } 2685170613Sbms imo = inp_findmoptions(inp); 2686170613Sbms imo->imo_multicast_vif = vifi; 2687178285Srwatson INP_WUNLOCK(inp); 2688170613Sbms break; 2689170613Sbms } 2690170613Sbms 2691170613Sbms case IP_MULTICAST_IF: 2692170613Sbms error = inp_set_multicast_if(inp, sopt); 2693170613Sbms break; 2694170613Sbms 2695170613Sbms case IP_MULTICAST_TTL: { 2696170613Sbms u_char ttl; 2697170613Sbms 2698170613Sbms /* 2699170613Sbms * Set the IP time-to-live for outgoing multicast packets. 2700170613Sbms * The original multicast API required a char argument, 2701170613Sbms * which is inconsistent with the rest of the socket API. 2702170613Sbms * We allow either a char or an int. 
2703170613Sbms */ 2704170613Sbms if (sopt->sopt_valsize == sizeof(u_char)) { 2705170613Sbms error = sooptcopyin(sopt, &ttl, sizeof(u_char), 2706170613Sbms sizeof(u_char)); 2707170613Sbms if (error) 2708170613Sbms break; 2709170613Sbms } else { 2710170613Sbms u_int ittl; 2711170613Sbms 2712170613Sbms error = sooptcopyin(sopt, &ittl, sizeof(u_int), 2713170613Sbms sizeof(u_int)); 2714170613Sbms if (error) 2715170613Sbms break; 2716170613Sbms if (ittl > 255) { 2717170613Sbms error = EINVAL; 2718170613Sbms break; 2719170613Sbms } 2720170613Sbms ttl = (u_char)ittl; 2721170613Sbms } 2722170613Sbms imo = inp_findmoptions(inp); 2723170613Sbms imo->imo_multicast_ttl = ttl; 2724178285Srwatson INP_WUNLOCK(inp); 2725170613Sbms break; 2726170613Sbms } 2727170613Sbms 2728170613Sbms case IP_MULTICAST_LOOP: { 2729170613Sbms u_char loop; 2730170613Sbms 2731170613Sbms /* 2732170613Sbms * Set the loopback flag for outgoing multicast packets. 2733170613Sbms * Must be zero or one. The original multicast API required a 2734170613Sbms * char argument, which is inconsistent with the rest 2735170613Sbms * of the socket API. We allow either a char or an int. 
2736170613Sbms */ 2737170613Sbms if (sopt->sopt_valsize == sizeof(u_char)) { 2738170613Sbms error = sooptcopyin(sopt, &loop, sizeof(u_char), 2739170613Sbms sizeof(u_char)); 2740170613Sbms if (error) 2741170613Sbms break; 2742170613Sbms } else { 2743170613Sbms u_int iloop; 2744170613Sbms 2745170613Sbms error = sooptcopyin(sopt, &iloop, sizeof(u_int), 2746170613Sbms sizeof(u_int)); 2747170613Sbms if (error) 2748170613Sbms break; 2749170613Sbms loop = (u_char)iloop; 2750170613Sbms } 2751170613Sbms imo = inp_findmoptions(inp); 2752170613Sbms imo->imo_multicast_loop = !!loop; 2753178285Srwatson INP_WUNLOCK(inp); 2754170613Sbms break; 2755170613Sbms } 2756170613Sbms 2757170613Sbms case IP_ADD_MEMBERSHIP: 2758170613Sbms case IP_ADD_SOURCE_MEMBERSHIP: 2759170613Sbms case MCAST_JOIN_GROUP: 2760170613Sbms case MCAST_JOIN_SOURCE_GROUP: 2761170613Sbms error = inp_join_group(inp, sopt); 2762170613Sbms break; 2763170613Sbms 2764170613Sbms case IP_DROP_MEMBERSHIP: 2765170613Sbms case IP_DROP_SOURCE_MEMBERSHIP: 2766170613Sbms case MCAST_LEAVE_GROUP: 2767170613Sbms case MCAST_LEAVE_SOURCE_GROUP: 2768170613Sbms error = inp_leave_group(inp, sopt); 2769170613Sbms break; 2770170613Sbms 2771170613Sbms case IP_BLOCK_SOURCE: 2772170613Sbms case IP_UNBLOCK_SOURCE: 2773170613Sbms case MCAST_BLOCK_SOURCE: 2774170613Sbms case MCAST_UNBLOCK_SOURCE: 2775189592Sbms error = inp_block_unblock_source(inp, sopt); 2776170613Sbms break; 2777170613Sbms 2778170613Sbms case IP_MSFILTER: 2779170613Sbms error = inp_set_source_filters(inp, sopt); 2780170613Sbms break; 2781170613Sbms 2782170613Sbms default: 2783170613Sbms error = EOPNOTSUPP; 2784170613Sbms break; 2785170613Sbms } 2786170613Sbms 2787170613Sbms INP_UNLOCK_ASSERT(inp); 2788170613Sbms 2789170613Sbms return (error); 2790170613Sbms} 2791189592Sbms 2792189592Sbms/* 2793189592Sbms * Expose IGMP's multicast filter mode and source list(s) to userland, 2794189592Sbms * keyed by (ifindex, group). 
 * The filter mode is written out as a uint32_t, followed by
 * 0..n of struct in_addr.
 * For use by ifmcstat(8).
 * SMPng: NOTE: unlocked read of ifindex space.
 */
static int
sysctl_ip_mcast_filters(SYSCTL_HANDLER_ARGS)
{
	struct in_addr		 src, group;
	struct ifnet		*ifp;
	struct ifmultiaddr	*ifma;
	struct in_multi		*inm;
	struct ip_msource	*ims;
	int			*name;
	int			 retval;
	u_int			 namelen;
	uint32_t		 fmode, ifindex;

	/* OID arguments: name[0] = ifindex, name[1] = group address. */
	name = (int *)arg1;
	namelen = arg2;

	/* Read-only node: reject attempts to set a new value. */
	if (req->newptr != NULL)
		return (EPERM);

	if (namelen != 2)
		return (EINVAL);

	ifindex = name[0];
	if (ifindex <= 0 || ifindex > V_if_index) {
		CTR2(KTR_IGMPV3, "%s: ifindex %u out of range",
		    __func__, ifindex);
		return (ENOENT);
	}

	/* name[1] carries the group address; must be a multicast group. */
	group.s_addr = name[1];
	if (!IN_MULTICAST(ntohl(group.s_addr))) {
		CTR2(KTR_IGMPV3, "%s: group %s is not multicast",
		    __func__, inet_ntoa(group));
		return (EINVAL);
	}

	ifp = ifnet_byindex(ifindex);
	if (ifp == NULL) {
		CTR2(KTR_IGMPV3, "%s: no ifp for ifindex %u",
		    __func__, ifindex);
		return (ENOENT);
	}

	/*
	 * Wire the old (output) buffer for the worst case up-front
	 * (fmode plus in_mcast_maxgrpsrc sources), presumably so the
	 * SYSCTL_OUT copies below cannot fault while locks are held.
	 */
	retval = sysctl_wire_old_buffer(req,
	    sizeof(uint32_t) + (in_mcast_maxgrpsrc * sizeof(struct in_addr)));
	if (retval)
		return (retval);

	IN_MULTI_LOCK();

	IF_ADDR_RLOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_INET ||
		    ifma->ifma_protospec == NULL)
			continue;
		inm = (struct in_multi *)ifma->ifma_protospec;
		if (!in_hosteq(inm->inm_addr, group))
			continue;
		/* Filter mode at t1 is written out first, as a uint32_t. */
		fmode = inm->inm_st[1].iss_fmode;
		retval = SYSCTL_OUT(req, &fmode, sizeof(uint32_t));
		if (retval != 0)
			break;
		RB_FOREACH(ims, ip_msource_tree, &inm->inm_srcs) {
#ifdef KTR
			struct in_addr ina;
			ina.s_addr = htonl(ims->ims_haddr);
			CTR2(KTR_IGMPV3, "%s: visit node %s", __func__,
			    inet_ntoa(ina));
#endif
			/*
			 * Only copy-out sources which are in-mode.
			 */
			if (fmode != ims_get_mode(inm, ims, 1)) {
				CTR1(KTR_IGMPV3, "%s: skip non-in-mode",
				    __func__);
				continue;
			}
			src.s_addr = htonl(ims->ims_haddr);
			retval = SYSCTL_OUT(req, &src, sizeof(struct in_addr));
			/* An output error ends only this source walk. */
			if (retval != 0)
				break;
		}
	}
	IF_ADDR_RUNLOCK(ifp);

	IN_MULTI_UNLOCK();

	return (retval);
}

#ifdef KTR

/* Short names for MCAST_UNDEFINED/MCAST_INCLUDE/MCAST_EXCLUDE. */
static const char *inm_modestrs[] = { "un", "in", "ex" };

/*
 * Map a group filter mode to a short string for KTR traces.
 * Returns "??" for out-of-range values.
 */
static const char *
inm_mode_str(const int mode)
{

	if (mode >= MCAST_UNDEFINED && mode <= MCAST_EXCLUDE)
		return (inm_modestrs[mode]);
	return ("??");
}

/*
 * Names for the IGMP per-group protocol states; indexed by state, so
 * the order must match the IGMP_* state constants
 * (IGMP_NOT_MEMBER .. IGMP_LEAVING_MEMBER) used by inm_state_str().
 */
static const char *inm_statestrs[] = {
	"not-member",
	"silent",
	"idle",
	"lazy",
	"sleeping",
	"awakening",
	"query-pending",
	"sg-query-pending",
	"leaving"
};

/*
 * Map an IGMP per-group state to a human-readable string for traces.
 * Returns "??" for out-of-range values.
 */
static const char *
inm_state_str(const int state)
{

	if (state >= IGMP_NOT_MEMBER && state <= IGMP_LEAVING_MEMBER)
		return (inm_statestrs[state]);
	return ("??");
}

/*
 * Dump an in_multi structure to the console.
 * Debugging aid: does nothing unless KTR_IGMPV3 tracing is enabled
 * in ktr_mask.
 */
void
inm_print(const struct in_multi *inm)
{
	int t;

	/* Skip all the work when IGMPv3 tracing is disabled. */
	if ((ktr_mask & KTR_IGMPV3) == 0)
		return;

	printf("%s: --- begin inm %p ---\n", __func__, inm);
	printf("addr %s ifp %p(%s) ifma %p\n",
	    inet_ntoa(inm->inm_addr),
	    inm->inm_ifp,
	    inm->inm_ifp->if_xname,
	    inm->inm_ifma);
	printf("timer %u state %s refcount %u scq.len %u\n",
	    inm->inm_timer,
	    inm_state_str(inm->inm_state),
	    inm->inm_refcount,
	    inm->inm_scq.ifq_len);
	printf("igi %p nsrc %lu sctimer %u scrv %u\n",
	    inm->inm_igi,
	    inm->inm_nsrc,
	    inm->inm_sctimer,
	    inm->inm_scrv);
	/* Print both state snapshots held in inm_st[] (t0 and t1). */
	for (t = 0; t < 2; t++) {
		printf("t%d: fmode %s asm %u ex %u in %u rec %u\n", t,
		    inm_mode_str(inm->inm_st[t].iss_fmode),
		    inm->inm_st[t].iss_asm,
		    inm->inm_st[t].iss_ex,
		    inm->inm_st[t].iss_in,
		    inm->inm_st[t].iss_rec);
	}
	printf("%s: --- end inm %p ---\n", __func__, inm);
}

#else /* !KTR */

/* Stub: inm_print() is a no-op in kernels built without KTR. */
void
inm_print(const struct in_multi *inm)
{

}

#endif /* KTR */

/* Instantiate the red-black tree routines for the per-group source set. */
RB_GENERATE(ip_msource_tree, ip_msource, ims_link, ip_msource_cmp);