1/*	$NetBSD: if_bridge.c,v 1.31 2005/06/01 19:45:34 jdc Exp $	*/
2
3/*-
4 * SPDX-License-Identifier: BSD-4-Clause
5 *
6 * Copyright 2001 Wasabi Systems, Inc.
7 * All rights reserved.
8 *
9 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 *    notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 *    notice, this list of conditions and the following disclaimer in the
18 *    documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 *    must display the following acknowledgement:
21 *	This product includes software developed for the NetBSD Project by
22 *	Wasabi Systems, Inc.
23 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
24 *    or promote products derived from this software without specific prior
25 *    written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
28 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40/*
41 * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
42 * All rights reserved.
43 *
44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions
46 * are met:
47 * 1. Redistributions of source code must retain the above copyright
48 *    notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright
50 *    notice, this list of conditions and the following disclaimer in the
51 *    documentation and/or other materials provided with the distribution.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
54 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
55 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
56 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
57 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
58 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
59 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
61 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
62 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
63 * POSSIBILITY OF SUCH DAMAGE.
64 *
65 * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp
66 */
67
68/*
69 * Network interface bridge support.
70 *
71 * TODO:
72 *
73 *	- Currently only supports Ethernet-like interfaces (Ethernet,
74 *	  802.11, VLANs on Ethernet, etc.)  Figure out a nice way
75 *	  to bridge other types of interfaces (maybe consider
76 *	  heterogeneous bridges).
77 */
78
79#include <sys/cdefs.h>
80__FBSDID("$FreeBSD$");
81
82#include "opt_inet.h"
83#include "opt_inet6.h"
84
85#include <sys/param.h>
86#include <sys/eventhandler.h>
87#include <sys/mbuf.h>
88#include <sys/malloc.h>
89#include <sys/protosw.h>
90#include <sys/systm.h>
91#include <sys/jail.h>
92#include <sys/time.h>
93#include <sys/socket.h> /* for net/if.h */
94#include <sys/sockio.h>
95#include <sys/ctype.h>  /* string functions */
96#include <sys/kernel.h>
97#include <sys/random.h>
98#include <sys/syslog.h>
99#include <sys/sysctl.h>
100#include <vm/uma.h>
101#include <sys/module.h>
102#include <sys/priv.h>
103#include <sys/proc.h>
104#include <sys/lock.h>
105#include <sys/mutex.h>
106
107#include <net/bpf.h>
108#include <net/if.h>
109#include <net/if_clone.h>
110#include <net/if_dl.h>
111#include <net/if_types.h>
112#include <net/if_var.h>
113#include <net/pfil.h>
114#include <net/vnet.h>
115
116#include <netinet/in.h>
117#include <netinet/in_systm.h>
118#include <netinet/in_var.h>
119#include <netinet/ip.h>
120#include <netinet/ip_var.h>
121#ifdef INET6
122#include <netinet/ip6.h>
123#include <netinet6/ip6_var.h>
124#include <netinet6/in6_ifattach.h>
125#endif
126#if defined(INET) || defined(INET6)
127#include <netinet/ip_carp.h>
128#endif
129#include <machine/in_cksum.h>
130#include <netinet/if_ether.h>
131#include <net/bridgestp.h>
132#include <net/if_bridgevar.h>
133#include <net/if_llc.h>
134#include <net/if_vlan_var.h>
135
136#include <net/route.h>
137
138#ifdef INET6
/*
 * XXX: declared here to avoid pulling in many inet6-related headers;
 * this should probably be generalized.
 */
143extern void	nd6_setmtu(struct ifnet *);
144#endif
145
146/*
147 * Size of the route hash table.  Must be a power of two.
148 */
149#ifndef BRIDGE_RTHASH_SIZE
150#define	BRIDGE_RTHASH_SIZE		1024
151#endif
152
153#define	BRIDGE_RTHASH_MASK		(BRIDGE_RTHASH_SIZE - 1)
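
/*
 * Example (illustrative only): because the table size is a power of two,
 * a hash value is reduced to a bucket index with a mask instead of a
 * modulo operation, i.e.
 *
 *	bucket = hash & BRIDGE_RTHASH_MASK;	(same as hash % BRIDGE_RTHASH_SIZE)
 *
 * which is why BRIDGE_RTHASH_SIZE must be a power of two.
 */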
154
155/*
156 * Default maximum number of addresses to cache.
157 */
158#ifndef BRIDGE_RTABLE_MAX
159#define	BRIDGE_RTABLE_MAX		2000
160#endif
161
162/*
163 * Timeout (in seconds) for entries learned dynamically.
164 */
165#ifndef BRIDGE_RTABLE_TIMEOUT
166#define	BRIDGE_RTABLE_TIMEOUT		(20 * 60)	/* same as ARP */
167#endif
168
169/*
170 * Number of seconds between walks of the route list.
171 */
172#ifndef BRIDGE_RTABLE_PRUNE_PERIOD
173#define	BRIDGE_RTABLE_PRUNE_PERIOD	(5 * 60)
174#endif
175
176/*
177 * List of capabilities to possibly mask on the member interface.
178 */
179#define	BRIDGE_IFCAPS_MASK		(IFCAP_TOE|IFCAP_TSO|IFCAP_TXCSUM|\
180					 IFCAP_TXCSUM_IPV6)
181
182/*
183 * List of capabilities to strip
184 */
185#define	BRIDGE_IFCAPS_STRIP		IFCAP_LRO
186
/*
 * Bridge locking
 *
 * The bridge relies heavily on the epoch(9) system to protect its data
 * structures. This means we can safely use CK_LISTs while in NET_EPOCH, but we
 * must ensure there is only one writer at a time.
 *
 * That is: for read accesses we only need to be in NET_EPOCH, but for write
 * accesses we must hold:
 *
 *  - BRIDGE_RT_LOCK, for any change to bridge_rtnodes
 *  - BRIDGE_LOCK, for any other change
 *
 * The BRIDGE_LOCK is a sleepable lock, because it is held across ioctl()
 * calls to bridge member interfaces and these ioctl()s can sleep.
 * The BRIDGE_RT_LOCK is a non-sleepable mutex, because it is sometimes
 * required while we're in NET_EPOCH, where sleeping is not allowed.
 */
205#define BRIDGE_LOCK_INIT(_sc)		do {			\
206	sx_init(&(_sc)->sc_sx, "if_bridge");			\
207	mtx_init(&(_sc)->sc_rt_mtx, "if_bridge rt", NULL, MTX_DEF);	\
208} while (0)
209#define BRIDGE_LOCK_DESTROY(_sc)	do {	\
210	sx_destroy(&(_sc)->sc_sx);		\
211	mtx_destroy(&(_sc)->sc_rt_mtx);		\
212} while (0)
213#define BRIDGE_LOCK(_sc)		sx_xlock(&(_sc)->sc_sx)
214#define BRIDGE_UNLOCK(_sc)		sx_xunlock(&(_sc)->sc_sx)
215#define BRIDGE_LOCK_ASSERT(_sc)		sx_assert(&(_sc)->sc_sx, SX_XLOCKED)
216#define BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(_sc)	\
217	    MPASS(in_epoch(net_epoch_preempt) || sx_xlocked(&(_sc)->sc_sx))
218#define BRIDGE_UNLOCK_ASSERT(_sc)	sx_assert(&(_sc)->sc_sx, SX_UNLOCKED)
219#define BRIDGE_RT_LOCK(_sc)		mtx_lock(&(_sc)->sc_rt_mtx)
220#define BRIDGE_RT_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_rt_mtx)
221#define BRIDGE_RT_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->sc_rt_mtx, MA_OWNED)
222#define BRIDGE_RT_LOCK_OR_NET_EPOCH_ASSERT(_sc)	\
223	    MPASS(in_epoch(net_epoch_preempt) || mtx_owned(&(_sc)->sc_rt_mtx))
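
/*
 * Illustrative sketch of the locking rules above (not compiled; the
 * "bridge_example_*" helpers exist only for this example).  Readers merely
 * enter the net epoch; writers take the exclusive sx lock, or the rt mutex
 * when changing the route table.
 */
#if 0
static void
bridge_example_reader(struct bridge_softc *sc)
{
	struct bridge_iflist *bif;
	struct epoch_tracker et;

	/* Read access: epoch section only; sleeping is not allowed here. */
	NET_EPOCH_ENTER(et);
	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
		/* ... inspect bif ... */
	}
	NET_EPOCH_EXIT(et);
}

static void
bridge_example_writer(struct bridge_softc *sc)
{
	/* Write access: exclusive lock; may sleep (e.g. member ioctls). */
	BRIDGE_LOCK(sc);
	/* ... modify sc_iflist here ... */
	BRIDGE_UNLOCK(sc);

	/* Route table changes take the non-sleepable mutex instead. */
	BRIDGE_RT_LOCK(sc);
	/* ... modify sc_rthash / sc_rtlist here ... */
	BRIDGE_RT_UNLOCK(sc);
}
#endif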
224
225/*
226 * Bridge interface list entry.
227 */
228struct bridge_iflist {
229	CK_LIST_ENTRY(bridge_iflist) bif_next;
230	struct ifnet		*bif_ifp;	/* member if */
231	struct bstp_port	bif_stp;	/* STP state */
232	uint32_t		bif_flags;	/* member if flags */
233	int			bif_savedcaps;	/* saved capabilities */
234	uint32_t		bif_addrmax;	/* max # of addresses */
235	uint32_t		bif_addrcnt;	/* cur. # of addresses */
236	uint32_t		bif_addrexceeded;/* # of address violations */
237	struct epoch_context	bif_epoch_ctx;
238};
239
240/*
241 * Bridge route node.
242 */
243struct bridge_rtnode {
244	CK_LIST_ENTRY(bridge_rtnode) brt_hash;	/* hash table linkage */
245	CK_LIST_ENTRY(bridge_rtnode) brt_list;	/* list linkage */
246	struct bridge_iflist	*brt_dst;	/* destination if */
247	unsigned long		brt_expire;	/* expiration time */
248	uint8_t			brt_flags;	/* address flags */
249	uint8_t			brt_addr[ETHER_ADDR_LEN];
250	uint16_t		brt_vlan;	/* vlan id */
251	struct	vnet		*brt_vnet;
252	struct	epoch_context	brt_epoch_ctx;
253};
254#define	brt_ifp			brt_dst->bif_ifp
255
256/*
257 * Software state for each bridge.
258 */
259struct bridge_softc {
260	struct ifnet		*sc_ifp;	/* make this an interface */
261	LIST_ENTRY(bridge_softc) sc_list;
262	struct sx		sc_sx;
263	struct mtx		sc_rt_mtx;
264	uint32_t		sc_brtmax;	/* max # of addresses */
265	uint32_t		sc_brtcnt;	/* cur. # of addresses */
266	uint32_t		sc_brttimeout;	/* rt timeout in seconds */
267	struct callout		sc_brcallout;	/* bridge callout */
268	CK_LIST_HEAD(, bridge_iflist) sc_iflist;	/* member interface list */
269	CK_LIST_HEAD(, bridge_rtnode) *sc_rthash;	/* our forwarding table */
270	CK_LIST_HEAD(, bridge_rtnode) sc_rtlist;	/* list version of above */
271	uint32_t		sc_rthash_key;	/* key for hash */
272	CK_LIST_HEAD(, bridge_iflist) sc_spanlist;	/* span ports list */
273	struct bstp_state	sc_stp;		/* STP state */
274	uint32_t		sc_brtexceeded;	/* # of cache drops */
275	struct ifnet		*sc_ifaddr;	/* member mac copied from */
276	struct ether_addr	sc_defaddr;	/* Default MAC address */
277	struct epoch_context	sc_epoch_ctx;
278};
279
280VNET_DEFINE_STATIC(struct sx, bridge_list_sx);
281#define	V_bridge_list_sx	VNET(bridge_list_sx)
282static eventhandler_tag bridge_detach_cookie;
283
284int	bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
285
286VNET_DEFINE_STATIC(uma_zone_t, bridge_rtnode_zone);
287#define	V_bridge_rtnode_zone	VNET(bridge_rtnode_zone)
288
289static int	bridge_clone_create(struct if_clone *, int, caddr_t);
290static void	bridge_clone_destroy(struct ifnet *);
291
292static int	bridge_ioctl(struct ifnet *, u_long, caddr_t);
293static void	bridge_mutecaps(struct bridge_softc *);
294static void	bridge_set_ifcap(struct bridge_softc *, struct bridge_iflist *,
295		    int);
296static void	bridge_ifdetach(void *arg __unused, struct ifnet *);
297static void	bridge_init(void *);
298static void	bridge_dummynet(struct mbuf *, struct ifnet *);
299static void	bridge_stop(struct ifnet *, int);
300static int	bridge_transmit(struct ifnet *, struct mbuf *);
301static void	bridge_qflush(struct ifnet *);
302static struct mbuf *bridge_input(struct ifnet *, struct mbuf *);
303static int	bridge_output(struct ifnet *, struct mbuf *, struct sockaddr *,
304		    struct rtentry *);
305static int	bridge_enqueue(struct bridge_softc *, struct ifnet *,
306		    struct mbuf *);
307static void	bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp, int);
308
309static void	bridge_forward(struct bridge_softc *, struct bridge_iflist *,
310		    struct mbuf *m);
311
312static void	bridge_timer(void *);
313
314static void	bridge_broadcast(struct bridge_softc *, struct ifnet *,
315		    struct mbuf *, int);
316static void	bridge_span(struct bridge_softc *, struct mbuf *);
317
318static int	bridge_rtupdate(struct bridge_softc *, const uint8_t *,
319		    uint16_t, struct bridge_iflist *, int, uint8_t);
320static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *,
321		    uint16_t);
322static void	bridge_rttrim(struct bridge_softc *);
323static void	bridge_rtage(struct bridge_softc *);
324static void	bridge_rtflush(struct bridge_softc *, int);
325static int	bridge_rtdaddr(struct bridge_softc *, const uint8_t *,
326		    uint16_t);
327
328static void	bridge_rtable_init(struct bridge_softc *);
329static void	bridge_rtable_fini(struct bridge_softc *);
330
331static int	bridge_rtnode_addr_cmp(const uint8_t *, const uint8_t *);
332static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
333		    const uint8_t *, uint16_t);
334static int	bridge_rtnode_insert(struct bridge_softc *,
335		    struct bridge_rtnode *);
336static void	bridge_rtnode_destroy(struct bridge_softc *,
337		    struct bridge_rtnode *);
338static void	bridge_rtable_expire(struct ifnet *, int);
339static void	bridge_state_change(struct ifnet *, int);
340
341static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
342		    const char *name);
343static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
344		    struct ifnet *ifp);
345static void	bridge_delete_member(struct bridge_softc *,
346		    struct bridge_iflist *, int);
347static void	bridge_delete_span(struct bridge_softc *,
348		    struct bridge_iflist *);
349
350static int	bridge_ioctl_add(struct bridge_softc *, void *);
351static int	bridge_ioctl_del(struct bridge_softc *, void *);
352static int	bridge_ioctl_gifflags(struct bridge_softc *, void *);
353static int	bridge_ioctl_sifflags(struct bridge_softc *, void *);
354static int	bridge_ioctl_scache(struct bridge_softc *, void *);
355static int	bridge_ioctl_gcache(struct bridge_softc *, void *);
356static int	bridge_ioctl_gifs(struct bridge_softc *, void *);
357static int	bridge_ioctl_rts(struct bridge_softc *, void *);
358static int	bridge_ioctl_saddr(struct bridge_softc *, void *);
359static int	bridge_ioctl_sto(struct bridge_softc *, void *);
360static int	bridge_ioctl_gto(struct bridge_softc *, void *);
361static int	bridge_ioctl_daddr(struct bridge_softc *, void *);
362static int	bridge_ioctl_flush(struct bridge_softc *, void *);
363static int	bridge_ioctl_gpri(struct bridge_softc *, void *);
364static int	bridge_ioctl_spri(struct bridge_softc *, void *);
365static int	bridge_ioctl_ght(struct bridge_softc *, void *);
366static int	bridge_ioctl_sht(struct bridge_softc *, void *);
367static int	bridge_ioctl_gfd(struct bridge_softc *, void *);
368static int	bridge_ioctl_sfd(struct bridge_softc *, void *);
369static int	bridge_ioctl_gma(struct bridge_softc *, void *);
370static int	bridge_ioctl_sma(struct bridge_softc *, void *);
371static int	bridge_ioctl_sifprio(struct bridge_softc *, void *);
372static int	bridge_ioctl_sifcost(struct bridge_softc *, void *);
373static int	bridge_ioctl_sifmaxaddr(struct bridge_softc *, void *);
374static int	bridge_ioctl_addspan(struct bridge_softc *, void *);
375static int	bridge_ioctl_delspan(struct bridge_softc *, void *);
376static int	bridge_ioctl_gbparam(struct bridge_softc *, void *);
377static int	bridge_ioctl_grte(struct bridge_softc *, void *);
378static int	bridge_ioctl_gifsstp(struct bridge_softc *, void *);
379static int	bridge_ioctl_sproto(struct bridge_softc *, void *);
380static int	bridge_ioctl_stxhc(struct bridge_softc *, void *);
381static int	bridge_pfil(struct mbuf **, struct ifnet *, struct ifnet *,
382		    int);
383static int	bridge_ip_checkbasic(struct mbuf **mp);
384#ifdef INET6
385static int	bridge_ip6_checkbasic(struct mbuf **mp);
386#endif /* INET6 */
387static int	bridge_fragment(struct ifnet *, struct mbuf **mp,
388		    struct ether_header *, int, struct llc *);
389static void	bridge_linkstate(struct ifnet *ifp);
390static void	bridge_linkcheck(struct bridge_softc *sc);
391
392/* The default bridge vlan is 1 (IEEE 802.1Q-2003 Table 9-2) */
393#define	VLANTAGOF(_m)	\
394    (_m->m_flags & M_VLANTAG) ? EVL_VLANOFTAG(_m->m_pkthdr.ether_vtag) : 1
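
/*
 * Example (illustrative only): VLANTAGOF(m) yields the 802.1Q VLAN ID for
 * a tagged mbuf (EVL_VLANOFTAG of m->m_pkthdr.ether_vtag when M_VLANTAG is
 * set) and falls back to the default VLAN 1 for untagged frames, so the
 * forwarding table can always be keyed by (address, vlan).
 */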
395
396static struct bstp_cb_ops bridge_ops = {
397	.bcb_state = bridge_state_change,
398	.bcb_rtage = bridge_rtable_expire
399};
400
401SYSCTL_DECL(_net_link);
402static SYSCTL_NODE(_net_link, IFT_BRIDGE, bridge, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
403    "Bridge");
404
405/* only pass IP[46] packets when pfil is enabled */
406VNET_DEFINE_STATIC(int, pfil_onlyip) = 1;
407#define	V_pfil_onlyip	VNET(pfil_onlyip)
408SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_onlyip,
409    CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_onlyip), 0,
410    "Only pass IP packets when pfil is enabled");
411
412/* run pfil hooks on the bridge interface */
413VNET_DEFINE_STATIC(int, pfil_bridge) = 1;
414#define	V_pfil_bridge	VNET(pfil_bridge)
415SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_bridge,
416    CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_bridge), 0,
417    "Packet filter on the bridge interface");
418
419/* layer2 filter with ipfw */
420VNET_DEFINE_STATIC(int, pfil_ipfw);
421#define	V_pfil_ipfw	VNET(pfil_ipfw)
422
423/* layer2 ARP filter with ipfw */
424VNET_DEFINE_STATIC(int, pfil_ipfw_arp);
425#define	V_pfil_ipfw_arp	VNET(pfil_ipfw_arp)
426SYSCTL_INT(_net_link_bridge, OID_AUTO, ipfw_arp,
427    CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_ipfw_arp), 0,
428    "Filter ARP packets through IPFW layer2");
429
430/* run pfil hooks on the member interface */
431VNET_DEFINE_STATIC(int, pfil_member) = 1;
432#define	V_pfil_member	VNET(pfil_member)
433SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_member,
434    CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_member), 0,
435    "Packet filter on the member interface");
436
437/* run pfil hooks on the physical interface for locally destined packets */
438VNET_DEFINE_STATIC(int, pfil_local_phys);
439#define	V_pfil_local_phys	VNET(pfil_local_phys)
440SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_local_phys,
441    CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_local_phys), 0,
442    "Packet filter on the physical interface for locally destined packets");
443
444/* log STP state changes */
445VNET_DEFINE_STATIC(int, log_stp);
446#define	V_log_stp	VNET(log_stp)
447SYSCTL_INT(_net_link_bridge, OID_AUTO, log_stp,
448    CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(log_stp), 0,
449    "Log STP state changes");
450
451/* share MAC with first bridge member */
452VNET_DEFINE_STATIC(int, bridge_inherit_mac);
453#define	V_bridge_inherit_mac	VNET(bridge_inherit_mac)
454SYSCTL_INT(_net_link_bridge, OID_AUTO, inherit_mac,
455    CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(bridge_inherit_mac), 0,
456    "Inherit MAC address from the first bridge member");
457
458VNET_DEFINE_STATIC(int, allow_llz_overlap) = 0;
459#define	V_allow_llz_overlap	VNET(allow_llz_overlap)
460SYSCTL_INT(_net_link_bridge, OID_AUTO, allow_llz_overlap,
461    CTLFLAG_RW | CTLFLAG_VNET, &VNET_NAME(allow_llz_overlap), 0,
462    "Allow overlap of link-local scope "
463    "zones of a bridge interface and the member interfaces");
464
465struct bridge_control {
466	int	(*bc_func)(struct bridge_softc *, void *);
467	int	bc_argsize;
468	int	bc_flags;
469};
470
471#define	BC_F_COPYIN		0x01	/* copy arguments in */
472#define	BC_F_COPYOUT		0x02	/* copy arguments out */
473#define	BC_F_SUSER		0x04	/* do super-user check */
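
/*
 * Illustrative sketch (not compiled) of how a userland caller reaches the
 * handlers below: the ifd_cmd index selects an entry in
 * bridge_control_table, ifd_len must match bc_argsize, and the BC_F_COPYIN /
 * BC_F_COPYOUT flags decide which direction the argument buffer is copied.
 * BRDGADD is the command index from net/if_bridgevar.h; "bridge0" and "em0"
 * are placeholder interface names.
 */
#if 0
	struct ifbreq req;
	struct ifdrv ifd;
	int s = socket(AF_INET, SOCK_DGRAM, 0);	/* any ordinary socket will do */

	memset(&req, 0, sizeof(req));
	strlcpy(req.ifbr_ifsname, "em0", sizeof(req.ifbr_ifsname));

	memset(&ifd, 0, sizeof(ifd));
	strlcpy(ifd.ifd_name, "bridge0", sizeof(ifd.ifd_name));
	ifd.ifd_cmd = BRDGADD;		/* index into bridge_control_table */
	ifd.ifd_len = sizeof(req);	/* must equal bc_argsize */
	ifd.ifd_data = &req;

	/* A BC_F_COPYIN|BC_F_SUSER command: use the "set" ioctl, needs privilege. */
	ioctl(s, SIOCSDRVSPEC, &ifd);
#endif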
474
475const struct bridge_control bridge_control_table[] = {
476	{ bridge_ioctl_add,		sizeof(struct ifbreq),
477	  BC_F_COPYIN|BC_F_SUSER },
478	{ bridge_ioctl_del,		sizeof(struct ifbreq),
479	  BC_F_COPYIN|BC_F_SUSER },
480
481	{ bridge_ioctl_gifflags,	sizeof(struct ifbreq),
482	  BC_F_COPYIN|BC_F_COPYOUT },
483	{ bridge_ioctl_sifflags,	sizeof(struct ifbreq),
484	  BC_F_COPYIN|BC_F_SUSER },
485
486	{ bridge_ioctl_scache,		sizeof(struct ifbrparam),
487	  BC_F_COPYIN|BC_F_SUSER },
488	{ bridge_ioctl_gcache,		sizeof(struct ifbrparam),
489	  BC_F_COPYOUT },
490
491	{ bridge_ioctl_gifs,		sizeof(struct ifbifconf),
492	  BC_F_COPYIN|BC_F_COPYOUT },
493	{ bridge_ioctl_rts,		sizeof(struct ifbaconf),
494	  BC_F_COPYIN|BC_F_COPYOUT },
495
496	{ bridge_ioctl_saddr,		sizeof(struct ifbareq),
497	  BC_F_COPYIN|BC_F_SUSER },
498
499	{ bridge_ioctl_sto,		sizeof(struct ifbrparam),
500	  BC_F_COPYIN|BC_F_SUSER },
501	{ bridge_ioctl_gto,		sizeof(struct ifbrparam),
502	  BC_F_COPYOUT },
503
504	{ bridge_ioctl_daddr,		sizeof(struct ifbareq),
505	  BC_F_COPYIN|BC_F_SUSER },
506
507	{ bridge_ioctl_flush,		sizeof(struct ifbreq),
508	  BC_F_COPYIN|BC_F_SUSER },
509
510	{ bridge_ioctl_gpri,		sizeof(struct ifbrparam),
511	  BC_F_COPYOUT },
512	{ bridge_ioctl_spri,		sizeof(struct ifbrparam),
513	  BC_F_COPYIN|BC_F_SUSER },
514
515	{ bridge_ioctl_ght,		sizeof(struct ifbrparam),
516	  BC_F_COPYOUT },
517	{ bridge_ioctl_sht,		sizeof(struct ifbrparam),
518	  BC_F_COPYIN|BC_F_SUSER },
519
520	{ bridge_ioctl_gfd,		sizeof(struct ifbrparam),
521	  BC_F_COPYOUT },
522	{ bridge_ioctl_sfd,		sizeof(struct ifbrparam),
523	  BC_F_COPYIN|BC_F_SUSER },
524
525	{ bridge_ioctl_gma,		sizeof(struct ifbrparam),
526	  BC_F_COPYOUT },
527	{ bridge_ioctl_sma,		sizeof(struct ifbrparam),
528	  BC_F_COPYIN|BC_F_SUSER },
529
530	{ bridge_ioctl_sifprio,		sizeof(struct ifbreq),
531	  BC_F_COPYIN|BC_F_SUSER },
532
533	{ bridge_ioctl_sifcost,		sizeof(struct ifbreq),
534	  BC_F_COPYIN|BC_F_SUSER },
535
536	{ bridge_ioctl_addspan,		sizeof(struct ifbreq),
537	  BC_F_COPYIN|BC_F_SUSER },
538	{ bridge_ioctl_delspan,		sizeof(struct ifbreq),
539	  BC_F_COPYIN|BC_F_SUSER },
540
541	{ bridge_ioctl_gbparam,		sizeof(struct ifbropreq),
542	  BC_F_COPYOUT },
543
544	{ bridge_ioctl_grte,		sizeof(struct ifbrparam),
545	  BC_F_COPYOUT },
546
547	{ bridge_ioctl_gifsstp,		sizeof(struct ifbpstpconf),
548	  BC_F_COPYIN|BC_F_COPYOUT },
549
550	{ bridge_ioctl_sproto,		sizeof(struct ifbrparam),
551	  BC_F_COPYIN|BC_F_SUSER },
552
553	{ bridge_ioctl_stxhc,		sizeof(struct ifbrparam),
554	  BC_F_COPYIN|BC_F_SUSER },
555
556	{ bridge_ioctl_sifmaxaddr,	sizeof(struct ifbreq),
557	  BC_F_COPYIN|BC_F_SUSER },
558
559};
560const int bridge_control_table_size = nitems(bridge_control_table);
561
562VNET_DEFINE_STATIC(LIST_HEAD(, bridge_softc), bridge_list);
563#define	V_bridge_list	VNET(bridge_list)
564#define	BRIDGE_LIST_LOCK_INIT(x)	sx_init(&V_bridge_list_sx,	\
565					    "if_bridge list")
566#define	BRIDGE_LIST_LOCK_DESTROY(x)	sx_destroy(&V_bridge_list_sx)
567#define	BRIDGE_LIST_LOCK(x)		sx_xlock(&V_bridge_list_sx)
568#define	BRIDGE_LIST_UNLOCK(x)		sx_xunlock(&V_bridge_list_sx)
569
570VNET_DEFINE_STATIC(struct if_clone *, bridge_cloner);
571#define	V_bridge_cloner	VNET(bridge_cloner)
572
573static const char bridge_name[] = "bridge";
574
575static void
576vnet_bridge_init(const void *unused __unused)
577{
578
579	V_bridge_rtnode_zone = uma_zcreate("bridge_rtnode",
580	    sizeof(struct bridge_rtnode), NULL, NULL, NULL, NULL,
581	    UMA_ALIGN_PTR, 0);
582	BRIDGE_LIST_LOCK_INIT();
583	LIST_INIT(&V_bridge_list);
584	V_bridge_cloner = if_clone_simple(bridge_name,
585	    bridge_clone_create, bridge_clone_destroy, 0);
586}
587VNET_SYSINIT(vnet_bridge_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
588    vnet_bridge_init, NULL);
589
590static void
591vnet_bridge_uninit(const void *unused __unused)
592{
593
594	if_clone_detach(V_bridge_cloner);
595	V_bridge_cloner = NULL;
596	BRIDGE_LIST_LOCK_DESTROY();
597
598	/* Callbacks may use the UMA zone. */
599	epoch_drain_callbacks(net_epoch_preempt);
600
601	uma_zdestroy(V_bridge_rtnode_zone);
602}
603VNET_SYSUNINIT(vnet_bridge_uninit, SI_SUB_PSEUDO, SI_ORDER_ANY,
604    vnet_bridge_uninit, NULL);
605
606static int
607bridge_modevent(module_t mod, int type, void *data)
608{
609
610	switch (type) {
611	case MOD_LOAD:
612		bridge_dn_p = bridge_dummynet;
613		bridge_detach_cookie = EVENTHANDLER_REGISTER(
614		    ifnet_departure_event, bridge_ifdetach, NULL,
615		    EVENTHANDLER_PRI_ANY);
616		break;
617	case MOD_UNLOAD:
618		EVENTHANDLER_DEREGISTER(ifnet_departure_event,
619		    bridge_detach_cookie);
620		bridge_dn_p = NULL;
621		break;
622	default:
623		return (EOPNOTSUPP);
624	}
625	return (0);
626}
627
628static moduledata_t bridge_mod = {
629	"if_bridge",
630	bridge_modevent,
631	0
632};
633
634DECLARE_MODULE(if_bridge, bridge_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
635MODULE_VERSION(if_bridge, 1);
636MODULE_DEPEND(if_bridge, bridgestp, 1, 1, 1);
637
638/*
639 * handler for net.link.bridge.ipfw
640 */
641static int
642sysctl_pfil_ipfw(SYSCTL_HANDLER_ARGS)
643{
644	int enable = V_pfil_ipfw;
645	int error;
646
647	error = sysctl_handle_int(oidp, &enable, 0, req);
648	enable &= 1;
649
650	if (enable != V_pfil_ipfw) {
651		V_pfil_ipfw = enable;
652
		/*
		 * Disable pfil so that ipfw doesn't run twice.  If the user
		 * really wants both, they can re-enable pfil_bridge and/or
		 * pfil_member.  Also allow non-IP packets, as ipfw can filter
		 * by layer2 type.
		 */
659		if (V_pfil_ipfw) {
660			V_pfil_onlyip = 0;
661			V_pfil_bridge = 0;
662			V_pfil_member = 0;
663		}
664	}
665
666	return (error);
667}
668SYSCTL_PROC(_net_link_bridge, OID_AUTO, ipfw,
669    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_VNET | CTLFLAG_NEEDGIANT,
670    &VNET_NAME(pfil_ipfw), 0, &sysctl_pfil_ipfw, "I",
671    "Layer2 filter with IPFW");
672
673#ifdef VIMAGE
674static void
675bridge_reassign(struct ifnet *ifp, struct vnet *newvnet, char *arg)
676{
677	struct bridge_softc *sc = ifp->if_softc;
678	struct bridge_iflist *bif;
679
680	BRIDGE_LOCK(sc);
681
682	while ((bif = CK_LIST_FIRST(&sc->sc_iflist)) != NULL)
683		bridge_delete_member(sc, bif, 0);
684
685	while ((bif = CK_LIST_FIRST(&sc->sc_spanlist)) != NULL) {
686		bridge_delete_span(sc, bif);
687	}
688
689	BRIDGE_UNLOCK(sc);
690
691	ether_reassign(ifp, newvnet, arg);
692}
693#endif
694
695/*
696 * bridge_clone_create:
697 *
698 *	Create a new bridge instance.
699 */
700static int
701bridge_clone_create(struct if_clone *ifc, int unit, caddr_t params)
702{
703	struct bridge_softc *sc;
704	struct ifnet *ifp;
705
706	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO);
707	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
708	if (ifp == NULL) {
709		free(sc, M_DEVBUF);
710		return (ENOSPC);
711	}
712
713	BRIDGE_LOCK_INIT(sc);
714	sc->sc_brtmax = BRIDGE_RTABLE_MAX;
715	sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
716
717	/* Initialize our routing table. */
718	bridge_rtable_init(sc);
719
720	callout_init_mtx(&sc->sc_brcallout, &sc->sc_rt_mtx, 0);
721
722	CK_LIST_INIT(&sc->sc_iflist);
723	CK_LIST_INIT(&sc->sc_spanlist);
724
725	ifp->if_softc = sc;
726	if_initname(ifp, bridge_name, unit);
727	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
728	ifp->if_ioctl = bridge_ioctl;
729	ifp->if_transmit = bridge_transmit;
730	ifp->if_qflush = bridge_qflush;
731	ifp->if_init = bridge_init;
732	ifp->if_type = IFT_BRIDGE;
733
734	ether_gen_addr(ifp, &sc->sc_defaddr);
735
736	bstp_attach(&sc->sc_stp, &bridge_ops);
737	ether_ifattach(ifp, sc->sc_defaddr.octet);
738	/* Now undo some of the damage... */
739	ifp->if_baudrate = 0;
740	ifp->if_type = IFT_BRIDGE;
741#ifdef VIMAGE
742	ifp->if_reassign = bridge_reassign;
743#endif
744
745	BRIDGE_LIST_LOCK();
746	LIST_INSERT_HEAD(&V_bridge_list, sc, sc_list);
747	BRIDGE_LIST_UNLOCK();
748
749	return (0);
750}
751
752static void
753bridge_clone_destroy_cb(struct epoch_context *ctx)
754{
755	struct bridge_softc *sc;
756
757	sc = __containerof(ctx, struct bridge_softc, sc_epoch_ctx);
758
759	BRIDGE_LOCK_DESTROY(sc);
760	free(sc, M_DEVBUF);
761}
762
763/*
764 * bridge_clone_destroy:
765 *
766 *	Destroy a bridge instance.
767 */
768static void
769bridge_clone_destroy(struct ifnet *ifp)
770{
771	struct bridge_softc *sc = ifp->if_softc;
772	struct bridge_iflist *bif;
773	struct epoch_tracker et;
774
775	BRIDGE_LOCK(sc);
776
777	bridge_stop(ifp, 1);
778	ifp->if_flags &= ~IFF_UP;
779
780	while ((bif = CK_LIST_FIRST(&sc->sc_iflist)) != NULL)
781		bridge_delete_member(sc, bif, 0);
782
783	while ((bif = CK_LIST_FIRST(&sc->sc_spanlist)) != NULL) {
784		bridge_delete_span(sc, bif);
785	}
786
787	/* Tear down the routing table. */
788	bridge_rtable_fini(sc);
789
790	BRIDGE_UNLOCK(sc);
791
792	NET_EPOCH_ENTER(et);
793
794	callout_drain(&sc->sc_brcallout);
795
796	BRIDGE_LIST_LOCK();
797	LIST_REMOVE(sc, sc_list);
798	BRIDGE_LIST_UNLOCK();
799
800	bstp_detach(&sc->sc_stp);
801	NET_EPOCH_EXIT(et);
802
803	ether_ifdetach(ifp);
804	if_free(ifp);
805
806	NET_EPOCH_CALL(bridge_clone_destroy_cb, &sc->sc_epoch_ctx);
807}
808
809/*
810 * bridge_ioctl:
811 *
812 *	Handle a control request from the operator.
813 */
814static int
815bridge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
816{
817	struct bridge_softc *sc = ifp->if_softc;
818	struct ifreq *ifr = (struct ifreq *)data;
819	struct bridge_iflist *bif;
820	struct thread *td = curthread;
821	union {
822		struct ifbreq ifbreq;
823		struct ifbifconf ifbifconf;
824		struct ifbareq ifbareq;
825		struct ifbaconf ifbaconf;
826		struct ifbrparam ifbrparam;
827		struct ifbropreq ifbropreq;
828	} args;
829	struct ifdrv *ifd = (struct ifdrv *) data;
830	const struct bridge_control *bc;
831	int error = 0, oldmtu;
832
833	BRIDGE_LOCK(sc);
834
835	switch (cmd) {
836	case SIOCADDMULTI:
837	case SIOCDELMULTI:
838		break;
839
840	case SIOCGDRVSPEC:
841	case SIOCSDRVSPEC:
842		if (ifd->ifd_cmd >= bridge_control_table_size) {
843			error = EINVAL;
844			break;
845		}
846		bc = &bridge_control_table[ifd->ifd_cmd];
847
848		if (cmd == SIOCGDRVSPEC &&
849		    (bc->bc_flags & BC_F_COPYOUT) == 0) {
850			error = EINVAL;
851			break;
852		}
853		else if (cmd == SIOCSDRVSPEC &&
854		    (bc->bc_flags & BC_F_COPYOUT) != 0) {
855			error = EINVAL;
856			break;
857		}
858
859		if (bc->bc_flags & BC_F_SUSER) {
860			error = priv_check(td, PRIV_NET_BRIDGE);
861			if (error)
862				break;
863		}
864
865		if (ifd->ifd_len != bc->bc_argsize ||
866		    ifd->ifd_len > sizeof(args)) {
867			error = EINVAL;
868			break;
869		}
870
871		bzero(&args, sizeof(args));
872		if (bc->bc_flags & BC_F_COPYIN) {
873			error = copyin(ifd->ifd_data, &args, ifd->ifd_len);
874			if (error)
875				break;
876		}
877
878		oldmtu = ifp->if_mtu;
879		error = (*bc->bc_func)(sc, &args);
880		if (error)
881			break;
882
		/*
		 * The bridge MTU may change during addition of the first port.
		 * If it did, perform the network-layer-specific procedures.
		 */
887		if (ifp->if_mtu != oldmtu) {
888#ifdef INET6
889			nd6_setmtu(ifp);
890#endif
891			rt_updatemtu(ifp);
892		}
893
894		if (bc->bc_flags & BC_F_COPYOUT)
895			error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
896
897		break;
898
899	case SIOCSIFFLAGS:
900		if (!(ifp->if_flags & IFF_UP) &&
901		    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
902			/*
903			 * If interface is marked down and it is running,
904			 * then stop and disable it.
905			 */
906			bridge_stop(ifp, 1);
907		} else if ((ifp->if_flags & IFF_UP) &&
908		    !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
909			/*
910			 * If interface is marked up and it is stopped, then
911			 * start it.
912			 */
913			BRIDGE_UNLOCK(sc);
914			(*ifp->if_init)(sc);
915			BRIDGE_LOCK(sc);
916		}
917		break;
918
919	case SIOCSIFMTU:
920		if (ifr->ifr_mtu < 576) {
921			error = EINVAL;
922			break;
923		}
924		if (CK_LIST_EMPTY(&sc->sc_iflist)) {
925			sc->sc_ifp->if_mtu = ifr->ifr_mtu;
926			break;
927		}
928		CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
929			if (bif->bif_ifp->if_mtu != ifr->ifr_mtu) {
930				log(LOG_NOTICE, "%s: invalid MTU: %u(%s)"
931				    " != %d\n", sc->sc_ifp->if_xname,
932				    bif->bif_ifp->if_mtu,
933				    bif->bif_ifp->if_xname, ifr->ifr_mtu);
934				error = EINVAL;
935				break;
936			}
937		}
938		if (!error)
939			sc->sc_ifp->if_mtu = ifr->ifr_mtu;
940		break;
941	default:
942		/*
943		 * drop the lock as ether_ioctl() will call bridge_start() and
944		 * cause the lock to be recursed.
945		 */
946		BRIDGE_UNLOCK(sc);
947		error = ether_ioctl(ifp, cmd, data);
948		BRIDGE_LOCK(sc);
949		break;
950	}
951
952	BRIDGE_UNLOCK(sc);
953
954	return (error);
955}
956
957/*
958 * bridge_mutecaps:
959 *
960 *	Clear or restore unwanted capabilities on the member interface
961 */
962static void
963bridge_mutecaps(struct bridge_softc *sc)
964{
965	struct bridge_iflist *bif;
966	int enabled, mask;
967
968	BRIDGE_LOCK_ASSERT(sc);
969
970	/* Initial bitmask of capabilities to test */
971	mask = BRIDGE_IFCAPS_MASK;
972
973	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
		/* Every member must support it or it's disabled */
975		mask &= bif->bif_savedcaps;
976	}
977
978	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
979		enabled = bif->bif_ifp->if_capenable;
980		enabled &= ~BRIDGE_IFCAPS_STRIP;
981		/* strip off mask bits and enable them again if allowed */
982		enabled &= ~BRIDGE_IFCAPS_MASK;
983		enabled |= mask;
984		bridge_set_ifcap(sc, bif, enabled);
985	}
986}
987
988static void
989bridge_set_ifcap(struct bridge_softc *sc, struct bridge_iflist *bif, int set)
990{
991	struct ifnet *ifp = bif->bif_ifp;
992	struct ifreq ifr;
993	int error, mask, stuck;
994
995	bzero(&ifr, sizeof(ifr));
996	ifr.ifr_reqcap = set;
997
998	if (ifp->if_capenable != set) {
999		error = (*ifp->if_ioctl)(ifp, SIOCSIFCAP, (caddr_t)&ifr);
1000		if (error)
1001			if_printf(sc->sc_ifp,
1002			    "error setting capabilities on %s: %d\n",
1003			    ifp->if_xname, error);
1004		mask = BRIDGE_IFCAPS_MASK | BRIDGE_IFCAPS_STRIP;
1005		stuck = ifp->if_capenable & mask & ~set;
1006		if (stuck != 0)
1007			if_printf(sc->sc_ifp,
1008			    "can't disable some capabilities on %s: 0x%x\n",
1009			    ifp->if_xname, stuck);
1010	}
1011}
1012
1013/*
1014 * bridge_lookup_member:
1015 *
1016 *	Lookup a bridge member interface.
1017 */
1018static struct bridge_iflist *
1019bridge_lookup_member(struct bridge_softc *sc, const char *name)
1020{
1021	struct bridge_iflist *bif;
1022	struct ifnet *ifp;
1023
1024	BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(sc);
1025
1026	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1027		ifp = bif->bif_ifp;
1028		if (strcmp(ifp->if_xname, name) == 0)
1029			return (bif);
1030	}
1031
1032	return (NULL);
1033}
1034
1035/*
1036 * bridge_lookup_member_if:
1037 *
1038 *	Lookup a bridge member interface by ifnet*.
1039 */
1040static struct bridge_iflist *
1041bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp)
1042{
1043	struct bridge_iflist *bif;
1044
1045	BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(sc);
1046
1047	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1048		if (bif->bif_ifp == member_ifp)
1049			return (bif);
1050	}
1051
1052	return (NULL);
1053}
1054
1055static void
1056bridge_delete_member_cb(struct epoch_context *ctx)
1057{
1058	struct bridge_iflist *bif;
1059
1060	bif = __containerof(ctx, struct bridge_iflist, bif_epoch_ctx);
1061
1062	free(bif, M_DEVBUF);
1063}
1064
1065/*
1066 * bridge_delete_member:
1067 *
1068 *	Delete the specified member interface.
1069 */
1070static void
1071bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif,
1072    int gone)
1073{
1074	struct ifnet *ifs = bif->bif_ifp;
1075	struct ifnet *fif = NULL;
1076	struct bridge_iflist *bifl;
1077
1078	BRIDGE_LOCK_ASSERT(sc);
1079
1080	if (bif->bif_flags & IFBIF_STP)
1081		bstp_disable(&bif->bif_stp);
1082
1083	ifs->if_bridge = NULL;
1084	CK_LIST_REMOVE(bif, bif_next);
1085
1086	/*
1087	 * If removing the interface that gave the bridge its mac address, set
1088	 * the mac address of the bridge to the address of the next member, or
1089	 * to its default address if no members are left.
1090	 */
1091	if (V_bridge_inherit_mac && sc->sc_ifaddr == ifs) {
1092		if (CK_LIST_EMPTY(&sc->sc_iflist)) {
1093			bcopy(&sc->sc_defaddr,
1094			    IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
1095			sc->sc_ifaddr = NULL;
1096		} else {
1097			bifl = CK_LIST_FIRST(&sc->sc_iflist);
1098			fif = bifl->bif_ifp;
1099			bcopy(IF_LLADDR(fif),
1100			    IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
1101			sc->sc_ifaddr = fif;
1102		}
1103		EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
1104	}
1105
1106	bridge_linkcheck(sc);
	bridge_mutecaps(sc);	/* recalculate now that this interface is removed */
1108	BRIDGE_RT_LOCK(sc);
1109	bridge_rtdelete(sc, ifs, IFBF_FLUSHALL);
1110	BRIDGE_RT_UNLOCK(sc);
1111	KASSERT(bif->bif_addrcnt == 0,
1112	    ("%s: %d bridge routes referenced", __func__, bif->bif_addrcnt));
1113
1114	ifs->if_bridge_output = NULL;
1115	ifs->if_bridge_input = NULL;
1116	ifs->if_bridge_linkstate = NULL;
1117	if (!gone) {
1118		switch (ifs->if_type) {
1119		case IFT_ETHER:
1120		case IFT_L2VLAN:
1121			/*
1122			 * Take the interface out of promiscuous mode, but only
1123			 * if it was promiscuous in the first place. It might
1124			 * not be if we're in the bridge_ioctl_add() error path.
1125			 */
1126			if (ifs->if_flags & IFF_PROMISC)
1127				(void) ifpromisc(ifs, 0);
1128			break;
1129
1130		case IFT_GIF:
1131			break;
1132
1133		default:
1134#ifdef DIAGNOSTIC
1135			panic("bridge_delete_member: impossible");
1136#endif
1137			break;
1138		}
		/* re-enable any interface capabilities */
1140		bridge_set_ifcap(sc, bif, bif->bif_savedcaps);
1141	}
1142	bstp_destroy(&bif->bif_stp);	/* prepare to free */
1143
1144	NET_EPOCH_CALL(bridge_delete_member_cb, &bif->bif_epoch_ctx);
1145}
1146
1147/*
1148 * bridge_delete_span:
1149 *
1150 *	Delete the specified span interface.
1151 */
1152static void
1153bridge_delete_span(struct bridge_softc *sc, struct bridge_iflist *bif)
1154{
1155	BRIDGE_LOCK_ASSERT(sc);
1156
1157	KASSERT(bif->bif_ifp->if_bridge == NULL,
1158	    ("%s: not a span interface", __func__));
1159
1160	CK_LIST_REMOVE(bif, bif_next);
1161
1162	NET_EPOCH_CALL(bridge_delete_member_cb, &bif->bif_epoch_ctx);
1163}
1164
1165static int
1166bridge_ioctl_add(struct bridge_softc *sc, void *arg)
1167{
1168	struct ifbreq *req = arg;
1169	struct bridge_iflist *bif = NULL;
1170	struct ifnet *ifs;
1171	int error = 0;
1172
1173	ifs = ifunit(req->ifbr_ifsname);
1174	if (ifs == NULL)
1175		return (ENOENT);
1176	if (ifs->if_ioctl == NULL)	/* must be supported */
1177		return (EINVAL);
1178
1179	/* If it's in the span list, it can't be a member. */
1180	CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1181		if (ifs == bif->bif_ifp)
1182			return (EBUSY);
1183
1184	if (ifs->if_bridge == sc)
1185		return (EEXIST);
1186
1187	if (ifs->if_bridge != NULL)
1188		return (EBUSY);
1189
1190	switch (ifs->if_type) {
1191	case IFT_ETHER:
1192	case IFT_L2VLAN:
1193	case IFT_GIF:
1194		/* permitted interface types */
1195		break;
1196	default:
1197		return (EINVAL);
1198	}
1199
1200#ifdef INET6
	/*
	 * The parent interface and the member interfaces must not both
	 * carry valid link-local scope inet6 addresses at the same time;
	 * allowing this would violate the link-local scope zone.  Adding a
	 * member interface that has inet6 addresses while the parent also
	 * has inet6 therefore triggers removal of all inet6 addresses on
	 * the member interface.
	 */
1210
1211	/* Check if the parent interface has a link-local scope addr. */
1212	if (V_allow_llz_overlap == 0 &&
1213	    in6ifa_llaonifp(sc->sc_ifp) != NULL) {
1214		/*
1215		 * If any, remove all inet6 addresses from the member
1216		 * interfaces.
1217		 */
1218		CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1219 			if (in6ifa_llaonifp(bif->bif_ifp)) {
1220				in6_ifdetach(bif->bif_ifp);
1221				if_printf(sc->sc_ifp,
1222				    "IPv6 addresses on %s have been removed "
1223				    "before adding it as a member to prevent "
1224				    "IPv6 address scope violation.\n",
1225				    bif->bif_ifp->if_xname);
1226			}
1227		}
1228		if (in6ifa_llaonifp(ifs)) {
1229			in6_ifdetach(ifs);
1230			if_printf(sc->sc_ifp,
1231			    "IPv6 addresses on %s have been removed "
1232			    "before adding it as a member to prevent "
1233			    "IPv6 address scope violation.\n",
1234			    ifs->if_xname);
1235		}
1236	}
1237#endif
1238	/* Allow the first Ethernet member to define the MTU */
1239	if (CK_LIST_EMPTY(&sc->sc_iflist))
1240		sc->sc_ifp->if_mtu = ifs->if_mtu;
1241	else if (sc->sc_ifp->if_mtu != ifs->if_mtu) {
1242		if_printf(sc->sc_ifp, "invalid MTU: %u(%s) != %u\n",
1243		    ifs->if_mtu, ifs->if_xname, sc->sc_ifp->if_mtu);
1244		return (EINVAL);
1245	}
1246
1247	bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
1248	if (bif == NULL)
1249		return (ENOMEM);
1250
1251	bif->bif_ifp = ifs;
1252	bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
1253	bif->bif_savedcaps = ifs->if_capenable;
1254
1255	/*
1256	 * Assign the interface's MAC address to the bridge if it's the first
1257	 * member and the MAC address of the bridge has not been changed from
1258	 * the default randomly generated one.
1259	 */
1260	if (V_bridge_inherit_mac && CK_LIST_EMPTY(&sc->sc_iflist) &&
1261	    !memcmp(IF_LLADDR(sc->sc_ifp), sc->sc_defaddr.octet, ETHER_ADDR_LEN)) {
1262		bcopy(IF_LLADDR(ifs), IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
1263		sc->sc_ifaddr = ifs;
1264		EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
1265	}
1266
1267	ifs->if_bridge = sc;
1268	ifs->if_bridge_output = bridge_output;
1269	ifs->if_bridge_input = bridge_input;
1270	ifs->if_bridge_linkstate = bridge_linkstate;
1271	bstp_create(&sc->sc_stp, &bif->bif_stp, bif->bif_ifp);
1272	/*
1273	 * XXX: XLOCK HERE!?!
1274	 *
1275	 * NOTE: insert_***HEAD*** should be safe for the traversals.
1276	 */
1277	CK_LIST_INSERT_HEAD(&sc->sc_iflist, bif, bif_next);
1278
1279	/* Set interface capabilities to the intersection set of all members */
1280	bridge_mutecaps(sc);
1281	bridge_linkcheck(sc);
1282
1283	/* Place the interface into promiscuous mode */
1284	switch (ifs->if_type) {
1285		case IFT_ETHER:
1286		case IFT_L2VLAN:
1287			error = ifpromisc(ifs, 1);
1288			break;
1289	}
1290
1291	if (error)
1292		bridge_delete_member(sc, bif, 0);
1293	return (error);
1294}
1295
1296static int
1297bridge_ioctl_del(struct bridge_softc *sc, void *arg)
1298{
1299	struct ifbreq *req = arg;
1300	struct bridge_iflist *bif;
1301
1302	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1303	if (bif == NULL)
1304		return (ENOENT);
1305
1306	bridge_delete_member(sc, bif, 0);
1307
1308	return (0);
1309}
1310
1311static int
1312bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
1313{
1314	struct ifbreq *req = arg;
1315	struct bridge_iflist *bif;
1316	struct bstp_port *bp;
1317
1318	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1319	if (bif == NULL)
1320		return (ENOENT);
1321
1322	bp = &bif->bif_stp;
1323	req->ifbr_ifsflags = bif->bif_flags;
1324	req->ifbr_state = bp->bp_state;
1325	req->ifbr_priority = bp->bp_priority;
1326	req->ifbr_path_cost = bp->bp_path_cost;
1327	req->ifbr_portno = bif->bif_ifp->if_index & 0xfff;
1328	req->ifbr_proto = bp->bp_protover;
1329	req->ifbr_role = bp->bp_role;
1330	req->ifbr_stpflags = bp->bp_flags;
1331	req->ifbr_addrcnt = bif->bif_addrcnt;
1332	req->ifbr_addrmax = bif->bif_addrmax;
1333	req->ifbr_addrexceeded = bif->bif_addrexceeded;
1334
1335	/* Copy STP state options as flags */
1336	if (bp->bp_operedge)
1337		req->ifbr_ifsflags |= IFBIF_BSTP_EDGE;
1338	if (bp->bp_flags & BSTP_PORT_AUTOEDGE)
1339		req->ifbr_ifsflags |= IFBIF_BSTP_AUTOEDGE;
1340	if (bp->bp_ptp_link)
1341		req->ifbr_ifsflags |= IFBIF_BSTP_PTP;
1342	if (bp->bp_flags & BSTP_PORT_AUTOPTP)
1343		req->ifbr_ifsflags |= IFBIF_BSTP_AUTOPTP;
1344	if (bp->bp_flags & BSTP_PORT_ADMEDGE)
1345		req->ifbr_ifsflags |= IFBIF_BSTP_ADMEDGE;
1346	if (bp->bp_flags & BSTP_PORT_ADMCOST)
1347		req->ifbr_ifsflags |= IFBIF_BSTP_ADMCOST;
1348	return (0);
1349}
1350
1351static int
1352bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
1353{
1354	struct epoch_tracker et;
1355	struct ifbreq *req = arg;
1356	struct bridge_iflist *bif;
1357	struct bstp_port *bp;
1358	int error;
1359
1360	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1361	if (bif == NULL)
1362		return (ENOENT);
1363	bp = &bif->bif_stp;
1364
1365	if (req->ifbr_ifsflags & IFBIF_SPAN)
1366		/* SPAN is readonly */
1367		return (EINVAL);
1368
1369	NET_EPOCH_ENTER(et);
1370
1371	if (req->ifbr_ifsflags & IFBIF_STP) {
1372		if ((bif->bif_flags & IFBIF_STP) == 0) {
1373			error = bstp_enable(&bif->bif_stp);
1374			if (error) {
1375				NET_EPOCH_EXIT(et);
1376				return (error);
1377			}
1378		}
1379	} else {
1380		if ((bif->bif_flags & IFBIF_STP) != 0)
1381			bstp_disable(&bif->bif_stp);
1382	}
1383
1384	/* Pass on STP flags */
1385	bstp_set_edge(bp, req->ifbr_ifsflags & IFBIF_BSTP_EDGE ? 1 : 0);
1386	bstp_set_autoedge(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOEDGE ? 1 : 0);
1387	bstp_set_ptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_PTP ? 1 : 0);
1388	bstp_set_autoptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOPTP ? 1 : 0);
1389
1390	/* Save the bits relating to the bridge */
1391	bif->bif_flags = req->ifbr_ifsflags & IFBIFMASK;
1392
1393	NET_EPOCH_EXIT(et);
1394
1395	return (0);
1396}
1397
1398static int
1399bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
1400{
1401	struct ifbrparam *param = arg;
1402
1403	sc->sc_brtmax = param->ifbrp_csize;
1404	bridge_rttrim(sc);
1405
1406	return (0);
1407}
1408
1409static int
1410bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
1411{
1412	struct ifbrparam *param = arg;
1413
1414	param->ifbrp_csize = sc->sc_brtmax;
1415
1416	return (0);
1417}
1418
1419static int
1420bridge_ioctl_gifs(struct bridge_softc *sc, void *arg)
1421{
1422	struct ifbifconf *bifc = arg;
1423	struct bridge_iflist *bif;
1424	struct ifbreq breq;
1425	char *buf, *outbuf;
1426	int count, buflen, len, error = 0;
1427
1428	count = 0;
1429	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next)
1430		count++;
1431	CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1432		count++;
1433
1434	buflen = sizeof(breq) * count;
1435	if (bifc->ifbic_len == 0) {
1436		bifc->ifbic_len = buflen;
1437		return (0);
1438	}
1439	outbuf = malloc(buflen, M_TEMP, M_NOWAIT | M_ZERO);
1440	if (outbuf == NULL)
1441		return (ENOMEM);
1442
1443	count = 0;
1444	buf = outbuf;
1445	len = min(bifc->ifbic_len, buflen);
1446	bzero(&breq, sizeof(breq));
1447	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1448		if (len < sizeof(breq))
1449			break;
1450
1451		strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname,
1452		    sizeof(breq.ifbr_ifsname));
1453		/* Fill in the ifbreq structure */
1454		error = bridge_ioctl_gifflags(sc, &breq);
1455		if (error)
1456			break;
1457		memcpy(buf, &breq, sizeof(breq));
1458		count++;
1459		buf += sizeof(breq);
1460		len -= sizeof(breq);
1461	}
1462	CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
1463		if (len < sizeof(breq))
1464			break;
1465
1466		strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname,
1467		    sizeof(breq.ifbr_ifsname));
1468		breq.ifbr_ifsflags = bif->bif_flags;
1469		breq.ifbr_portno = bif->bif_ifp->if_index & 0xfff;
1470		memcpy(buf, &breq, sizeof(breq));
1471		count++;
1472		buf += sizeof(breq);
1473		len -= sizeof(breq);
1474	}
1475
1476	bifc->ifbic_len = sizeof(breq) * count;
1477	error = copyout(outbuf, bifc->ifbic_req, bifc->ifbic_len);
1478	free(outbuf, M_TEMP);
1479	return (error);
1480}
1481
1482static int
1483bridge_ioctl_rts(struct bridge_softc *sc, void *arg)
1484{
1485	struct ifbaconf *bac = arg;
1486	struct bridge_rtnode *brt;
1487	struct ifbareq bareq;
1488	char *buf, *outbuf;
1489	int count, buflen, len, error = 0;
1490
1491	if (bac->ifbac_len == 0)
1492		return (0);
1493
1494	count = 0;
1495	CK_LIST_FOREACH(brt, &sc->sc_rtlist, brt_list)
1496		count++;
1497	buflen = sizeof(bareq) * count;
1498
1499	outbuf = malloc(buflen, M_TEMP, M_NOWAIT | M_ZERO);
1500	if (outbuf == NULL)
1501		return (ENOMEM);
1502
1503	count = 0;
1504	buf = outbuf;
1505	len = min(bac->ifbac_len, buflen);
1506	bzero(&bareq, sizeof(bareq));
1507	CK_LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
1508		if (len < sizeof(bareq))
1509			goto out;
1510		strlcpy(bareq.ifba_ifsname, brt->brt_ifp->if_xname,
1511		    sizeof(bareq.ifba_ifsname));
1512		memcpy(bareq.ifba_dst, brt->brt_addr, sizeof(brt->brt_addr));
1513		bareq.ifba_vlan = brt->brt_vlan;
1514		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
1515				time_uptime < brt->brt_expire)
1516			bareq.ifba_expire = brt->brt_expire - time_uptime;
1517		else
1518			bareq.ifba_expire = 0;
1519		bareq.ifba_flags = brt->brt_flags;
1520
1521		memcpy(buf, &bareq, sizeof(bareq));
1522		count++;
1523		buf += sizeof(bareq);
1524		len -= sizeof(bareq);
1525	}
1526out:
1527	bac->ifbac_len = sizeof(bareq) * count;
1528	error = copyout(outbuf, bac->ifbac_req, bac->ifbac_len);
1529	free(outbuf, M_TEMP);
1530	return (error);
1531}
1532
1533static int
1534bridge_ioctl_saddr(struct bridge_softc *sc, void *arg)
1535{
1536	struct ifbareq *req = arg;
1537	struct bridge_iflist *bif;
1538	struct epoch_tracker et;
1539	int error;
1540
1541	NET_EPOCH_ENTER(et);
1542	bif = bridge_lookup_member(sc, req->ifba_ifsname);
1543	if (bif == NULL) {
1544		NET_EPOCH_EXIT(et);
1545		return (ENOENT);
1546	}
1547
1548	/* bridge_rtupdate() may acquire the lock. */
1549	error = bridge_rtupdate(sc, req->ifba_dst, req->ifba_vlan, bif, 1,
1550	    req->ifba_flags);
1551	NET_EPOCH_EXIT(et);
1552
1553	return (error);
1554}
1555
1556static int
1557bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
1558{
1559	struct ifbrparam *param = arg;
1560
1561	sc->sc_brttimeout = param->ifbrp_ctime;
1562	return (0);
1563}
1564
1565static int
1566bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
1567{
1568	struct ifbrparam *param = arg;
1569
1570	param->ifbrp_ctime = sc->sc_brttimeout;
1571	return (0);
1572}
1573
1574static int
1575bridge_ioctl_daddr(struct bridge_softc *sc, void *arg)
1576{
1577	struct ifbareq *req = arg;
1578
1579	return (bridge_rtdaddr(sc, req->ifba_dst, req->ifba_vlan));
1580}
1581
1582static int
1583bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
1584{
1585	struct ifbreq *req = arg;
1586
1587	BRIDGE_RT_LOCK(sc);
1588	bridge_rtflush(sc, req->ifbr_ifsflags);
1589	BRIDGE_RT_UNLOCK(sc);
1590
1591	return (0);
1592}
1593
1594static int
1595bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
1596{
1597	struct ifbrparam *param = arg;
1598	struct bstp_state *bs = &sc->sc_stp;
1599
1600	param->ifbrp_prio = bs->bs_bridge_priority;
1601	return (0);
1602}
1603
1604static int
1605bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
1606{
1607	struct ifbrparam *param = arg;
1608
1609	return (bstp_set_priority(&sc->sc_stp, param->ifbrp_prio));
1610}
1611
1612static int
1613bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
1614{
1615	struct ifbrparam *param = arg;
1616	struct bstp_state *bs = &sc->sc_stp;
1617
1618	param->ifbrp_hellotime = bs->bs_bridge_htime >> 8;
1619	return (0);
1620}
1621
1622static int
1623bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
1624{
1625	struct ifbrparam *param = arg;
1626
1627	return (bstp_set_htime(&sc->sc_stp, param->ifbrp_hellotime));
1628}
1629
1630static int
1631bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
1632{
1633	struct ifbrparam *param = arg;
1634	struct bstp_state *bs = &sc->sc_stp;
1635
1636	param->ifbrp_fwddelay = bs->bs_bridge_fdelay >> 8;
1637	return (0);
1638}
1639
1640static int
1641bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
1642{
1643	struct ifbrparam *param = arg;
1644
1645	return (bstp_set_fdelay(&sc->sc_stp, param->ifbrp_fwddelay));
1646}
1647
1648static int
1649bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
1650{
1651	struct ifbrparam *param = arg;
1652	struct bstp_state *bs = &sc->sc_stp;
1653
1654	param->ifbrp_maxage = bs->bs_bridge_max_age >> 8;
1655	return (0);
1656}
1657
1658static int
1659bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
1660{
1661	struct ifbrparam *param = arg;
1662
1663	return (bstp_set_maxage(&sc->sc_stp, param->ifbrp_maxage));
1664}
1665
1666static int
1667bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
1668{
1669	struct ifbreq *req = arg;
1670	struct bridge_iflist *bif;
1671
1672	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1673	if (bif == NULL)
1674		return (ENOENT);
1675
1676	return (bstp_set_port_priority(&bif->bif_stp, req->ifbr_priority));
1677}
1678
1679static int
1680bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
1681{
1682	struct ifbreq *req = arg;
1683	struct bridge_iflist *bif;
1684
1685	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1686	if (bif == NULL)
1687		return (ENOENT);
1688
1689	return (bstp_set_path_cost(&bif->bif_stp, req->ifbr_path_cost));
1690}
1691
1692static int
1693bridge_ioctl_sifmaxaddr(struct bridge_softc *sc, void *arg)
1694{
1695	struct ifbreq *req = arg;
1696	struct bridge_iflist *bif;
1697
1698	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1699	if (bif == NULL)
1700		return (ENOENT);
1701
1702	bif->bif_addrmax = req->ifbr_addrmax;
1703	return (0);
1704}
1705
1706static int
1707bridge_ioctl_addspan(struct bridge_softc *sc, void *arg)
1708{
1709	struct ifbreq *req = arg;
1710	struct bridge_iflist *bif = NULL;
1711	struct ifnet *ifs;
1712
1713	ifs = ifunit(req->ifbr_ifsname);
1714	if (ifs == NULL)
1715		return (ENOENT);
1716
1717	CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1718		if (ifs == bif->bif_ifp)
1719			return (EBUSY);
1720
1721	if (ifs->if_bridge != NULL)
1722		return (EBUSY);
1723
1724	switch (ifs->if_type) {
1725		case IFT_ETHER:
1726		case IFT_GIF:
1727		case IFT_L2VLAN:
1728			break;
1729		default:
1730			return (EINVAL);
1731	}
1732
1733	bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
1734	if (bif == NULL)
1735		return (ENOMEM);
1736
1737	bif->bif_ifp = ifs;
1738	bif->bif_flags = IFBIF_SPAN;
1739
1740	CK_LIST_INSERT_HEAD(&sc->sc_spanlist, bif, bif_next);
1741
1742	return (0);
1743}
1744
1745static int
1746bridge_ioctl_delspan(struct bridge_softc *sc, void *arg)
1747{
1748	struct ifbreq *req = arg;
1749	struct bridge_iflist *bif;
1750	struct ifnet *ifs;
1751
1752	ifs = ifunit(req->ifbr_ifsname);
1753	if (ifs == NULL)
1754		return (ENOENT);
1755
1756	CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1757		if (ifs == bif->bif_ifp)
1758			break;
1759
1760	if (bif == NULL)
1761		return (ENOENT);
1762
1763	bridge_delete_span(sc, bif);
1764
1765	return (0);
1766}
1767
1768static int
1769bridge_ioctl_gbparam(struct bridge_softc *sc, void *arg)
1770{
1771	struct ifbropreq *req = arg;
1772	struct bstp_state *bs = &sc->sc_stp;
1773	struct bstp_port *root_port;
1774
1775	req->ifbop_maxage = bs->bs_bridge_max_age >> 8;
1776	req->ifbop_hellotime = bs->bs_bridge_htime >> 8;
1777	req->ifbop_fwddelay = bs->bs_bridge_fdelay >> 8;
1778
1779	root_port = bs->bs_root_port;
1780	if (root_port == NULL)
1781		req->ifbop_root_port = 0;
1782	else
1783		req->ifbop_root_port = root_port->bp_ifp->if_index;
1784
1785	req->ifbop_holdcount = bs->bs_txholdcount;
1786	req->ifbop_priority = bs->bs_bridge_priority;
1787	req->ifbop_protocol = bs->bs_protover;
1788	req->ifbop_root_path_cost = bs->bs_root_pv.pv_cost;
1789	req->ifbop_bridgeid = bs->bs_bridge_pv.pv_dbridge_id;
1790	req->ifbop_designated_root = bs->bs_root_pv.pv_root_id;
1791	req->ifbop_designated_bridge = bs->bs_root_pv.pv_dbridge_id;
1792	req->ifbop_last_tc_time.tv_sec = bs->bs_last_tc_time.tv_sec;
1793	req->ifbop_last_tc_time.tv_usec = bs->bs_last_tc_time.tv_usec;
1794
1795	return (0);
1796}
1797
1798static int
1799bridge_ioctl_grte(struct bridge_softc *sc, void *arg)
1800{
1801	struct ifbrparam *param = arg;
1802
1803	param->ifbrp_cexceeded = sc->sc_brtexceeded;
1804	return (0);
1805}
1806
1807static int
1808bridge_ioctl_gifsstp(struct bridge_softc *sc, void *arg)
1809{
1810	struct ifbpstpconf *bifstp = arg;
1811	struct bridge_iflist *bif;
1812	struct bstp_port *bp;
1813	struct ifbpstpreq bpreq;
1814	char *buf, *outbuf;
1815	int count, buflen, len, error = 0;
1816
1817	count = 0;
1818	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1819		if ((bif->bif_flags & IFBIF_STP) != 0)
1820			count++;
1821	}
1822
1823	buflen = sizeof(bpreq) * count;
1824	if (bifstp->ifbpstp_len == 0) {
1825		bifstp->ifbpstp_len = buflen;
1826		return (0);
1827	}
1828
1829	outbuf = malloc(buflen, M_TEMP, M_NOWAIT | M_ZERO);
1830	if (outbuf == NULL)
1831		return (ENOMEM);
1832
1833	count = 0;
1834	buf = outbuf;
1835	len = min(bifstp->ifbpstp_len, buflen);
1836	bzero(&bpreq, sizeof(bpreq));
1837	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1838		if (len < sizeof(bpreq))
1839			break;
1840
1841		if ((bif->bif_flags & IFBIF_STP) == 0)
1842			continue;
1843
1844		bp = &bif->bif_stp;
1845		bpreq.ifbp_portno = bif->bif_ifp->if_index & 0xfff;
1846		bpreq.ifbp_fwd_trans = bp->bp_forward_transitions;
1847		bpreq.ifbp_design_cost = bp->bp_desg_pv.pv_cost;
1848		bpreq.ifbp_design_port = bp->bp_desg_pv.pv_port_id;
1849		bpreq.ifbp_design_bridge = bp->bp_desg_pv.pv_dbridge_id;
1850		bpreq.ifbp_design_root = bp->bp_desg_pv.pv_root_id;
1851
1852		memcpy(buf, &bpreq, sizeof(bpreq));
1853		count++;
1854		buf += sizeof(bpreq);
1855		len -= sizeof(bpreq);
1856	}
1857
1858	bifstp->ifbpstp_len = sizeof(bpreq) * count;
1859	error = copyout(outbuf, bifstp->ifbpstp_req, bifstp->ifbpstp_len);
1860	free(outbuf, M_TEMP);
1861	return (error);
1862}
1863
1864static int
1865bridge_ioctl_sproto(struct bridge_softc *sc, void *arg)
1866{
1867	struct ifbrparam *param = arg;
1868
1869	return (bstp_set_protocol(&sc->sc_stp, param->ifbrp_proto));
1870}
1871
1872static int
1873bridge_ioctl_stxhc(struct bridge_softc *sc, void *arg)
1874{
1875	struct ifbrparam *param = arg;
1876
1877	return (bstp_set_holdcount(&sc->sc_stp, param->ifbrp_txhc));
1878}
1879
1880/*
1881 * bridge_ifdetach:
1882 *
1883 *	Detach an interface from a bridge.  Called when a member
1884 *	interface is detaching.
1885 */
1886static void
1887bridge_ifdetach(void *arg __unused, struct ifnet *ifp)
1888{
1889	struct bridge_softc *sc = ifp->if_bridge;
1890	struct bridge_iflist *bif;
1891
1892	if (ifp->if_flags & IFF_RENAMING)
1893		return;
1894	if (V_bridge_cloner == NULL) {
1895		/*
1896		 * This detach handler can be called after
1897		 * vnet_bridge_uninit().  Just return in that case.
1898		 */
1899		return;
1900	}
1901	/* Check if the interface is a bridge member */
1902	if (sc != NULL) {
1903		BRIDGE_LOCK(sc);
1904
1905		bif = bridge_lookup_member_if(sc, ifp);
1906		if (bif != NULL)
1907			bridge_delete_member(sc, bif, 1);
1908
1909		BRIDGE_UNLOCK(sc);
1910		return;
1911	}
1912
1913	/* Check if the interface is a span port */
1914	BRIDGE_LIST_LOCK();
1915	LIST_FOREACH(sc, &V_bridge_list, sc_list) {
1916		BRIDGE_LOCK(sc);
1917		CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1918			if (ifp == bif->bif_ifp) {
1919				bridge_delete_span(sc, bif);
1920				break;
1921			}
1922
1923		BRIDGE_UNLOCK(sc);
1924	}
1925	BRIDGE_LIST_UNLOCK();
1926}
1927
1928/*
1929 * bridge_init:
1930 *
1931 *	Initialize a bridge interface.
1932 */
1933static void
1934bridge_init(void *xsc)
1935{
1936	struct bridge_softc *sc = (struct bridge_softc *)xsc;
1937	struct ifnet *ifp = sc->sc_ifp;
1938
1939	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1940		return;
1941
1942	BRIDGE_LOCK(sc);
1943	callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz,
1944	    bridge_timer, sc);
1945
1946	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1947	bstp_init(&sc->sc_stp);		/* Initialize Spanning Tree */
1948
1949	BRIDGE_UNLOCK(sc);
1950}
1951
1952/*
1953 * bridge_stop:
1954 *
1955 *	Stop the bridge interface.
1956 */
1957static void
1958bridge_stop(struct ifnet *ifp, int disable)
1959{
1960	struct bridge_softc *sc = ifp->if_softc;
1961
1962	BRIDGE_LOCK_ASSERT(sc);
1963
1964	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1965		return;
1966
1967	BRIDGE_RT_LOCK(sc);
1968	callout_stop(&sc->sc_brcallout);
1969
1970	bstp_stop(&sc->sc_stp);
1971
1972	bridge_rtflush(sc, IFBF_FLUSHDYN);
1973	BRIDGE_RT_UNLOCK(sc);
1974
1975	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1976}
1977
1978/*
1979 * bridge_enqueue:
1980 *
1981 *	Enqueue a packet on a bridge member interface.
 */
1984static int
1985bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m)
1986{
1987	int len, err = 0;
1988	short mflags;
1989	struct mbuf *m0;
1990
1991	/* We may be sending a fragment so traverse the mbuf */
1992	for (; m; m = m0) {
1993		m0 = m->m_nextpkt;
1994		m->m_nextpkt = NULL;
1995		len = m->m_pkthdr.len;
1996		mflags = m->m_flags;
1997
1998		/*
		 * If the underlying interface cannot do VLAN tag insertion
		 * itself then attach a packet tag that holds it.
2001		 */
2002		if ((m->m_flags & M_VLANTAG) &&
2003		    (dst_ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) {
2004			m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
2005			if (m == NULL) {
2006				if_printf(dst_ifp,
2007				    "unable to prepend VLAN header\n");
2008				if_inc_counter(dst_ifp, IFCOUNTER_OERRORS, 1);
2009				continue;
2010			}
2011			m->m_flags &= ~M_VLANTAG;
2012		}
2013
		M_ASSERTPKTHDR(m); /* We shouldn't transmit an mbuf without a pkthdr */
2015		if ((err = dst_ifp->if_transmit(dst_ifp, m))) {
2016			m_freem(m0);
2017			if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
2018			break;
2019		}
2020
2021		if_inc_counter(sc->sc_ifp, IFCOUNTER_OPACKETS, 1);
2022		if_inc_counter(sc->sc_ifp, IFCOUNTER_OBYTES, len);
2023		if (mflags & M_MCAST)
2024			if_inc_counter(sc->sc_ifp, IFCOUNTER_OMCASTS, 1);
2025	}
2026
2027	return (err);
2028}
2029
2030/*
2031 * bridge_dummynet:
2032 *
2033 * 	Receive a queued packet from dummynet and pass it on to the output
2034 * 	interface.
2035 *
2036 *	The mbuf has the Ethernet header already attached.
2037 */
2038static void
2039bridge_dummynet(struct mbuf *m, struct ifnet *ifp)
2040{
2041	struct bridge_softc *sc;
2042
2043	sc = ifp->if_bridge;
2044
2045	/*
	 * The packet didn't originate from a member interface. This should only
2047	 * ever happen if a member interface is removed while packets are
2048	 * queued for it.
2049	 */
2050	if (sc == NULL) {
2051		m_freem(m);
2052		return;
2053	}
2054
2055	if (PFIL_HOOKED_OUT(V_inet_pfil_head)
2056#ifdef INET6
2057	    || PFIL_HOOKED_OUT(V_inet6_pfil_head)
2058#endif
2059	    ) {
2060		if (bridge_pfil(&m, sc->sc_ifp, ifp, PFIL_OUT) != 0)
2061			return;
2062		if (m == NULL)
2063			return;
2064	}
2065
2066	bridge_enqueue(sc, ifp, m);
2067}
2068
2069/*
2070 * bridge_output:
2071 *
2072 *	Send output from a bridge member interface.  This
2073 *	performs the bridging function for locally originated
2074 *	packets.
2075 *
2076 *	The mbuf has the Ethernet header already attached.  We must
2077 *	enqueue or free the mbuf before returning.
2078 */
2079static int
2080bridge_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *sa,
2081    struct rtentry *rt)
2082{
2083	struct ether_header *eh;
2084	struct ifnet *bifp, *dst_if;
2085	struct bridge_softc *sc;
2086	uint16_t vlan;
2087
2088	NET_EPOCH_ASSERT();
2089
2090	if (m->m_len < ETHER_HDR_LEN) {
2091		m = m_pullup(m, ETHER_HDR_LEN);
2092		if (m == NULL)
2093			return (0);
2094	}
2095
2096	eh = mtod(m, struct ether_header *);
2097	sc = ifp->if_bridge;
2098	vlan = VLANTAGOF(m);
2099
2100	bifp = sc->sc_ifp;
2101
2102	/*
2103	 * If bridge is down, but the original output interface is up,
2104	 * go ahead and send out that interface.  Otherwise, the packet
2105	 * is dropped below.
2106	 */
2107	if ((bifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2108		dst_if = ifp;
2109		goto sendunicast;
2110	}
2111
2112	/*
	 * If the packet is multicast, or we don't know a better way to
	 * get there, send it out all interfaces.
2115	 */
2116	if (ETHER_IS_MULTICAST(eh->ether_dhost))
2117		dst_if = NULL;
2118	else
2119		dst_if = bridge_rtlookup(sc, eh->ether_dhost, vlan);
2120	/* Tap any traffic not passing back out the originating interface */
2121	if (dst_if != ifp)
2122		ETHER_BPF_MTAP(bifp, m);
2123	if (dst_if == NULL) {
2124		struct bridge_iflist *bif;
2125		struct mbuf *mc;
2126		int used = 0;
2127
2128		bridge_span(sc, m);
2129
2130		CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
2131			dst_if = bif->bif_ifp;
2132
2133			if (dst_if->if_type == IFT_GIF)
2134				continue;
2135			if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
2136				continue;
2137
2138			/*
2139			 * If this is not the original output interface,
2140			 * and the interface is participating in spanning
2141			 * tree, make sure the port is in a state that
2142			 * allows forwarding.
2143			 */
2144			if (dst_if != ifp && (bif->bif_flags & IFBIF_STP) &&
2145			    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
2146				continue;
2147
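			/*
			 * Use the original mbuf for the last member in the
			 * list and a copy for every other member, which
			 * saves one packet copy per broadcast.
			 */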
2148			if (CK_LIST_NEXT(bif, bif_next) == NULL) {
2149				used = 1;
2150				mc = m;
2151			} else {
2152				mc = m_copypacket(m, M_NOWAIT);
2153				if (mc == NULL) {
2154					if_inc_counter(bifp, IFCOUNTER_OERRORS, 1);
2155					continue;
2156				}
2157			}
2158
2159			bridge_enqueue(sc, dst_if, mc);
2160		}
2161		if (used == 0)
2162			m_freem(m);
2163		return (0);
2164	}
2165
2166sendunicast:
2167	/*
2168	 * XXX Spanning tree consideration here?
2169	 */
2170
2171	bridge_span(sc, m);
2172	if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2173		m_freem(m);
2174		return (0);
2175	}
2176
2177	bridge_enqueue(sc, dst_if, m);
2178	return (0);
2179}
2180
2181/*
2182 * bridge_transmit:
2183 *
2184 *	Do output on a bridge.
 */
2187static int
2188bridge_transmit(struct ifnet *ifp, struct mbuf *m)
2189{
2190	struct bridge_softc *sc;
2191	struct ether_header *eh;
2192	struct ifnet *dst_if;
2193	int error = 0;
2194
2195	sc = ifp->if_softc;
2196
2197	ETHER_BPF_MTAP(ifp, m);
2198
2199	eh = mtod(m, struct ether_header *);
2200
2201	if (((m->m_flags & (M_BCAST|M_MCAST)) == 0) &&
2202	    (dst_if = bridge_rtlookup(sc, eh->ether_dhost, 1)) != NULL) {
2203		error = bridge_enqueue(sc, dst_if, m);
2204	} else
2205		bridge_broadcast(sc, ifp, m, 0);
2206
2207	return (error);
2208}
2209
2210/*
 * The ifp->if_qflush entry point for if_bridge(4) is a no-op.
2212 */
2213static void
2214bridge_qflush(struct ifnet *ifp __unused)
2215{
2216}
2217
2218/*
2219 * bridge_forward:
2220 *
2221 *	The forwarding function of the bridge.
2222 *
 *	NOTE: Runs within the net epoch and always consumes the mbuf
 *	(it is forwarded, consumed by the packet filter, or freed).
2224 */
2225static void
2226bridge_forward(struct bridge_softc *sc, struct bridge_iflist *sbif,
2227    struct mbuf *m)
2228{
2229	struct bridge_iflist *dbif;
2230	struct ifnet *src_if, *dst_if, *ifp;
2231	struct ether_header *eh;
2232	uint16_t vlan;
2233	uint8_t *dst;
2234	int error;
2235
2236	NET_EPOCH_ASSERT();
2237
2238	src_if = m->m_pkthdr.rcvif;
2239	ifp = sc->sc_ifp;
2240
2241	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
2242	if_inc_counter(ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
2243	vlan = VLANTAGOF(m);
2244
2245	if ((sbif->bif_flags & IFBIF_STP) &&
2246	    sbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
2247		goto drop;
2248
2249	eh = mtod(m, struct ether_header *);
2250	dst = eh->ether_dhost;
2251
2252	/* If the interface is learning, record the address. */
2253	if (sbif->bif_flags & IFBIF_LEARNING) {
2254		error = bridge_rtupdate(sc, eh->ether_shost, vlan,
2255		    sbif, 0, IFBAF_DYNAMIC);
2256		/*
		 * If the interface has an address limit then deny any source
		 * address that could not be learned into the cache.
2259		 */
2260		if (error && sbif->bif_addrmax)
2261			goto drop;
2262	}
2263
2264	if ((sbif->bif_flags & IFBIF_STP) != 0 &&
2265	    sbif->bif_stp.bp_state == BSTP_IFSTATE_LEARNING)
2266		goto drop;
2267
2268	/*
2269	 * At this point, the port either doesn't participate
2270	 * in spanning tree or it is in the forwarding state.
2271	 */
2272
2273	/*
2274	 * If the packet is unicast, destined for someone on
2275	 * "this" side of the bridge, drop it.
2276	 */
2277	if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
2278		dst_if = bridge_rtlookup(sc, dst, vlan);
2279		if (src_if == dst_if)
2280			goto drop;
2281	} else {
2282		/*
		 * Check if it's a reserved multicast address; any address
		 * listed in 802.1D section 7.12.6 may not be forwarded by the
		 * bridge.
		 * This is currently 01-80-C2-00-00-00 to 01-80-C2-00-00-0F.
2287		 */
2288		if (dst[0] == 0x01 && dst[1] == 0x80 &&
2289		    dst[2] == 0xc2 && dst[3] == 0x00 &&
2290		    dst[4] == 0x00 && dst[5] <= 0x0f)
2291			goto drop;
2292
2293		/* ...forward it to all interfaces. */
2294		if_inc_counter(ifp, IFCOUNTER_IMCASTS, 1);
2295		dst_if = NULL;
2296	}
2297
2298	/*
2299	 * If we have a destination interface which is a member of our bridge,
2300	 * OR this is a unicast packet, push it through the bpf(4) machinery.
2301	 * For broadcast or multicast packets, don't bother because it will
2302	 * be reinjected into ether_input. We do this before we pass the packets
2303	 * through the pfil(9) framework, as it is possible that pfil(9) will
2304	 * drop the packet, or possibly modify it, making it difficult to debug
2305	 * firewall issues on the bridge.
2306	 */
2307	if (dst_if != NULL || (m->m_flags & (M_BCAST | M_MCAST)) == 0)
2308		ETHER_BPF_MTAP(ifp, m);
2309
2310	/* run the packet filter */
2311	if (PFIL_HOOKED_IN(V_inet_pfil_head)
2312#ifdef INET6
2313	    || PFIL_HOOKED_IN(V_inet6_pfil_head)
2314#endif
2315	    ) {
2316		if (bridge_pfil(&m, ifp, src_if, PFIL_IN) != 0)
2317			return;
2318		if (m == NULL)
2319			return;
2320	}
2321
2322	if (dst_if == NULL) {
2323		bridge_broadcast(sc, src_if, m, 1);
2324		return;
2325	}
2326
2327	/*
2328	 * At this point, we're dealing with a unicast frame
2329	 * going to a different interface.
2330	 */
2331	if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
2332		goto drop;
2333
2334	dbif = bridge_lookup_member_if(sc, dst_if);
2335	if (dbif == NULL)
2336		/* Not a member of the bridge (anymore?) */
2337		goto drop;
2338
	/* Private segments cannot talk to each other */
2340	if (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE)
2341		goto drop;
2342
2343	if ((dbif->bif_flags & IFBIF_STP) &&
2344	    dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
2345		goto drop;
2346
2347	if (PFIL_HOOKED_OUT(V_inet_pfil_head)
2348#ifdef INET6
2349	    || PFIL_HOOKED_OUT(V_inet6_pfil_head)
2350#endif
2351	    ) {
2352		if (bridge_pfil(&m, ifp, dst_if, PFIL_OUT) != 0)
2353			return;
2354		if (m == NULL)
2355			return;
2356	}
2357
2358	bridge_enqueue(sc, dst_if, m);
2359	return;
2360
2361drop:
2362	m_freem(m);
2363}
2364
2365/*
2366 * bridge_input:
2367 *
2368 *	Receive input from a member interface.  Queue the packet for
2369 *	bridging if it is not for us.
2370 */
2371static struct mbuf *
2372bridge_input(struct ifnet *ifp, struct mbuf *m)
2373{
2374	struct bridge_softc *sc = ifp->if_bridge;
2375	struct bridge_iflist *bif, *bif2;
2376	struct ifnet *bifp;
2377	struct ether_header *eh;
2378	struct mbuf *mc, *mc2;
2379	uint16_t vlan;
2380	int error;
2381
2382	NET_EPOCH_ASSERT();
2383
2384	if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2385		return (m);
2386
2387	bifp = sc->sc_ifp;
2388	vlan = VLANTAGOF(m);
2389
2390	/*
2391	 * Implement support for bridge monitoring. If this flag has been
2392	 * set on this interface, discard the packet once we push it through
2393	 * the bpf(4) machinery, but before we do, increment the byte and
2394	 * packet counters associated with this interface.
2395	 */
2396	if ((bifp->if_flags & IFF_MONITOR) != 0) {
2397		m->m_pkthdr.rcvif  = bifp;
2398		ETHER_BPF_MTAP(bifp, m);
2399		if_inc_counter(bifp, IFCOUNTER_IPACKETS, 1);
2400		if_inc_counter(bifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
2401		m_freem(m);
2402		return (NULL);
2403	}
2404	bif = bridge_lookup_member_if(sc, ifp);
2405	if (bif == NULL) {
2406		return (m);
2407	}
2408
2409	eh = mtod(m, struct ether_header *);
2410
2411	bridge_span(sc, m);
2412
2413	if (m->m_flags & (M_BCAST|M_MCAST)) {
2414		/* Tap off 802.1D packets; they do not get forwarded. */
2415		if (memcmp(eh->ether_dhost, bstp_etheraddr,
2416		    ETHER_ADDR_LEN) == 0) {
2417			bstp_input(&bif->bif_stp, ifp, m); /* consumes mbuf */
2418			return (NULL);
2419		}
2420
2421		if ((bif->bif_flags & IFBIF_STP) &&
2422		    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
2423			return (m);
2424		}
2425
2426		/*
2427		 * Make a deep copy of the packet and enqueue the copy
2428		 * for bridge processing; return the original packet for
2429		 * local processing.
2430		 */
2431		mc = m_dup(m, M_NOWAIT);
2432		if (mc == NULL) {
2433			return (m);
2434		}
2435
2436		/* Perform the bridge forwarding function with the copy. */
2437		bridge_forward(sc, bif, mc);
2438
2439		/*
2440		 * Reinject the mbuf as arriving on the bridge so we have a
		 * chance at claiming multicast packets. We cannot loop back
2442		 * here from ether_input as a bridge is never a member of a
2443		 * bridge.
2444		 */
2445		KASSERT(bifp->if_bridge == NULL,
2446		    ("loop created in bridge_input"));
2447		mc2 = m_dup(m, M_NOWAIT);
2448		if (mc2 != NULL) {
2449			/* Keep the layer3 header aligned */
2450			int i = min(mc2->m_pkthdr.len, max_protohdr);
2451			mc2 = m_copyup(mc2, i, ETHER_ALIGN);
2452		}
2453		if (mc2 != NULL) {
2454			mc2->m_pkthdr.rcvif = bifp;
2455			(*bifp->if_input)(bifp, mc2);
2456		}
2457
2458		/* Return the original packet for local processing. */
2459		return (m);
2460	}
2461
2462	if ((bif->bif_flags & IFBIF_STP) &&
2463	    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
2464		return (m);
2465	}
2466
2467#if (defined(INET) || defined(INET6))
2468#   define OR_CARP_CHECK_WE_ARE_DST(iface) \
2469	|| ((iface)->if_carp \
2470	    && (*carp_forus_p)((iface), eh->ether_dhost))
2471#   define OR_CARP_CHECK_WE_ARE_SRC(iface) \
2472	|| ((iface)->if_carp \
2473	    && (*carp_forus_p)((iface), eh->ether_shost))
2474#else
2475#   define OR_CARP_CHECK_WE_ARE_DST(iface)
2476#   define OR_CARP_CHECK_WE_ARE_SRC(iface)
2477#endif
2478
2479#ifdef INET6
2480#   define OR_PFIL_HOOKED_INET6 \
2481	|| PFIL_HOOKED_IN(V_inet6_pfil_head)
2482#else
2483#   define OR_PFIL_HOOKED_INET6
2484#endif
2485
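	/*
	 * GRAB_OUR_PACKETS checks whether the frame was sent to or from the
	 * MAC address of the given member interface (or one of its carp(4)
	 * addresses).  Frames addressed to the member are delivered locally,
	 * after updating the address cache, counters and bpf taps as needed;
	 * frames we sent ourselves are dropped.  Note that the embedded
	 * return statements return from bridge_input() itself.
	 */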
2486#define GRAB_OUR_PACKETS(iface) \
2487	if ((iface)->if_type == IFT_GIF) \
2488		continue; \
2489	/* It is destined for us. */ \
2490	if (memcmp(IF_LLADDR((iface)), eh->ether_dhost,  ETHER_ADDR_LEN) == 0 \
2491	    OR_CARP_CHECK_WE_ARE_DST((iface))				\
2492	    ) {								\
2493		if (bif->bif_flags & IFBIF_LEARNING) {			\
2494			error = bridge_rtupdate(sc, eh->ether_shost,	\
2495			    vlan, bif, 0, IFBAF_DYNAMIC);		\
2496			if (error && bif->bif_addrmax) {		\
2497				m_freem(m);				\
2498				return (NULL);				\
2499			}						\
2500		}							\
2501		m->m_pkthdr.rcvif = iface;				\
2502		if ((iface) == ifp) {					\
2503			/* Skip bridge processing... src == dest */	\
2504			return (m);					\
2505		}							\
2506		/* It's passing over or to the bridge, locally. */	\
2507		ETHER_BPF_MTAP(bifp, m);				\
2508		if_inc_counter(bifp, IFCOUNTER_IPACKETS, 1);		\
2509		if_inc_counter(bifp, IFCOUNTER_IBYTES, m->m_pkthdr.len); \
2510		/* Filter on the physical interface. */			\
2511		if (V_pfil_local_phys && (PFIL_HOOKED_IN(V_inet_pfil_head) \
2512		     OR_PFIL_HOOKED_INET6)) {				\
2513			if (bridge_pfil(&m, NULL, ifp,			\
2514			    PFIL_IN) != 0 || m == NULL) {		\
2515				return (NULL);				\
2516			}						\
2517		}							\
2518		if ((iface) != bifp)					\
2519			ETHER_BPF_MTAP(iface, m);			\
2520		return (m);						\
2521	}								\
2522									\
2523	/* We just received a packet that we sent out. */		\
2524	if (memcmp(IF_LLADDR((iface)), eh->ether_shost, ETHER_ADDR_LEN) == 0 \
2525	    OR_CARP_CHECK_WE_ARE_SRC((iface))			\
2526	    ) {								\
2527		m_freem(m);						\
2528		return (NULL);						\
2529	}
2530
2531	/*
2532	 * Unicast.  Make sure it's not for the bridge.
2533	 */
2534	do { GRAB_OUR_PACKETS(bifp) } while (0);
2535
2536	/*
	 * Check the receiving interface (ifp) first.  This helps when the
	 * packet arrives on an interface, such as a vlan(4), that shares a
	 * MAC address with other members of the same bridge, and it also
	 * saves some CPU cycles when the destination interface is the same
	 * as the input interface.
2542	 */
2543	do { GRAB_OUR_PACKETS(ifp) } while (0);
2544
	/* Now check all the bridge members. */
2546	CK_LIST_FOREACH(bif2, &sc->sc_iflist, bif_next) {
2547		GRAB_OUR_PACKETS(bif2->bif_ifp)
2548	}
2549
2550#undef OR_CARP_CHECK_WE_ARE_DST
2551#undef OR_CARP_CHECK_WE_ARE_SRC
2552#undef OR_PFIL_HOOKED_INET6
2553#undef GRAB_OUR_PACKETS
2554
2555	/* Perform the bridge forwarding function. */
2556	bridge_forward(sc, bif, m);
2557
2558	return (NULL);
2559}
2560
2561/*
2562 * bridge_broadcast:
2563 *
2564 *	Send a frame to all interfaces that are members of
2565 *	the bridge, except for the one on which the packet
2566 *	arrived.
2567 *
 *	NOTE: Runs within the net epoch; the mbuf is always consumed.
2569 */
2570static void
2571bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
2572    struct mbuf *m, int runfilt)
2573{
2574	struct bridge_iflist *dbif, *sbif;
2575	struct mbuf *mc;
2576	struct ifnet *dst_if;
2577	int used = 0, i;
2578
2579	NET_EPOCH_ASSERT();
2580
2581	sbif = bridge_lookup_member_if(sc, src_if);
2582
2583	/* Filter on the bridge interface before broadcasting */
2584	if (runfilt && (PFIL_HOOKED_OUT(V_inet_pfil_head)
2585#ifdef INET6
2586	    || PFIL_HOOKED_OUT(V_inet6_pfil_head)
2587#endif
2588	    )) {
2589		if (bridge_pfil(&m, sc->sc_ifp, NULL, PFIL_OUT) != 0)
2590			return;
2591		if (m == NULL)
2592			return;
2593	}
2594
2595	CK_LIST_FOREACH(dbif, &sc->sc_iflist, bif_next) {
2596		dst_if = dbif->bif_ifp;
2597		if (dst_if == src_if)
2598			continue;
2599
		/* Private segments cannot talk to each other */
2601		if (sbif && (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE))
2602			continue;
2603
2604		if ((dbif->bif_flags & IFBIF_STP) &&
2605		    dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
2606			continue;
2607
2608		if ((dbif->bif_flags & IFBIF_DISCOVER) == 0 &&
2609		    (m->m_flags & (M_BCAST|M_MCAST)) == 0)
2610			continue;
2611
2612		if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
2613			continue;
2614
2615		if (CK_LIST_NEXT(dbif, bif_next) == NULL) {
2616			mc = m;
2617			used = 1;
2618		} else {
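			/*
			 * Use a writable deep copy (m_dup) rather than
			 * m_copypacket() here, since the output filter run
			 * below may modify the copy.
			 */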
2619			mc = m_dup(m, M_NOWAIT);
2620			if (mc == NULL) {
2621				if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
2622				continue;
2623			}
2624		}
2625
2626		/*
2627		 * Filter on the output interface. Pass a NULL bridge interface
2628		 * pointer so we do not redundantly filter on the bridge for
2629		 * each interface we broadcast on.
2630		 */
2631		if (runfilt && (PFIL_HOOKED_OUT(V_inet_pfil_head)
2632#ifdef INET6
2633		    || PFIL_HOOKED_OUT(V_inet6_pfil_head)
2634#endif
2635		    )) {
2636			if (used == 0) {
2637				/* Keep the layer3 header aligned */
2638				i = min(mc->m_pkthdr.len, max_protohdr);
2639				mc = m_copyup(mc, i, ETHER_ALIGN);
2640				if (mc == NULL) {
2641					if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
2642					continue;
2643				}
2644			}
2645			if (bridge_pfil(&mc, NULL, dst_if, PFIL_OUT) != 0)
2646				continue;
2647			if (mc == NULL)
2648				continue;
2649		}
2650
2651		bridge_enqueue(sc, dst_if, mc);
2652	}
2653	if (used == 0)
2654		m_freem(m);
2655}
2656
2657/*
2658 * bridge_span:
2659 *
 *	Duplicate a packet out one or more interfaces that are in span mode;
 *	the original mbuf is left unmodified.
2662 */
2663static void
2664bridge_span(struct bridge_softc *sc, struct mbuf *m)
2665{
2666	struct bridge_iflist *bif;
2667	struct ifnet *dst_if;
2668	struct mbuf *mc;
2669
2670	NET_EPOCH_ASSERT();
2671
2672	if (CK_LIST_EMPTY(&sc->sc_spanlist))
2673		return;
2674
2675	CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
2676		dst_if = bif->bif_ifp;
2677
2678		if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
2679			continue;
2680
2681		mc = m_copypacket(m, M_NOWAIT);
2682		if (mc == NULL) {
2683			if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
2684			continue;
2685		}
2686
2687		bridge_enqueue(sc, dst_if, mc);
2688	}
2689}
2690
2691/*
2692 * bridge_rtupdate:
2693 *
2694 *	Add a bridge routing entry.
2695 */
2696static int
2697bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst, uint16_t vlan,
2698    struct bridge_iflist *bif, int setflags, uint8_t flags)
2699{
2700	struct bridge_rtnode *brt;
2701	int error;
2702
2703	BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(sc);
2704
2705	/* Check the source address is valid and not multicast. */
2706	if (ETHER_IS_MULTICAST(dst) ||
	    (dst[0] == 0 && dst[1] == 0 && dst[2] == 0 &&
	     dst[3] == 0 && dst[4] == 0 && dst[5] == 0))
2709		return (EINVAL);
2710
2711	/* 802.1p frames map to vlan 1 */
2712	if (vlan == 0)
2713		vlan = 1;
2714
2715	/*
2716	 * A route for this destination might already exist.  If so,
2717	 * update it, otherwise create a new one.
2718	 */
2719	if ((brt = bridge_rtnode_lookup(sc, dst, vlan)) == NULL) {
2720		BRIDGE_RT_LOCK(sc);
2721
2722		/* Check again, now that we have the lock. There could have
2723		 * been a race and we only want to insert this once. */
2724		if ((brt = bridge_rtnode_lookup(sc, dst, vlan)) != NULL) {
2725			BRIDGE_RT_UNLOCK(sc);
2726			return (0);
2727		}
2728
2729		if (sc->sc_brtcnt >= sc->sc_brtmax) {
2730			sc->sc_brtexceeded++;
2731			BRIDGE_RT_UNLOCK(sc);
2732			return (ENOSPC);
2733		}
2734		/* Check per interface address limits (if enabled) */
2735		if (bif->bif_addrmax && bif->bif_addrcnt >= bif->bif_addrmax) {
2736			bif->bif_addrexceeded++;
2737			BRIDGE_RT_UNLOCK(sc);
2738			return (ENOSPC);
2739		}
2740
2741		/*
2742		 * Allocate a new bridge forwarding node, and
2743		 * initialize the expiration time and Ethernet
2744		 * address.
2745		 */
2746		brt = uma_zalloc(V_bridge_rtnode_zone, M_NOWAIT | M_ZERO);
2747		if (brt == NULL) {
2748			BRIDGE_RT_UNLOCK(sc);
2749			return (ENOMEM);
2750		}
2751		brt->brt_vnet = curvnet;
2752
2753		if (bif->bif_flags & IFBIF_STICKY)
2754			brt->brt_flags = IFBAF_STICKY;
2755		else
2756			brt->brt_flags = IFBAF_DYNAMIC;
2757
2758		memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
2759		brt->brt_vlan = vlan;
2760
2761		if ((error = bridge_rtnode_insert(sc, brt)) != 0) {
2762			uma_zfree(V_bridge_rtnode_zone, brt);
2763			BRIDGE_RT_UNLOCK(sc);
2764			return (error);
2765		}
2766		brt->brt_dst = bif;
2767		bif->bif_addrcnt++;
2768
2769		BRIDGE_RT_UNLOCK(sc);
2770	}
2771
2772	if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
2773	    brt->brt_dst != bif) {
2774		BRIDGE_RT_LOCK(sc);
2775		brt->brt_dst->bif_addrcnt--;
2776		brt->brt_dst = bif;
2777		brt->brt_dst->bif_addrcnt++;
2778		BRIDGE_RT_UNLOCK(sc);
2779	}
2780
2781	if ((flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2782		brt->brt_expire = time_uptime + sc->sc_brttimeout;
2783	if (setflags)
2784		brt->brt_flags = flags;
2785
2786	return (0);
2787}
2788
2789/*
2790 * bridge_rtlookup:
2791 *
2792 *	Lookup the destination interface for an address.
2793 */
2794static struct ifnet *
2795bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
2796{
2797	struct bridge_rtnode *brt;
2798
2799	NET_EPOCH_ASSERT();
2800
2801	if ((brt = bridge_rtnode_lookup(sc, addr, vlan)) == NULL)
2802		return (NULL);
2803
2804	return (brt->brt_ifp);
2805}
2806
2807/*
2808 * bridge_rttrim:
2809 *
 *	Trim the routing table so that we have no more routing entries
 *	than the configured maximum.
2813 */
2814static void
2815bridge_rttrim(struct bridge_softc *sc)
2816{
2817	struct bridge_rtnode *brt, *nbrt;
2818
2819	NET_EPOCH_ASSERT();
2820	BRIDGE_RT_LOCK_ASSERT(sc);
2821
2822	/* Make sure we actually need to do this. */
2823	if (sc->sc_brtcnt <= sc->sc_brtmax)
2824		return;
2825
2826	/* Force an aging cycle; this might trim enough addresses. */
2827	bridge_rtage(sc);
2828	if (sc->sc_brtcnt <= sc->sc_brtmax)
2829		return;
2830
2831	CK_LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2832		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
2833			bridge_rtnode_destroy(sc, brt);
2834			if (sc->sc_brtcnt <= sc->sc_brtmax)
2835				return;
2836		}
2837	}
2838}
2839
2840/*
2841 * bridge_timer:
2842 *
2843 *	Aging timer for the bridge.
2844 */
2845static void
2846bridge_timer(void *arg)
2847{
2848	struct bridge_softc *sc = arg;
2849
2850	BRIDGE_RT_LOCK_ASSERT(sc);
2851
2852	/* Destruction of rtnodes requires a proper vnet context */
2853	CURVNET_SET(sc->sc_ifp->if_vnet);
2854	bridge_rtage(sc);
2855
2856	if (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING)
2857		callout_reset(&sc->sc_brcallout,
2858		    bridge_rtable_prune_period * hz, bridge_timer, sc);
2859	CURVNET_RESTORE();
2860}
2861
2862/*
2863 * bridge_rtage:
2864 *
2865 *	Perform an aging cycle.
2866 */
2867static void
2868bridge_rtage(struct bridge_softc *sc)
2869{
2870	struct bridge_rtnode *brt, *nbrt;
2871
2872	BRIDGE_RT_LOCK_ASSERT(sc);
2873
2874	CK_LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2875		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
2876			if (time_uptime >= brt->brt_expire)
2877				bridge_rtnode_destroy(sc, brt);
2878		}
2879	}
2880}
2881
2882/*
2883 * bridge_rtflush:
2884 *
2885 *	Remove all dynamic addresses from the bridge.
2886 */
2887static void
2888bridge_rtflush(struct bridge_softc *sc, int full)
2889{
2890	struct bridge_rtnode *brt, *nbrt;
2891
2892	BRIDGE_RT_LOCK_ASSERT(sc);
2893
2894	CK_LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2895		if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2896			bridge_rtnode_destroy(sc, brt);
2897	}
2898}
2899
2900/*
2901 * bridge_rtdaddr:
2902 *
2903 *	Remove an address from the table.
2904 */
2905static int
2906bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
2907{
2908	struct bridge_rtnode *brt;
2909	int found = 0;
2910
2911	BRIDGE_RT_LOCK(sc);
2912
2913	/*
2914	 * If vlan is zero then we want to delete for all vlans so the lookup
2915	 * may return more than one.
2916	 */
2917	while ((brt = bridge_rtnode_lookup(sc, addr, vlan)) != NULL) {
2918		bridge_rtnode_destroy(sc, brt);
2919		found = 1;
2920	}
2921
2922	BRIDGE_RT_UNLOCK(sc);
2923
2924	return (found ? 0 : ENOENT);
2925}
2926
2927/*
2928 * bridge_rtdelete:
2929 *
 *	Delete routes to a specific member interface.
2931 */
2932static void
2933bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp, int full)
2934{
2935	struct bridge_rtnode *brt, *nbrt;
2936
2937	BRIDGE_RT_LOCK_ASSERT(sc);
2938
2939	CK_LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2940		if (brt->brt_ifp == ifp && (full ||
2941			    (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC))
2942			bridge_rtnode_destroy(sc, brt);
2943	}
2944}
2945
2946/*
2947 * bridge_rtable_init:
2948 *
2949 *	Initialize the route table for this bridge.
2950 */
2951static void
2952bridge_rtable_init(struct bridge_softc *sc)
2953{
2954	int i;
2955
2956	sc->sc_rthash = malloc(sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE,
2957	    M_DEVBUF, M_WAITOK);
2958
2959	for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
2960		CK_LIST_INIT(&sc->sc_rthash[i]);
2961
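	/*
	 * Seed the hash with a per-bridge random key so that remote hosts
	 * cannot easily construct MAC addresses that all land in the same
	 * hash bucket.
	 */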
2962	sc->sc_rthash_key = arc4random();
2963	CK_LIST_INIT(&sc->sc_rtlist);
2964}
2965
2966/*
2967 * bridge_rtable_fini:
2968 *
2969 *	Deconstruct the route table for this bridge.
2970 */
2971static void
2972bridge_rtable_fini(struct bridge_softc *sc)
2973{
2974
2975	KASSERT(sc->sc_brtcnt == 0,
2976	    ("%s: %d bridge routes referenced", __func__, sc->sc_brtcnt));
2977	free(sc->sc_rthash, M_DEVBUF);
2978}
2979
2980/*
2981 * The following hash function is adapted from "Hash Functions" by Bob Jenkins
2982 * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
2983 */
2984#define	mix(a, b, c)							\
2985do {									\
2986	a -= b; a -= c; a ^= (c >> 13);					\
2987	b -= c; b -= a; b ^= (a << 8);					\
2988	c -= a; c -= b; c ^= (b >> 13);					\
2989	a -= b; a -= c; a ^= (c >> 12);					\
2990	b -= c; b -= a; b ^= (a << 16);					\
2991	c -= a; c -= b; c ^= (b >> 5);					\
2992	a -= b; a -= c; a ^= (c >> 3);					\
2993	b -= c; b -= a; b ^= (a << 10);					\
2994	c -= a; c -= b; c ^= (b >> 15);					\
2995} while (/*CONSTCOND*/0)
2996
2997static __inline uint32_t
2998bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
2999{
3000	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;
3001
3002	b += addr[5] << 8;
3003	b += addr[4];
3004	a += addr[3] << 24;
3005	a += addr[2] << 16;
3006	a += addr[1] << 8;
3007	a += addr[0];
3008
3009	mix(a, b, c);
3010
3011	return (c & BRIDGE_RTHASH_MASK);
3012}
3013
3014#undef mix
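/*
 * Illustrative sketch (not used by the code): for a destination MAC of
 * 00:11:22:33:44:55, bridge_rthash() folds the address bytes into the two
 * words as a += 0x33221100 and b += 0x00005544 on top of the golden-ratio
 * constant 0x9e3779b9, mixes them with the per-bridge random key in c, and
 * keeps only the low bits of c (BRIDGE_RTHASH_MASK) as the bucket index.
 */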
3015
3016static int
3017bridge_rtnode_addr_cmp(const uint8_t *a, const uint8_t *b)
3018{
3019	int i, d;
3020
3021	for (i = 0, d = 0; i < ETHER_ADDR_LEN && d == 0; i++) {
3022		d = ((int)a[i]) - ((int)b[i]);
3023	}
3024
3025	return (d);
3026}
3027
3028/*
3029 * bridge_rtnode_lookup:
3030 *
 *	Look up a bridge route node for the specified destination.  Compare
 *	the vlan id; if it is zero then just return the first address match.
3033 */
3034static struct bridge_rtnode *
3035bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
3036{
3037	struct bridge_rtnode *brt;
3038	uint32_t hash;
3039	int dir;
3040
3041	BRIDGE_RT_LOCK_OR_NET_EPOCH_ASSERT(sc);
3042
3043	hash = bridge_rthash(sc, addr);
3044	CK_LIST_FOREACH(brt, &sc->sc_rthash[hash], brt_hash) {
3045		dir = bridge_rtnode_addr_cmp(addr, brt->brt_addr);
3046		if (dir == 0 && (brt->brt_vlan == vlan || vlan == 0))
3047			return (brt);
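		/*
		 * The hash chains are kept sorted by bridge_rtnode_insert(),
		 * with larger addresses first, so once we see an entry whose
		 * address is smaller than the one we are looking for we can
		 * stop searching.
		 */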
3048		if (dir > 0)
3049			return (NULL);
3050	}
3051
3052	return (NULL);
3053}
3054
3055/*
3056 * bridge_rtnode_insert:
3057 *
3058 *	Insert the specified bridge node into the route table.  We
3059 *	assume the entry is not already in the table.
3060 */
3061static int
3062bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
3063{
3064	struct bridge_rtnode *lbrt;
3065	uint32_t hash;
3066	int dir;
3067
3068	BRIDGE_RT_LOCK_ASSERT(sc);
3069
3070	hash = bridge_rthash(sc, brt->brt_addr);
3071
3072	lbrt = CK_LIST_FIRST(&sc->sc_rthash[hash]);
3073	if (lbrt == NULL) {
3074		CK_LIST_INSERT_HEAD(&sc->sc_rthash[hash], brt, brt_hash);
3075		goto out;
3076	}
3077
3078	do {
3079		dir = bridge_rtnode_addr_cmp(brt->brt_addr, lbrt->brt_addr);
3080		if (dir == 0 && brt->brt_vlan == lbrt->brt_vlan)
3081			return (EEXIST);
3082		if (dir > 0) {
3083			CK_LIST_INSERT_BEFORE(lbrt, brt, brt_hash);
3084			goto out;
3085		}
3086		if (CK_LIST_NEXT(lbrt, brt_hash) == NULL) {
3087			CK_LIST_INSERT_AFTER(lbrt, brt, brt_hash);
3088			goto out;
3089		}
3090		lbrt = CK_LIST_NEXT(lbrt, brt_hash);
3091	} while (lbrt != NULL);
3092
3093#ifdef DIAGNOSTIC
3094	panic("bridge_rtnode_insert: impossible");
3095#endif
3096
3097out:
3098	CK_LIST_INSERT_HEAD(&sc->sc_rtlist, brt, brt_list);
3099	sc->sc_brtcnt++;
3100
3101	return (0);
3102}
3103
3104static void
3105bridge_rtnode_destroy_cb(struct epoch_context *ctx)
3106{
3107	struct bridge_rtnode *brt;
3108
3109	brt = __containerof(ctx, struct bridge_rtnode, brt_epoch_ctx);
3110
3111	CURVNET_SET(brt->brt_vnet);
3112	uma_zfree(V_bridge_rtnode_zone, brt);
3113	CURVNET_RESTORE();
3114}
3115
3116/*
3117 * bridge_rtnode_destroy:
3118 *
3119 *	Destroy a bridge rtnode.
3120 */
3121static void
3122bridge_rtnode_destroy(struct bridge_softc *sc, struct bridge_rtnode *brt)
3123{
3124	BRIDGE_RT_LOCK_ASSERT(sc);
3125
3126	CK_LIST_REMOVE(brt, brt_hash);
3127
3128	CK_LIST_REMOVE(brt, brt_list);
3129	sc->sc_brtcnt--;
3130	brt->brt_dst->bif_addrcnt--;
3131
3132	NET_EPOCH_CALL(bridge_rtnode_destroy_cb, &brt->brt_epoch_ctx);
3133}
3134
3135/*
3136 * bridge_rtable_expire:
3137 *
3138 *	Set the expiry time for all routes on an interface.
3139 */
3140static void
3141bridge_rtable_expire(struct ifnet *ifp, int age)
3142{
3143	struct bridge_softc *sc = ifp->if_bridge;
3144	struct bridge_rtnode *brt;
3145
3146	CURVNET_SET(ifp->if_vnet);
3147	BRIDGE_RT_LOCK(sc);
3148
3149	/*
	 * If the age is zero then flush; otherwise cap all of the expiry
	 * times for this interface at 'age' seconds from now.
3152	 */
3153	if (age == 0)
3154		bridge_rtdelete(sc, ifp, IFBF_FLUSHDYN);
3155	else {
3156		CK_LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
3157			/* Cap the expiry time to 'age' */
3158			if (brt->brt_ifp == ifp &&
3159			    brt->brt_expire > time_uptime + age &&
3160			    (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
3161				brt->brt_expire = time_uptime + age;
3162		}
3163	}
3164	BRIDGE_RT_UNLOCK(sc);
3165	CURVNET_RESTORE();
3166}
3167
3168/*
3169 * bridge_state_change:
3170 *
3171 *	Callback from the bridgestp code when a port changes states.
3172 */
3173static void
3174bridge_state_change(struct ifnet *ifp, int state)
3175{
3176	struct bridge_softc *sc = ifp->if_bridge;
3177	static const char *stpstates[] = {
3178		"disabled",
3179		"listening",
3180		"learning",
3181		"forwarding",
3182		"blocking",
3183		"discarding"
3184	};
3185
3186	CURVNET_SET(ifp->if_vnet);
3187	if (V_log_stp)
3188		log(LOG_NOTICE, "%s: state changed to %s on %s\n",
3189		    sc->sc_ifp->if_xname, stpstates[state], ifp->if_xname);
3190	CURVNET_RESTORE();
3191}
3192
3193/*
3194 * Send bridge packets through pfil if they are one of the types pfil can deal
3195 * with, or if they are ARP or REVARP.  (pfil will pass ARP and REVARP without
 * question.) If bifp or ifp is NULL then packet filtering is skipped for
3197 * that interface.
3198 */
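/*
 * Roughly, bridge_pfil() proceeds as follows: make sure the headers are
 * contiguous, detect an optional LLC/SNAP encapsulation, gate on the
 * EtherType (ARP/REVARP, IP and possibly IPv6), give layer-2 ipfw a shot at
 * the frame, then strip the Ethernet (and SNAP) header, sanity-check the IP
 * header on input, run the inet/inet6 pfil(9) hooks in in_if -> bridge_if ->
 * out_if order, fragment oversized IPv4 packets on output, recompute the IP
 * checksum, and finally put the stripped headers back before returning.
 */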
3199static int
3200bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir)
3201{
3202	int snap, error, i, hlen;
3203	struct ether_header *eh1, eh2;
3204	struct ip *ip;
3205	struct llc llc1;
3206	u_int16_t ether_type;
3207	pfil_return_t rv;
3208
3209	snap = 0;
	error = -1;	/* Default to an error unless explicitly cleared below */
3211
3212#if 0
	/* we may return with the IP fields swapped, ensure it's not shared */
3214	KASSERT(M_WRITABLE(*mp), ("%s: modifying a shared mbuf", __func__));
3215#endif
3216
3217	if (V_pfil_bridge == 0 && V_pfil_member == 0 && V_pfil_ipfw == 0)
3218		return (0); /* filtering is disabled */
3219
3220	i = min((*mp)->m_pkthdr.len, max_protohdr);
3221	if ((*mp)->m_len < i) {
3222	    *mp = m_pullup(*mp, i);
3223	    if (*mp == NULL) {
3224		printf("%s: m_pullup failed\n", __func__);
3225		return (-1);
3226	    }
3227	}
3228
3229	eh1 = mtod(*mp, struct ether_header *);
3230	ether_type = ntohs(eh1->ether_type);
3231
3232	/*
3233	 * Check for SNAP/LLC.
3234	 */
3235	if (ether_type < ETHERMTU) {
3236		struct llc *llc2 = (struct llc *)(eh1 + 1);
3237
3238		if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
3239		    llc2->llc_dsap == LLC_SNAP_LSAP &&
3240		    llc2->llc_ssap == LLC_SNAP_LSAP &&
3241		    llc2->llc_control == LLC_UI) {
3242			ether_type = htons(llc2->llc_un.type_snap.ether_type);
3243			snap = 1;
3244		}
3245	}
3246
3247	/*
3248	 * If we're trying to filter bridge traffic, don't look at anything
3249	 * other than IP and ARP traffic.  If the filter doesn't understand
3250	 * IPv6, don't allow IPv6 through the bridge either.  This is lame
3251	 * since if we really wanted, say, an AppleTalk filter, we are hosed,
3252	 * but of course we don't have an AppleTalk filter to begin with.
3253	 * (Note that since pfil doesn't understand ARP it will pass *ALL*
3254	 * ARP traffic.)
3255	 */
3256	switch (ether_type) {
3257		case ETHERTYPE_ARP:
3258		case ETHERTYPE_REVARP:
3259			if (V_pfil_ipfw_arp == 0)
3260				return (0); /* Automatically pass */
3261			break;
3262
3263		case ETHERTYPE_IP:
3264#ifdef INET6
3265		case ETHERTYPE_IPV6:
3266#endif /* INET6 */
3267			break;
3268		default:
3269			/*
			 * Check to see if the user wants to pass non-IP
			 * packets; these will not be checked by pfil(9) and
			 * would be passed unconditionally, so the default is
			 * to drop them.
3273			 */
3274			if (V_pfil_onlyip)
3275				goto bad;
3276	}
3277
3278	/* Run the packet through pfil before stripping link headers */
3279	if (PFIL_HOOKED_OUT(V_link_pfil_head) && V_pfil_ipfw != 0 &&
3280	    dir == PFIL_OUT && ifp != NULL) {
3281		switch (pfil_run_hooks(V_link_pfil_head, mp, ifp, dir, NULL)) {
3282		case PFIL_DROPPED:
3283			return (EACCES);
3284		case PFIL_CONSUMED:
3285			return (0);
3286		}
3287	}
3288
3289	/* Strip off the Ethernet header and keep a copy. */
3290	m_copydata(*mp, 0, ETHER_HDR_LEN, (caddr_t) &eh2);
3291	m_adj(*mp, ETHER_HDR_LEN);
3292
3293	/* Strip off snap header, if present */
3294	if (snap) {
3295		m_copydata(*mp, 0, sizeof(struct llc), (caddr_t) &llc1);
3296		m_adj(*mp, sizeof(struct llc));
3297	}
3298
3299	/*
3300	 * Check the IP header for alignment and errors
3301	 */
3302	if (dir == PFIL_IN) {
3303		switch (ether_type) {
3304			case ETHERTYPE_IP:
3305				error = bridge_ip_checkbasic(mp);
3306				break;
3307#ifdef INET6
3308			case ETHERTYPE_IPV6:
3309				error = bridge_ip6_checkbasic(mp);
3310				break;
3311#endif /* INET6 */
3312			default:
3313				error = 0;
3314		}
3315		if (error)
3316			goto bad;
3317	}
3318
3319	error = 0;
3320
3321	/*
3322	 * Run the packet through pfil
3323	 */
3324	rv = PFIL_PASS;
3325	switch (ether_type) {
3326	case ETHERTYPE_IP:
3327		/*
3328		 * Run pfil on the member interface and the bridge, both can
3329		 * be skipped by clearing pfil_member or pfil_bridge.
3330		 *
3331		 * Keep the order:
3332		 *   in_if -> bridge_if -> out_if
3333		 */
3334		if (V_pfil_bridge && dir == PFIL_OUT && bifp != NULL && (rv =
3335		    pfil_run_hooks(V_inet_pfil_head, mp, bifp, dir, NULL)) !=
3336		    PFIL_PASS)
3337			break;
3338
3339		if (V_pfil_member && ifp != NULL && (rv =
3340		    pfil_run_hooks(V_inet_pfil_head, mp, ifp, dir, NULL)) !=
3341		    PFIL_PASS)
3342			break;
3343
3344		if (V_pfil_bridge && dir == PFIL_IN && bifp != NULL && (rv =
3345		    pfil_run_hooks(V_inet_pfil_head, mp, bifp, dir, NULL)) !=
3346		    PFIL_PASS)
3347			break;
3348
		/*
		 * Check whether we need to fragment the packet;
		 * bridge_fragment generates an mbuf chain of packets that
		 * already include the Ethernet headers.
		 */
3352		if (V_pfil_member && ifp != NULL && dir == PFIL_OUT) {
3353			i = (*mp)->m_pkthdr.len;
3354			if (i > ifp->if_mtu) {
3355				error = bridge_fragment(ifp, mp, &eh2, snap,
3356					    &llc1);
3357				return (error);
3358			}
3359		}
3360
3361		/* Recalculate the ip checksum. */
3362		ip = mtod(*mp, struct ip *);
3363		hlen = ip->ip_hl << 2;
3364		if (hlen < sizeof(struct ip))
3365			goto bad;
3366		if (hlen > (*mp)->m_len) {
3367			if ((*mp = m_pullup(*mp, hlen)) == NULL)
3368				goto bad;
3369			ip = mtod(*mp, struct ip *);
3370			if (ip == NULL)
3371				goto bad;
3372		}
3373		ip->ip_sum = 0;
3374		if (hlen == sizeof(struct ip))
3375			ip->ip_sum = in_cksum_hdr(ip);
3376		else
3377			ip->ip_sum = in_cksum(*mp, hlen);
3378
3379		break;
3380#ifdef INET6
3381	case ETHERTYPE_IPV6:
3382		if (V_pfil_bridge && dir == PFIL_OUT && bifp != NULL && (rv =
3383		    pfil_run_hooks(V_inet6_pfil_head, mp, bifp, dir, NULL)) !=
3384		    PFIL_PASS)
3385			break;
3386
3387		if (V_pfil_member && ifp != NULL && (rv =
3388		    pfil_run_hooks(V_inet6_pfil_head, mp, ifp, dir, NULL)) !=
3389		    PFIL_PASS)
3390			break;
3391
3392		if (V_pfil_bridge && dir == PFIL_IN && bifp != NULL && (rv =
3393		    pfil_run_hooks(V_inet6_pfil_head, mp, bifp, dir, NULL)) !=
3394		    PFIL_PASS)
3395			break;
3396		break;
3397#endif
3398	}
3399
3400	switch (rv) {
3401	case PFIL_CONSUMED:
3402		return (0);
3403	case PFIL_DROPPED:
3404		return (EACCES);
3405	default:
3406		break;
3407	}
3408
3409	error = -1;
3410
3411	/*
3412	 * Finally, put everything back the way it was and return
3413	 */
3414	if (snap) {
3415		M_PREPEND(*mp, sizeof(struct llc), M_NOWAIT);
3416		if (*mp == NULL)
3417			return (error);
3418		bcopy(&llc1, mtod(*mp, caddr_t), sizeof(struct llc));
3419	}
3420
3421	M_PREPEND(*mp, ETHER_HDR_LEN, M_NOWAIT);
3422	if (*mp == NULL)
3423		return (error);
3424	bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);
3425
3426	return (0);
3427
3428bad:
3429	m_freem(*mp);
3430	*mp = NULL;
3431	return (error);
3432}
3433
3434/*
3435 * Perform basic checks on header size since
 * the pfil hooks assume ip_input has already
 * processed it for them.  Cut-and-pasted from ip_input.c.
3438 * Given how simple the IPv6 version is,
3439 * does the IPv4 version really need to be
3440 * this complicated?
3441 *
3442 * XXX Should we update ipstat here, or not?
3443 * XXX Right now we update ipstat but not
3444 * XXX csum_counter.
3445 */
3446static int
3447bridge_ip_checkbasic(struct mbuf **mp)
3448{
3449	struct mbuf *m = *mp;
3450	struct ip *ip;
3451	int len, hlen;
3452	u_short sum;
3453
3454	if (*mp == NULL)
3455		return (-1);
3456
3457	if (IP_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
3458		if ((m = m_copyup(m, sizeof(struct ip),
3459			(max_linkhdr + 3) & ~3)) == NULL) {
3460			/* XXXJRT new stat, please */
3461			KMOD_IPSTAT_INC(ips_toosmall);
3462			goto bad;
3463		}
3464	} else if (__predict_false(m->m_len < sizeof (struct ip))) {
3465		if ((m = m_pullup(m, sizeof (struct ip))) == NULL) {
3466			KMOD_IPSTAT_INC(ips_toosmall);
3467			goto bad;
3468		}
3469	}
3470	ip = mtod(m, struct ip *);
3471	if (ip == NULL) goto bad;
3472
3473	if (ip->ip_v != IPVERSION) {
3474		KMOD_IPSTAT_INC(ips_badvers);
3475		goto bad;
3476	}
3477	hlen = ip->ip_hl << 2;
3478	if (hlen < sizeof(struct ip)) { /* minimum header length */
3479		KMOD_IPSTAT_INC(ips_badhlen);
3480		goto bad;
3481	}
3482	if (hlen > m->m_len) {
3483		if ((m = m_pullup(m, hlen)) == NULL) {
3484			KMOD_IPSTAT_INC(ips_badhlen);
3485			goto bad;
3486		}
3487		ip = mtod(m, struct ip *);
3488		if (ip == NULL) goto bad;
3489	}
3490
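	/*
	 * If the interface already verified the header checksum, trust its
	 * verdict; sum stays zero only when CSUM_IP_VALID is set.
	 */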
3491	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
3492		sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
3493	} else {
3494		if (hlen == sizeof(struct ip)) {
3495			sum = in_cksum_hdr(ip);
3496		} else {
3497			sum = in_cksum(m, hlen);
3498		}
3499	}
3500	if (sum) {
3501		KMOD_IPSTAT_INC(ips_badsum);
3502		goto bad;
3503	}
3504
3505	/* Retrieve the packet length. */
3506	len = ntohs(ip->ip_len);
3507
3508	/*
3509	 * Check for additional length bogosity
3510	 */
3511	if (len < hlen) {
3512		KMOD_IPSTAT_INC(ips_badlen);
3513		goto bad;
3514	}
3515
3516	/*
	 * Check that the amount of data in the buffers is at least as much
	 * as the IP header would have us expect.  Drop the packet if it is
	 * shorter than we expect.
3520	 */
3521	if (m->m_pkthdr.len < len) {
3522		KMOD_IPSTAT_INC(ips_tooshort);
3523		goto bad;
3524	}
3525
3526	/* Checks out, proceed */
3527	*mp = m;
3528	return (0);
3529
3530bad:
3531	*mp = m;
3532	return (-1);
3533}
3534
3535#ifdef INET6
3536/*
3537 * Same as above, but for IPv6.
3538 * Cut-and-pasted from ip6_input.c.
3539 * XXX Should we update ip6stat, or not?
3540 */
3541static int
3542bridge_ip6_checkbasic(struct mbuf **mp)
3543{
3544	struct mbuf *m = *mp;
3545	struct ip6_hdr *ip6;
3546
3547	/*
3548	 * If the IPv6 header is not aligned, slurp it up into a new
3549	 * mbuf with space for link headers, in the event we forward
3550	 * it.  Otherwise, if it is aligned, make sure the entire base
3551	 * IPv6 header is in the first mbuf of the chain.
3552	 */
3553	if (IP6_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
3554		struct ifnet *inifp = m->m_pkthdr.rcvif;
3555		if ((m = m_copyup(m, sizeof(struct ip6_hdr),
3556			    (max_linkhdr + 3) & ~3)) == NULL) {
3557			/* XXXJRT new stat, please */
3558			IP6STAT_INC(ip6s_toosmall);
3559			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
3560			goto bad;
3561		}
3562	} else if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) {
3563		struct ifnet *inifp = m->m_pkthdr.rcvif;
3564		if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
3565			IP6STAT_INC(ip6s_toosmall);
3566			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
3567			goto bad;
3568		}
3569	}
3570
3571	ip6 = mtod(m, struct ip6_hdr *);
3572
3573	if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
3574		IP6STAT_INC(ip6s_badvers);
3575		in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
3576		goto bad;
3577	}
3578
3579	/* Checks out, proceed */
3580	*mp = m;
3581	return (0);
3582
3583bad:
3584	*mp = m;
3585	return (-1);
3586}
3587#endif /* INET6 */
3588
3589/*
3590 * bridge_fragment:
3591 *
 *	Fragment an mbuf chain into multiple packets and prepend the
 *	Ethernet header to each one.
3593 */
3594static int
3595bridge_fragment(struct ifnet *ifp, struct mbuf **mp, struct ether_header *eh,
3596    int snap, struct llc *llc)
3597{
3598	struct mbuf *m = *mp, *nextpkt = NULL, *mprev = NULL, *mcur = NULL;
3599	struct ip *ip;
3600	int error = -1;
3601
3602	if (m->m_len < sizeof(struct ip) &&
3603	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
3604		goto dropit;
3605	ip = mtod(m, struct ip *);
3606
3607	m->m_pkthdr.csum_flags |= CSUM_IP;
3608	error = ip_fragment(ip, &m, ifp->if_mtu, ifp->if_hwassist);
3609	if (error)
3610		goto dropit;
3611
3612	/*
3613	 * Walk the chain and re-add the Ethernet header for
3614	 * each mbuf packet.
3615	 */
3616	for (mcur = m; mcur; mcur = mcur->m_nextpkt) {
3617		nextpkt = mcur->m_nextpkt;
3618		mcur->m_nextpkt = NULL;
3619		if (snap) {
3620			M_PREPEND(mcur, sizeof(struct llc), M_NOWAIT);
3621			if (mcur == NULL) {
3622				error = ENOBUFS;
3623				if (mprev != NULL)
3624					mprev->m_nextpkt = nextpkt;
3625				goto dropit;
3626			}
3627			bcopy(llc, mtod(mcur, caddr_t),sizeof(struct llc));
3628		}
3629
3630		M_PREPEND(mcur, ETHER_HDR_LEN, M_NOWAIT);
3631		if (mcur == NULL) {
3632			error = ENOBUFS;
3633			if (mprev != NULL)
3634				mprev->m_nextpkt = nextpkt;
3635			goto dropit;
3636		}
3637		bcopy(eh, mtod(mcur, caddr_t), ETHER_HDR_LEN);
3638
3639		/*
3640		 * The previous two M_PREPEND could have inserted one or two
3641		 * mbufs in front so we have to update the previous packet's
3642		 * m_nextpkt.
3643		 */
3644		mcur->m_nextpkt = nextpkt;
3645		if (mprev != NULL)
3646			mprev->m_nextpkt = mcur;
3647		else {
3648			/* The first mbuf in the original chain needs to be
3649			 * updated. */
3650			*mp = mcur;
3651		}
3652		mprev = mcur;
3653	}
3654
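	/*
	 * The fragments stay linked through m_nextpkt; the caller passes the
	 * whole chain back up and bridge_enqueue() eventually walks it.
	 */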
3655	KMOD_IPSTAT_INC(ips_fragmented);
3656	return (error);
3657
3658dropit:
	for (mcur = *mp; mcur; mcur = m) { /* dropping the full packet chain */
3660		m = mcur->m_nextpkt;
3661		m_freem(mcur);
3662	}
3663	return (error);
3664}
3665
3666static void
3667bridge_linkstate(struct ifnet *ifp)
3668{
3669	struct bridge_softc *sc = ifp->if_bridge;
3670	struct bridge_iflist *bif;
3671	struct epoch_tracker et;
3672
3673	NET_EPOCH_ENTER(et);
3674
3675	bif = bridge_lookup_member_if(sc, ifp);
3676	if (bif == NULL) {
3677		NET_EPOCH_EXIT(et);
3678		return;
3679	}
3680	bridge_linkcheck(sc);
3681
3682	bstp_linkstate(&bif->bif_stp);
3683
3684	NET_EPOCH_EXIT(et);
3685}
3686
3687static void
3688bridge_linkcheck(struct bridge_softc *sc)
3689{
3690	struct bridge_iflist *bif;
3691	int new_link, hasls;
3692
3693	BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(sc);
3694
3695	new_link = LINK_STATE_DOWN;
3696	hasls = 0;
3697	/* Our link is considered up if at least one of our ports is active */
3698	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
3699		if (bif->bif_ifp->if_capabilities & IFCAP_LINKSTATE)
3700			hasls++;
3701		if (bif->bif_ifp->if_link_state == LINK_STATE_UP) {
3702			new_link = LINK_STATE_UP;
3703			break;
3704		}
3705	}
3706	if (!CK_LIST_EMPTY(&sc->sc_iflist) && !hasls) {
3707		/* If no interfaces support link-state then we default to up */
3708		new_link = LINK_STATE_UP;
3709	}
3710	if_link_state_change(sc->sc_ifp, new_link);
3711}
3712