1/*
2 * Copyright (c) 2004-2012 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29/*	$NetBSD: if_bridge.c,v 1.31 2005/06/01 19:45:34 jdc Exp $	*/
30/*
31 * Copyright 2001 Wasabi Systems, Inc.
32 * All rights reserved.
33 *
34 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 *    notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 *    notice, this list of conditions and the following disclaimer in the
43 *    documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 *    must display the following acknowledgement:
46 *	This product includes software developed for the NetBSD Project by
47 *	Wasabi Systems, Inc.
48 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
49 *    or promote products derived from this software without specific prior
50 *    written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
54 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
55 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
56 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
57 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
58 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
59 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
60 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
61 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
62 * POSSIBILITY OF SUCH DAMAGE.
63 */
64
65/*
66 * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
67 * All rights reserved.
68 *
69 * Redistribution and use in source and binary forms, with or without
70 * modification, are permitted provided that the following conditions
71 * are met:
72 * 1. Redistributions of source code must retain the above copyright
73 *    notice, this list of conditions and the following disclaimer.
74 * 2. Redistributions in binary form must reproduce the above copyright
75 *    notice, this list of conditions and the following disclaimer in the
76 *    documentation and/or other materials provided with the distribution.
77 *
78 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
79 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
80 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
81 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
82 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
83 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
84 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
85 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
86 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
87 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
88 * POSSIBILITY OF SUCH DAMAGE.
89 *
90 * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp
91 */
92
93/*
94 * Network interface bridge support.
95 *
96 * TODO:
97 *
98 *	- Currently only supports Ethernet-like interfaces (Ethernet,
99 *	  802.11, VLANs on Ethernet, etc.)  Figure out a nice way
100 *	  to bridge other types of interfaces (FDDI-FDDI, and maybe
 *	  consider heterogeneous bridges).
102 */
103
104#include <sys/cdefs.h>
105
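/*
 * BRIDGE_DEBUG is force-enabled here; the #ifndef fallback below only
 * supplies the default of 0 if the line above is removed.
 */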
106#define BRIDGE_DEBUG 1
107#ifndef BRIDGE_DEBUG
108#define BRIDGE_DEBUG 0
109#endif /* BRIDGE_DEBUG */
110
111#include <sys/param.h>
112#include <sys/mbuf.h>
113#include <sys/malloc.h>
114#include <sys/protosw.h>
115#include <sys/systm.h>
116#include <sys/time.h>
117#include <sys/socket.h> /* for net/if.h */
118#include <sys/sockio.h>
119#include <sys/kernel.h>
120#include <sys/random.h>
121#include <sys/syslog.h>
122#include <sys/sysctl.h>
123#include <sys/proc.h>
124#include <sys/lock.h>
125#include <sys/mcache.h>
126
127#include <sys/kauth.h>
128
129#include <libkern/libkern.h>
130
131#include <kern/zalloc.h>
132
133#if NBPFILTER > 0
134#include <net/bpf.h>
135#endif
136#include <net/if.h>
137#include <net/if_dl.h>
138#include <net/if_types.h>
139#include <net/if_var.h>
140
141#include <netinet/in.h> /* for struct arpcom */
142#include <netinet/in_systm.h>
143#include <netinet/in_var.h>
144#include <netinet/ip.h>
145#include <netinet/ip_var.h>
146#ifdef INET6
147#include <netinet/ip6.h>
148#include <netinet6/ip6_var.h>
149#endif
150#ifdef DEV_CARP
151#include <netinet/ip_carp.h>
152#endif
153#include <netinet/if_ether.h> /* for struct arpcom */
154#include <net/bridgestp.h>
155#include <net/if_bridgevar.h>
156#include <net/if_llc.h>
157#if NVLAN > 0
158#include <net/if_vlan_var.h>
159#endif /* NVLAN > 0 */
160
161#include <net/if_ether.h>
162#include <net/dlil.h>
163#include <net/kpi_interfacefilter.h>
164
165#include <net/route.h>
166#ifdef PFIL_HOOKS
167#include <netinet/ip_fw2.h>
168#include <netinet/ip_dummynet.h>
169#endif /* PFIL_HOOKS */
170
171#if BRIDGE_DEBUG
172
173#define BR_LCKDBG_MAX			4
174
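/*
 * In debug builds the locking macros go through wrapper functions that
 * record the most recent BR_LCKDBG_MAX lock and unlock callers in the
 * softc (lock_lr/unlock_lr) to help diagnose locking problems.
 */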
175#define BRIDGE_LOCK(_sc)		bridge_lock(_sc)
176#define BRIDGE_UNLOCK(_sc)		bridge_unlock(_sc)
177#define BRIDGE_LOCK_ASSERT(_sc)		\
178	lck_mtx_assert((_sc)->sc_mtx, LCK_MTX_ASSERT_OWNED)
179#define	BRIDGE_LOCK2REF(_sc, _err)	_err = bridge_lock2ref(_sc)
180#define	BRIDGE_UNREF(_sc)		bridge_unref(_sc)
181#define	BRIDGE_XLOCK(_sc)		bridge_xlock(_sc)
182#define	BRIDGE_XDROP(_sc)		bridge_xdrop(_sc)
183
184#else /* BRIDGE_DEBUG */
185
186#define BRIDGE_LOCK(_sc)		lck_mtx_lock((_sc)->sc_mtx)
187#define BRIDGE_UNLOCK(_sc)		lck_mtx_unlock((_sc)->sc_mtx)
188#define BRIDGE_LOCK_ASSERT(_sc)		\
189	lck_mtx_assert((_sc)->sc_mtx, LCK_MTX_ASSERT_OWNED)
190#define	BRIDGE_LOCK2REF(_sc, _err)	do {				\
191	lck_mtx_assert((_sc)->sc_mtx, LCK_MTX_ASSERT_OWNED);		\
192	if ((_sc)->sc_iflist_xcnt > 0)					\
193		(_err) = EBUSY;						\
194	else								\
195		(_sc)->sc_iflist_ref++;					\
196	lck_mtx_unlock((_sc)->sc_mtx);					\
197} while (0)
198#define	BRIDGE_UNREF(_sc)		do {				\
199	lck_mtx_lock((_sc)->sc_mtx);					\
200	(_sc)->sc_iflist_ref--;						\
201	if (((_sc)->sc_iflist_xcnt > 0) && ((_sc)->sc_iflist_ref == 0))	{ \
202		lck_mtx_unlock((_sc)->sc_mtx);				\
203		wakeup(&(_sc)->sc_cv);					\
204	} else								\
205		lck_mtx_unlock((_sc)->sc_mtx);				\
206} while (0)
207#define	BRIDGE_XLOCK(_sc)		do {				\
208	lck_mtx_assert((_sc)->sc_mtx, LCK_MTX_ASSERT_OWNED);		\
209	(_sc)->sc_iflist_xcnt++;					\
210	while ((_sc)->sc_iflist_ref > 0)				\
211		msleep(&(_sc)->sc_cv, (_sc)->sc_mtx, PZERO,		\
212		    "BRIDGE_XLOCK", NULL);				\
213} while (0)
214#define	BRIDGE_XDROP(_sc)		do {				\
215	lck_mtx_assert((_sc)->sc_mtx, LCK_MTX_ASSERT_OWNED);		\
216	(_sc)->sc_iflist_xcnt--;					\
217} while (0)
218
219#endif /* BRIDGE_DEBUG */
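/*
 * BRIDGE_LOCK2REF() trades the bridge mutex for a reference on the member
 * list so that sc_iflist may be walked without holding the lock;
 * BRIDGE_UNREF() drops that reference and wakes any waiter.  BRIDGE_XLOCK()
 * is taken with the bridge mutex held and sleeps until all such references
 * have drained, giving the caller exclusive access to modify sc_iflist;
 * BRIDGE_XDROP() releases that exclusive access.
 */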
220
221#if NBPFILTER > 0
222#define BRIDGE_BPF_MTAP_INPUT(sc, m)					\
223	if (sc->sc_bpf_input)						\
224		bridge_bpf_input(sc->sc_ifp, m)
225#else /* NBPFILTER */
#define BRIDGE_BPF_MTAP_INPUT(sc, m)
227#endif /* NBPFILTER */
228
229/*
230 * Size of the route hash table.  Must be a power of two.
231 */
232/* APPLE MODIFICATION - per Wasabi performance improvement, change the hash table size */
233#if 0
234#ifndef BRIDGE_RTHASH_SIZE
235#define	BRIDGE_RTHASH_SIZE		1024
236#endif
237#else
238#ifndef BRIDGE_RTHASH_SIZE
239#define	BRIDGE_RTHASH_SIZE		256
240#endif
241#endif
242
243/* APPLE MODIFICATION - support for HW checksums */
244#if APPLE_BRIDGE_HWCKSUM_SUPPORT
245#include <netinet/udp.h>
246#include <netinet/tcp.h>
247#endif
248
249#define	BRIDGE_RTHASH_MASK		(BRIDGE_RTHASH_SIZE - 1)
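/*
 * Hash indices into the forwarding table are formed by masking the hash
 * value with BRIDGE_RTHASH_MASK, hence the power-of-two size requirement.
 */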
250
251/*
252 * Maximum number of addresses to cache.
253 */
254#ifndef BRIDGE_RTABLE_MAX
255#define	BRIDGE_RTABLE_MAX		100
256#endif
257
258
259/*
260 * Timeout (in seconds) for entries learned dynamically.
261 */
262#ifndef BRIDGE_RTABLE_TIMEOUT
263#define	BRIDGE_RTABLE_TIMEOUT		(20 * 60)	/* same as ARP */
264#endif
265
266/*
267 * Number of seconds between walks of the route list.
268 */
269#ifndef BRIDGE_RTABLE_PRUNE_PERIOD
270#define	BRIDGE_RTABLE_PRUNE_PERIOD	(5 * 60)
271#endif
272
273/*
274 * List of capabilities to possibly mask on the member interface.
275 */
276#define	BRIDGE_IFCAPS_MASK		(IFCAP_TOE|IFCAP_TSO|IFCAP_TXCSUM)
277/*
278 * List of capabilities to disable on the member interface.
279 */
280#define	BRIDGE_IFCAPS_STRIP		IFCAP_LRO
281
282/*
283 * Bridge interface list entry.
284 */
285struct bridge_iflist {
286	TAILQ_ENTRY(bridge_iflist) bif_next;
287	struct ifnet		*bif_ifp;	/* member if */
288	struct bstp_port	bif_stp;	/* STP state */
289	uint32_t		bif_flags;	/* member if flags */
290	int			bif_savedcaps;	/* saved capabilities */
291	uint32_t		bif_addrmax;	/* max # of addresses */
292	uint32_t		bif_addrcnt;	/* cur. # of addresses */
293	uint32_t		bif_addrexceeded;/* # of address violations */
294
295	interface_filter_t	bif_iff_ref;
296	struct bridge_softc	*bif_sc;
297	char		bif_promisc;		/* promiscuous mode set */
298	char		bif_proto_attached;	/* protocol attached */
299	char		bif_filter_attached;	/* interface filter attached */
300};
301
302/*
303 * Bridge route node.
304 */
305struct bridge_rtnode {
306	LIST_ENTRY(bridge_rtnode) brt_hash;	/* hash table linkage */
307	LIST_ENTRY(bridge_rtnode) brt_list;	/* list linkage */
308	struct bridge_iflist	*brt_dst;	/* destination if */
309	unsigned long		brt_expire;	/* expiration time */
310	uint8_t			brt_flags;	/* address flags */
311	uint8_t			brt_addr[ETHER_ADDR_LEN];
312	uint16_t		brt_vlan;	/* vlan id */
313
314};
315#define	brt_ifp			brt_dst->bif_ifp
316
317/*
318 * Software state for each bridge.
319 */
320struct bridge_softc {
321	struct ifnet		*sc_ifp;	/* make this an interface */
322	LIST_ENTRY(bridge_softc) sc_list;
323	lck_mtx_t		*sc_mtx;
	void			*sc_cv;		/* msleep/wakeup channel for BRIDGE_XLOCK */
325	uint32_t		sc_brtmax;	/* max # of addresses */
326	uint32_t		sc_brtcnt;	/* cur. # of addresses */
327	uint32_t		sc_brttimeout;	/* rt timeout in seconds */
328	uint32_t		sc_iflist_ref;	/* refcount for sc_iflist */
	uint32_t		sc_iflist_xcnt;	/* exclusive (XLOCK) holders of sc_iflist */
330	TAILQ_HEAD(, bridge_iflist) sc_iflist;	/* member interface list */
331	LIST_HEAD(, bridge_rtnode) *sc_rthash;	/* our forwarding table */
332	LIST_HEAD(, bridge_rtnode) sc_rtlist;	/* list version of above */
333	uint32_t		sc_rthash_key;	/* key for hash */
334	TAILQ_HEAD(, bridge_iflist) sc_spanlist;	/* span ports list */
335	struct bstp_state	sc_stp;		/* STP state */
336	uint32_t		sc_brtexceeded;	/* # of cache drops */
337	uint32_t		sc_filter_flags; /* ipf and flags */
338
339	char			sc_if_xname[IFNAMSIZ];
340	bpf_packet_func		sc_bpf_input;
341	bpf_packet_func		sc_bpf_output;
342	u_int32_t		sc_flags;
343
344#if BRIDGE_DEBUG
345	void			*lock_lr[BR_LCKDBG_MAX];        /* locking calling history */
346	int			next_lock_lr;
347	void			*unlock_lr[BR_LCKDBG_MAX];      /* unlocking caller history */
348	int			next_unlock_lr;
349#endif /* BRIDGE_DEBUG */
350};
351
#define SCF_DETACHING	0x1	/* bridge interface is being detached */
353
354decl_lck_mtx_data(static, bridge_list_mtx_data);
355static lck_mtx_t	*bridge_list_mtx = &bridge_list_mtx_data;
356
357int	bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
358
359static zone_t bridge_rtnode_pool = NULL;
360
361static int	bridge_clone_create(struct if_clone *, uint32_t, void *);
362static int	bridge_clone_destroy(struct ifnet *);
363
364static errno_t	bridge_ioctl(struct ifnet *, u_long, void *);
365#if HAS_IF_CAP
366static void	bridge_mutecaps(struct bridge_softc *);
367static void	bridge_set_ifcap(struct bridge_softc *, struct bridge_iflist *,
368		    int);
369#endif
370__private_extern__ void	bridge_ifdetach(struct bridge_iflist *, struct ifnet *);
371static int	bridge_init(struct ifnet *);
372#if HAS_BRIDGE_DUMMYNET
373static void	bridge_dummynet(struct mbuf *, struct ifnet *);
374#endif
375static void	bridge_ifstop(struct ifnet *, int);
376static int	bridge_output(struct ifnet *, struct mbuf *);
377static void	bridge_start(struct ifnet *);
378__private_extern__ errno_t bridge_input(struct ifnet *, struct mbuf *, void *);
379#if BRIDGE_MEMBER_OUT_FILTER
380static errno_t bridge_iff_output(void *, ifnet_t, protocol_family_t ,
381    mbuf_t *);
382static int	bridge_member_output(struct ifnet *, struct mbuf *,
383		    struct sockaddr *, struct rtentry *);
384#endif
385static int	bridge_enqueue(struct bridge_softc *, struct ifnet *,
386		    struct mbuf *);
387static void	bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp, int);
388
389static void	bridge_forward(struct bridge_softc *, struct bridge_iflist *,
390		    struct mbuf *m);
391
392static void	bridge_timer(void *);
393
394static void	bridge_broadcast(struct bridge_softc *, struct ifnet *,
395		    struct mbuf *, int);
396static void	bridge_span(struct bridge_softc *, struct mbuf *);
397
398static int	bridge_rtupdate(struct bridge_softc *, const uint8_t *,
399		    uint16_t, struct bridge_iflist *, int, uint8_t);
400static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *,
401		    uint16_t);
402static void	bridge_rttrim(struct bridge_softc *);
403static void	bridge_rtage(struct bridge_softc *);
404static void	bridge_rtflush(struct bridge_softc *, int);
405static int	bridge_rtdaddr(struct bridge_softc *, const uint8_t *,
406		    uint16_t);
407
408static int	bridge_rtable_init(struct bridge_softc *);
409static void	bridge_rtable_fini(struct bridge_softc *);
410
411static int	bridge_rtnode_addr_cmp(const uint8_t *, const uint8_t *);
412static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
413		    const uint8_t *, uint16_t);
414static int	bridge_rtnode_insert(struct bridge_softc *,
415		    struct bridge_rtnode *);
416static void	bridge_rtnode_destroy(struct bridge_softc *,
417		    struct bridge_rtnode *);
418#if BRIDGESTP
419static void	bridge_rtable_expire(struct ifnet *, int);
420static void	bridge_state_change(struct ifnet *, int);
421#endif /* BRIDGESTP */
422
423static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
424		    const char *name);
425static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
426		    struct ifnet *ifp);
427static void	bridge_delete_member(struct bridge_softc *,
428		    struct bridge_iflist *, int);
429static void	bridge_delete_span(struct bridge_softc *,
430		    struct bridge_iflist *);
431
432static int	bridge_ioctl_add(struct bridge_softc *, void *);
433static int	bridge_ioctl_del(struct bridge_softc *, void *);
434static int	bridge_ioctl_gifflags(struct bridge_softc *, void *);
435static int	bridge_ioctl_sifflags(struct bridge_softc *, void *);
436static int	bridge_ioctl_scache(struct bridge_softc *, void *);
437static int	bridge_ioctl_gcache(struct bridge_softc *, void *);
438static int	bridge_ioctl_gifs32(struct bridge_softc *, void *);
439static int	bridge_ioctl_gifs64(struct bridge_softc *, void *);
440static int	bridge_ioctl_rts32(struct bridge_softc *, void *);
441static int	bridge_ioctl_rts64(struct bridge_softc *, void *);
442static int	bridge_ioctl_saddr32(struct bridge_softc *, void *);
443static int	bridge_ioctl_saddr64(struct bridge_softc *, void *);
444static int	bridge_ioctl_sto(struct bridge_softc *, void *);
445static int	bridge_ioctl_gto(struct bridge_softc *, void *);
446static int	bridge_ioctl_daddr32(struct bridge_softc *, void *);
447static int	bridge_ioctl_daddr64(struct bridge_softc *, void *);
448static int	bridge_ioctl_flush(struct bridge_softc *, void *);
449static int	bridge_ioctl_gpri(struct bridge_softc *, void *);
450static int	bridge_ioctl_spri(struct bridge_softc *, void *);
451static int	bridge_ioctl_ght(struct bridge_softc *, void *);
452static int	bridge_ioctl_sht(struct bridge_softc *, void *);
453static int	bridge_ioctl_gfd(struct bridge_softc *, void *);
454static int	bridge_ioctl_sfd(struct bridge_softc *, void *);
455static int	bridge_ioctl_gma(struct bridge_softc *, void *);
456static int	bridge_ioctl_sma(struct bridge_softc *, void *);
457static int	bridge_ioctl_sifprio(struct bridge_softc *, void *);
458static int	bridge_ioctl_sifcost(struct bridge_softc *, void *);
459static int	bridge_ioctl_sifmaxaddr(struct bridge_softc *, void *);
460static int	bridge_ioctl_addspan(struct bridge_softc *, void *);
461static int	bridge_ioctl_delspan(struct bridge_softc *, void *);
462static int	bridge_ioctl_gbparam32(struct bridge_softc *, void *);
463static int	bridge_ioctl_gbparam64(struct bridge_softc *, void *);
464static int	bridge_ioctl_grte(struct bridge_softc *, void *);
465static int	bridge_ioctl_gifsstp32(struct bridge_softc *, void *);
466static int	bridge_ioctl_gifsstp64(struct bridge_softc *, void *);
467static int	bridge_ioctl_sproto(struct bridge_softc *, void *);
468static int	bridge_ioctl_stxhc(struct bridge_softc *, void *);
static int	bridge_ioctl_purge(struct bridge_softc *, void *);
470static int	bridge_ioctl_gfilt(struct bridge_softc *, void *);
471static int	bridge_ioctl_sfilt(struct bridge_softc *, void *);
472#ifdef PFIL_HOOKS
473static int	bridge_pfil(struct mbuf **, struct ifnet *, struct ifnet *,
474		    int);
475static int	bridge_ip_checkbasic(struct mbuf **mp);
476#ifdef INET6
477static int	bridge_ip6_checkbasic(struct mbuf **mp);
478#endif /* INET6 */
479static int	bridge_fragment(struct ifnet *, struct mbuf *,
480		    struct ether_header *, int, struct llc *);
481#endif /* PFIL_HOOKS */
482
483static errno_t bridge_set_bpf_tap(ifnet_t ifn, bpf_tap_mode mode, bpf_packet_func bpf_callback);
484__private_extern__ errno_t bridge_bpf_input(ifnet_t ifp, struct mbuf *m);
485__private_extern__ errno_t bridge_bpf_output(ifnet_t ifp, struct mbuf *m);
486
487static void bridge_detach(ifnet_t ifp);
488
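/*
 * m_copypacket() is mapped onto m_copym() over the entire chain; copying
 * from offset 0 with M_COPYALL duplicates the whole packet, pkthdr included.
 */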
489#define m_copypacket(m, how) m_copym(m, 0, M_COPYALL, how)
490
491/* The default bridge vlan is 1 (IEEE 802.1Q-2003 Table 9-2) */
492#define	VLANTAGOF(_m)	0
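/*
 * VLAN tags are not tracked by this implementation: VLANTAGOF() always
 * evaluates to 0, so forwarding table entries are created and looked up
 * with VLAN id 0 regardless of the frame's tag.
 */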
493
494u_int8_t bstp_etheraddr[ETHER_ADDR_LEN] =
495    { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
496
497#if BRIDGESTP
498static struct bstp_cb_ops bridge_ops = {
499	.bcb_state = bridge_state_change,
500	.bcb_rtage = bridge_rtable_expire
501};
502#endif /* BRIDGESTP */
503
504SYSCTL_DECL(_net_link);
505SYSCTL_NODE(_net_link, IFT_BRIDGE, bridge, CTLFLAG_RW|CTLFLAG_LOCKED, 0,
506    "Bridge");
507
508#if defined(PFIL_HOOKS)
509static int pfil_onlyip = 1; /* only pass IP[46] packets when pfil is enabled */
510static int pfil_bridge = 1; /* run pfil hooks on the bridge interface */
511static int pfil_member = 1; /* run pfil hooks on the member interface */
512static int pfil_ipfw = 0;   /* layer2 filter with ipfw */
513static int pfil_ipfw_arp = 0;   /* layer2 filter with ipfw */
514static int pfil_local_phys = 0; /* run pfil hooks on the physical interface for
515                                   locally destined packets */
516SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_onlyip, CTLFLAG_RW|CTLFLAG_LOCKED,
517    &pfil_onlyip, 0, "Only pass IP packets when pfil is enabled");
518SYSCTL_INT(_net_link_bridge, OID_AUTO, ipfw_arp, CTLFLAG_RW|CTLFLAG_LOCKED,
519    &pfil_ipfw_arp, 0, "Filter ARP packets through IPFW layer2");
520SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_bridge, CTLFLAG_RW|CTLFLAG_LOCKED,
521    &pfil_bridge, 0, "Packet filter on the bridge interface");
522SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_member, CTLFLAG_RW|CTLFLAG_LOCKED,
523    &pfil_member, 0, "Packet filter on the member interface");
524SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_local_phys,
525    CTLFLAG_RW|CTLFLAG_LOCKED, &pfil_local_phys, 0,
526    "Packet filter on the physical interface for locally destined packets");
527#endif /* PFIL_HOOKS */
528
529#if BRIDGESTP
530static int log_stp   = 0;   /* log STP state changes */
531SYSCTL_INT(_net_link_bridge, OID_AUTO, log_stp, CTLFLAG_RW,
532    &log_stp, 0, "Log STP state changes");
533#endif /* BRIDGESTP */
534
535struct bridge_control {
536	int		(*bc_func)(struct bridge_softc *, void *);
537	unsigned int	bc_argsize;
538	unsigned int	bc_flags;
539};
540
541#define	BC_F_COPYIN		0x01	/* copy arguments in */
542#define	BC_F_COPYOUT		0x02	/* copy arguments out */
543#define	BC_F_SUSER		0x04	/* do super-user check */
544
545static const struct bridge_control bridge_control_table32[] = {
546	{ bridge_ioctl_add,		sizeof (struct ifbreq),
547	    BC_F_COPYIN|BC_F_SUSER },
548	{ bridge_ioctl_del,		sizeof (struct ifbreq),
549	    BC_F_COPYIN|BC_F_SUSER },
550
551	{ bridge_ioctl_gifflags,	sizeof (struct ifbreq),
552	    BC_F_COPYIN|BC_F_COPYOUT },
553	{ bridge_ioctl_sifflags,	sizeof (struct ifbreq),
554	    BC_F_COPYIN|BC_F_SUSER },
555
556	{ bridge_ioctl_scache,		sizeof (struct ifbrparam),
557	    BC_F_COPYIN|BC_F_SUSER },
558	{ bridge_ioctl_gcache,		sizeof (struct ifbrparam),
559	    BC_F_COPYOUT },
560
561	{ bridge_ioctl_gifs32,		sizeof (struct ifbifconf32),
562	    BC_F_COPYIN|BC_F_COPYOUT },
563	{ bridge_ioctl_rts32,		sizeof (struct ifbaconf32),
564	    BC_F_COPYIN|BC_F_COPYOUT },
565
566	{ bridge_ioctl_saddr32,		sizeof (struct ifbareq32),
567	    BC_F_COPYIN|BC_F_SUSER },
568
569	{ bridge_ioctl_sto,		sizeof (struct ifbrparam),
570	    BC_F_COPYIN|BC_F_SUSER },
571	{ bridge_ioctl_gto,		sizeof (struct ifbrparam),
572	    BC_F_COPYOUT },
573
574	{ bridge_ioctl_daddr32,		sizeof (struct ifbareq32),
575	    BC_F_COPYIN|BC_F_SUSER },
576
577	{ bridge_ioctl_flush,		sizeof (struct ifbreq),
578	    BC_F_COPYIN|BC_F_SUSER },
579
580	{ bridge_ioctl_gpri,		sizeof (struct ifbrparam),
581	    BC_F_COPYOUT },
582	{ bridge_ioctl_spri,		sizeof (struct ifbrparam),
583	    BC_F_COPYIN|BC_F_SUSER },
584
585	{ bridge_ioctl_ght,		sizeof (struct ifbrparam),
586	    BC_F_COPYOUT },
587	{ bridge_ioctl_sht,		sizeof (struct ifbrparam),
588	    BC_F_COPYIN|BC_F_SUSER },
589
590	{ bridge_ioctl_gfd,		sizeof (struct ifbrparam),
591	    BC_F_COPYOUT },
592	{ bridge_ioctl_sfd,		sizeof (struct ifbrparam),
593	    BC_F_COPYIN|BC_F_SUSER },
594
595	{ bridge_ioctl_gma,		sizeof (struct ifbrparam),
596	    BC_F_COPYOUT },
597	{ bridge_ioctl_sma,		sizeof (struct ifbrparam),
598	    BC_F_COPYIN|BC_F_SUSER },
599
600	{ bridge_ioctl_sifprio,		sizeof (struct ifbreq),
601	    BC_F_COPYIN|BC_F_SUSER },
602
603	{ bridge_ioctl_sifcost,		sizeof (struct ifbreq),
604	    BC_F_COPYIN|BC_F_SUSER },
605
606	{ bridge_ioctl_gfilt,		sizeof (struct ifbrparam),
607	    BC_F_COPYOUT },
608	{ bridge_ioctl_sfilt,		sizeof (struct ifbrparam),
609	    BC_F_COPYIN|BC_F_SUSER },
610
611	{ bridge_ioctl_purge,		sizeof (struct ifbreq),
612	    BC_F_COPYIN|BC_F_SUSER },
613
614	{ bridge_ioctl_addspan,		sizeof (struct ifbreq),
615		BC_F_COPYIN|BC_F_SUSER },
616	{ bridge_ioctl_delspan,		sizeof (struct ifbreq),
617		BC_F_COPYIN|BC_F_SUSER },
618
619	{ bridge_ioctl_gbparam32,	sizeof (struct ifbropreq32),
620	    BC_F_COPYOUT },
621
622	{ bridge_ioctl_grte,		sizeof (struct ifbrparam),
623	    BC_F_COPYOUT },
624
625	{ bridge_ioctl_gifsstp32,	sizeof (struct ifbpstpconf32),
626	    BC_F_COPYIN|BC_F_COPYOUT },
627
628	{ bridge_ioctl_sproto,		sizeof (struct ifbrparam),
629	    BC_F_COPYIN|BC_F_SUSER },
630
631	{ bridge_ioctl_stxhc,		sizeof (struct ifbrparam),
632	    BC_F_COPYIN|BC_F_SUSER },
633
634	{ bridge_ioctl_sifmaxaddr,	sizeof (struct ifbreq),
635	    BC_F_COPYIN|BC_F_SUSER },
636};
637
638static const struct bridge_control bridge_control_table64[] = {
639	{ bridge_ioctl_add,		sizeof (struct ifbreq),
640	    BC_F_COPYIN|BC_F_SUSER },
641	{ bridge_ioctl_del,		sizeof (struct ifbreq),
642	    BC_F_COPYIN|BC_F_SUSER },
643
644	{ bridge_ioctl_gifflags,	sizeof (struct ifbreq),
645	    BC_F_COPYIN|BC_F_COPYOUT },
646	{ bridge_ioctl_sifflags,	sizeof (struct ifbreq),
647	    BC_F_COPYIN|BC_F_SUSER },
648
649	{ bridge_ioctl_scache,		sizeof (struct ifbrparam),
650	    BC_F_COPYIN|BC_F_SUSER },
651	{ bridge_ioctl_gcache,		sizeof (struct ifbrparam),
652	    BC_F_COPYOUT },
653
654	{ bridge_ioctl_gifs64,		sizeof (struct ifbifconf64),
655	    BC_F_COPYIN|BC_F_COPYOUT },
656	{ bridge_ioctl_rts64,		sizeof (struct ifbaconf64),
657	    BC_F_COPYIN|BC_F_COPYOUT },
658
659	{ bridge_ioctl_saddr64,		sizeof (struct ifbareq64),
660	    BC_F_COPYIN|BC_F_SUSER },
661
662	{ bridge_ioctl_sto,		sizeof (struct ifbrparam),
663	    BC_F_COPYIN|BC_F_SUSER },
664	{ bridge_ioctl_gto,		sizeof (struct ifbrparam),
665	    BC_F_COPYOUT },
666
667	{ bridge_ioctl_daddr64,		sizeof (struct ifbareq64),
668	    BC_F_COPYIN|BC_F_SUSER },
669
670	{ bridge_ioctl_flush,		sizeof (struct ifbreq),
671	    BC_F_COPYIN|BC_F_SUSER },
672
673	{ bridge_ioctl_gpri,		sizeof (struct ifbrparam),
674	    BC_F_COPYOUT },
675	{ bridge_ioctl_spri,		sizeof (struct ifbrparam),
676	    BC_F_COPYIN|BC_F_SUSER },
677
678	{ bridge_ioctl_ght,		sizeof (struct ifbrparam),
679	    BC_F_COPYOUT },
680	{ bridge_ioctl_sht,		sizeof (struct ifbrparam),
681	    BC_F_COPYIN|BC_F_SUSER },
682
683	{ bridge_ioctl_gfd,		sizeof (struct ifbrparam),
684	    BC_F_COPYOUT },
685	{ bridge_ioctl_sfd,		sizeof (struct ifbrparam),
686	    BC_F_COPYIN|BC_F_SUSER },
687
688	{ bridge_ioctl_gma,		sizeof (struct ifbrparam),
689	    BC_F_COPYOUT },
690	{ bridge_ioctl_sma,		sizeof (struct ifbrparam),
691	    BC_F_COPYIN|BC_F_SUSER },
692
693	{ bridge_ioctl_sifprio,		sizeof (struct ifbreq),
694	    BC_F_COPYIN|BC_F_SUSER },
695
696	{ bridge_ioctl_sifcost,		sizeof (struct ifbreq),
697	    BC_F_COPYIN|BC_F_SUSER },
698
699	{ bridge_ioctl_gfilt,		sizeof (struct ifbrparam),
700	    BC_F_COPYOUT },
701	{ bridge_ioctl_sfilt,		sizeof (struct ifbrparam),
702	    BC_F_COPYIN|BC_F_SUSER },
703
704	{ bridge_ioctl_purge,	sizeof (struct ifbreq),
705	    BC_F_COPYIN|BC_F_SUSER },
706
707	{ bridge_ioctl_addspan,		sizeof (struct ifbreq),
708	    BC_F_COPYIN|BC_F_SUSER },
709	{ bridge_ioctl_delspan,		sizeof (struct ifbreq),
710	    BC_F_COPYIN|BC_F_SUSER },
711
712	{ bridge_ioctl_gbparam64,	sizeof (struct ifbropreq64),
713	    BC_F_COPYOUT },
714
715	{ bridge_ioctl_grte,		sizeof (struct ifbrparam),
716	    BC_F_COPYOUT },
717
718	{ bridge_ioctl_gifsstp64,	sizeof (struct ifbpstpconf64),
719	    BC_F_COPYIN|BC_F_COPYOUT },
720
721	{ bridge_ioctl_sproto,		sizeof (struct ifbrparam),
722	    BC_F_COPYIN|BC_F_SUSER },
723
724	{ bridge_ioctl_stxhc,		sizeof (struct ifbrparam),
725	    BC_F_COPYIN|BC_F_SUSER },
726
727	{ bridge_ioctl_sifmaxaddr,	sizeof (struct ifbreq),
728	    BC_F_COPYIN|BC_F_SUSER },
729};
730
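/*
 * The 32-bit and 64-bit tables must contain the same commands in the same
 * order: entries are selected by the ifd_cmd index supplied through
 * SIOCSDRVSPEC/SIOCGDRVSPEC, and the common table length below is taken
 * from the 32-bit table.
 */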
731static const unsigned int bridge_control_table_size =
732    sizeof (bridge_control_table32) / sizeof (bridge_control_table32[0]);
733
734static LIST_HEAD(, bridge_softc) bridge_list =
735    LIST_HEAD_INITIALIZER(bridge_list);
736
737static lck_grp_t *bridge_lock_grp = NULL;
738static lck_attr_t *bridge_lock_attr = NULL;
739
740static if_clone_t bridge_cloner = NULL;
741
742static int if_bridge_txstart = 0;
743SYSCTL_INT(_net_link_bridge, OID_AUTO, txstart, CTLFLAG_RW | CTLFLAG_LOCKED,
744    &if_bridge_txstart, 0, "Bridge interface uses TXSTART model");
745
746#if BRIDGE_DEBUG
747static int if_bridge_debug = 0;
748SYSCTL_INT(_net_link_bridge, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED,
749    &if_bridge_debug, 0, "Bridge debug");
750
751static void printf_ether_header(struct ether_header *eh);
752static void printf_mbuf_data(mbuf_t m, size_t offset, size_t len);
753static void printf_mbuf_pkthdr(mbuf_t m, const char *prefix, const char *suffix);
754static void printf_mbuf(mbuf_t m, const char *prefix, const char *suffix);
755static void link_print(struct sockaddr_dl *dl_p);
756
757static void bridge_lock(struct bridge_softc *);
758static void bridge_unlock(struct bridge_softc *);
759static int bridge_lock2ref(struct bridge_softc *);
760static void bridge_unref(struct bridge_softc *);
761static void bridge_xlock(struct bridge_softc *);
762static void bridge_xdrop(struct bridge_softc *);
763
764static void
765bridge_lock(struct bridge_softc *sc)
766{
767	void *lr_saved = __builtin_return_address(0);
768
769	lck_mtx_assert(sc->sc_mtx, LCK_MTX_ASSERT_NOTOWNED);
770
771	lck_mtx_lock(sc->sc_mtx);
772
773	sc->lock_lr[sc->next_lock_lr] = lr_saved;
	sc->next_lock_lr = (sc->next_lock_lr+1) % BR_LCKDBG_MAX;
775}
776
777static void
778bridge_unlock(struct bridge_softc *sc)
779{
780	void *lr_saved = __builtin_return_address(0);
781
782	lck_mtx_assert(sc->sc_mtx, LCK_MTX_ASSERT_OWNED);
783
784	sc->unlock_lr[sc->next_unlock_lr] = lr_saved;
	sc->next_unlock_lr = (sc->next_unlock_lr+1) % BR_LCKDBG_MAX;
786
787	lck_mtx_unlock(sc->sc_mtx);
788}
789
790static int
791bridge_lock2ref(struct bridge_softc *sc)
792{
793	int error = 0;
794	void *lr_saved = __builtin_return_address(0);
795
796	lck_mtx_assert(sc->sc_mtx, LCK_MTX_ASSERT_OWNED);
797
798	if (sc->sc_iflist_xcnt > 0)
799		error = EBUSY;
800	else
801		sc->sc_iflist_ref++;
802
803	sc->unlock_lr[sc->next_unlock_lr] = lr_saved;
	sc->next_unlock_lr = (sc->next_unlock_lr+1) % BR_LCKDBG_MAX;
805	lck_mtx_unlock(sc->sc_mtx);
806
807	return (error);
808}
809
810static void
811bridge_unref(struct bridge_softc *sc)
812{
813	void *lr_saved = __builtin_return_address(0);
814
815	lck_mtx_assert(sc->sc_mtx, LCK_MTX_ASSERT_NOTOWNED);
816
817	lck_mtx_lock(sc->sc_mtx);
818	sc->lock_lr[sc->next_lock_lr] = lr_saved;
	sc->next_lock_lr = (sc->next_lock_lr+1) % BR_LCKDBG_MAX;
820
821	sc->sc_iflist_ref--;
822
823	sc->unlock_lr[sc->next_unlock_lr] = lr_saved;
	sc->next_unlock_lr = (sc->next_unlock_lr+1) % BR_LCKDBG_MAX;
825	if ((sc->sc_iflist_xcnt > 0) && (sc->sc_iflist_ref == 0)) {
826		lck_mtx_unlock(sc->sc_mtx);
827		wakeup(&sc->sc_cv);
828	} else
829		lck_mtx_unlock(sc->sc_mtx);
830}
831
832static void
833bridge_xlock(struct bridge_softc *sc)
834{
835	void *lr_saved = __builtin_return_address(0);
836
837	lck_mtx_assert(sc->sc_mtx, LCK_MTX_ASSERT_OWNED);
838
839	sc->sc_iflist_xcnt++;
840	while (sc->sc_iflist_ref > 0) {
841		sc->unlock_lr[sc->next_unlock_lr] = lr_saved;
		sc->next_unlock_lr = (sc->next_unlock_lr+1) % BR_LCKDBG_MAX;
843
844		msleep(&sc->sc_cv, sc->sc_mtx, PZERO, "BRIDGE_XLOCK", NULL);
845
846		sc->lock_lr[sc->next_lock_lr] = lr_saved;
		sc->next_lock_lr = (sc->next_lock_lr+1) % BR_LCKDBG_MAX;
848	}
849}
850
851static void
852bridge_xdrop(struct bridge_softc *sc)
853{
854	lck_mtx_assert(sc->sc_mtx, LCK_MTX_ASSERT_OWNED);
855
856	sc->sc_iflist_xcnt--;
857}
858
static void
860printf_mbuf_pkthdr(mbuf_t m, const char *prefix, const char *suffix)
861{
862	if (m)
863		printf("%spktlen: %u rcvif: %p header: %p nextpkt: %p%s",
864		    prefix ? prefix : "", (unsigned int)mbuf_pkthdr_len(m),
865		    mbuf_pkthdr_rcvif(m), mbuf_pkthdr_header(m),
866		    mbuf_nextpkt(m), suffix ? suffix : "");
867	else
		printf("%s<NULL>%s\n", prefix ? prefix : "", suffix ? suffix : "");
869}
870
static void
872printf_mbuf(mbuf_t m, const char *prefix, const char *suffix)
873{
874	if (m) {
875		printf("%s%p type: %u flags: 0x%x len: %u data: %p maxlen: %u "
876		    "datastart: %p next: %p%s", prefix ? prefix : "",
877		    m, mbuf_type(m), mbuf_flags(m), (unsigned int)mbuf_len(m),
878		    mbuf_data(m), (unsigned int)mbuf_maxlen(m),
879		    mbuf_datastart(m), mbuf_next(m),
880		    !suffix || (mbuf_flags(m) & MBUF_PKTHDR) ? "" : suffix);
881		if ((mbuf_flags(m) & MBUF_PKTHDR))
882			printf_mbuf_pkthdr(m, " ", suffix);
883	} else
		printf("%s<NULL>%s\n", prefix ? prefix : "", suffix ? suffix : "");
885}
886
static void
888printf_mbuf_data(mbuf_t m, size_t offset, size_t len)
889{
890	mbuf_t			n;
891	size_t			i, j;
892	size_t			pktlen, mlen, maxlen;
893	unsigned char	*ptr;
894
895	pktlen = mbuf_pkthdr_len(m);
896
897	if (offset > pktlen)
898		return;
899
	maxlen = (pktlen - offset > len) ? (offset + len) : pktlen;
901	n = m;
902	mlen = mbuf_len(n);
903	ptr = mbuf_data(n);
904	for (i = 0, j = 0; i < maxlen; i++, j++) {
905		if (j >= mlen) {
906			n = mbuf_next(n);
907			if (n == 0)
908				break;
909			ptr = mbuf_data(n);
910			mlen = mbuf_len(n);
911			j = 0;
912		}
913		if (i >= offset) {
914			printf("%02x%s", ptr[j], i % 2 ? " " : "");
915		}
916	}
917}
918
919static void
920printf_ether_header(struct ether_header *eh)
921{
922	printf("%02x:%02x:%02x:%02x:%02x:%02x > "
923	    "%02x:%02x:%02x:%02x:%02x:%02x 0x%04x ",
924	    eh->ether_shost[0], eh->ether_shost[1], eh->ether_shost[2],
925	    eh->ether_shost[3], eh->ether_shost[4], eh->ether_shost[5],
926	    eh->ether_dhost[0], eh->ether_dhost[1], eh->ether_dhost[2],
927	    eh->ether_dhost[3], eh->ether_dhost[4], eh->ether_dhost[5],
	    ntohs(eh->ether_type));
929}
930
931static void
932link_print(struct sockaddr_dl *dl_p)
933{
934	int i;
935
#if 1
	printf("sdl len %d index %d family %d type 0x%x nlen %d alen %d"
	    " slen %d addr ", dl_p->sdl_len,
	    dl_p->sdl_index, dl_p->sdl_family, dl_p->sdl_type,
	    dl_p->sdl_nlen, dl_p->sdl_alen, dl_p->sdl_slen);
#endif
	for (i = 0; i < dl_p->sdl_alen; i++)
		printf("%s%x", i ? ":" : "", (CONST_LLADDR(dl_p))[i]);
944	printf("\n");
945}
946
947#endif /* BRIDGE_DEBUG */
948
949/*
950 * bridgeattach:
951 *
952 *	Pseudo-device attach routine.
953 */
954__private_extern__ int
955bridgeattach(__unused int n)
956{
957	int error;
958	lck_grp_attr_t *lck_grp_attr = NULL;
959	struct ifnet_clone_params ifnet_clone_params;
960
961	bridge_rtnode_pool = zinit(sizeof (struct bridge_rtnode),
962	    1024 * sizeof (struct bridge_rtnode), 0, "bridge_rtnode");
963	zone_change(bridge_rtnode_pool, Z_CALLERACCT, FALSE);
964
965	lck_grp_attr = lck_grp_attr_alloc_init();
966
967	bridge_lock_grp = lck_grp_alloc_init("if_bridge", lck_grp_attr);
968
969	bridge_lock_attr = lck_attr_alloc_init();
970
971#if BRIDGE_DEBUG
972	lck_attr_setdebug(bridge_lock_attr);
973#endif
974
975	lck_mtx_init(bridge_list_mtx, bridge_lock_grp, bridge_lock_attr);
976
977	/* can free the attributes once we've allocated the group lock */
978	lck_grp_attr_free(lck_grp_attr);
979
980	LIST_INIT(&bridge_list);
981
982#if BRIDGESTP
983	bstp_sys_init();
984#endif /* BRIDGESTP */
985
986	ifnet_clone_params.ifc_name = "bridge";
987	ifnet_clone_params.ifc_create = bridge_clone_create;
988	ifnet_clone_params.ifc_destroy = bridge_clone_destroy;
989
990	error = ifnet_clone_attach(&ifnet_clone_params, &bridge_cloner);
991	if (error != 0)
992		printf("%s: ifnet_clone_attach failed %d\n", __func__, error);
993
994	return (error);
995}
996
997#if defined(PFIL_HOOKS)
998/*
999 * handler for net.link.bridge.pfil_ipfw
1000 */
1001static int
1002sysctl_pfil_ipfw SYSCTL_HANDLER_ARGS
1003{
1004#pragma unused(arg1, arg2)
1005	int enable = pfil_ipfw;
1006	int error;
1007
1008	error = sysctl_handle_int(oidp, &enable, 0, req);
1009	enable = (enable) ? 1 : 0;
1010
1011	if (enable != pfil_ipfw) {
1012		pfil_ipfw = enable;
1013
		/*
		 * Disable pfil so that ipfw doesn't run twice; if the user
		 * really wants both, they can re-enable pfil_bridge and/or
		 * pfil_member.  Also allow non-IP packets, as ipfw can filter
		 * by layer-2 type.
		 */
1020		if (pfil_ipfw) {
1021			pfil_onlyip = 0;
1022			pfil_bridge = 0;
1023			pfil_member = 0;
1024		}
1025	}
1026
1027	return (error);
1028}
1029
1030SYSCTL_PROC(_net_link_bridge, OID_AUTO, ipfw, CTLTYPE_INT|CTLFLAG_RW,
1031	    &pfil_ipfw, 0, &sysctl_pfil_ipfw, "I", "Layer2 filter with IPFW");
1032#endif /* PFIL_HOOKS */
1033
1034/*
1035 * bridge_clone_create:
1036 *
1037 *	Create a new bridge instance.
1038 */
1039static int
1040bridge_clone_create(struct if_clone *ifc, uint32_t unit, __unused void *params)
1041{
1042	struct ifnet *ifp = NULL;
1043	struct bridge_softc *sc;
1044	u_char eaddr[6];
1045	struct ifnet_init_eparams init_params;
1046	errno_t error = 0;
1047	uint32_t sdl_buffer[offsetof(struct sockaddr_dl, sdl_data) +
1048	    IFNAMSIZ + ETHER_ADDR_LEN];
1049	struct sockaddr_dl *sdl = (struct sockaddr_dl *)sdl_buffer;
1050
1051	sc = _MALLOC(sizeof (*sc), M_DEVBUF, M_WAITOK);
1052	memset(sc, 0, sizeof (*sc));
1053
1054	sc->sc_mtx = lck_mtx_alloc_init(bridge_lock_grp, bridge_lock_attr);
1055	sc->sc_brtmax = BRIDGE_RTABLE_MAX;
1056	sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
1057	sc->sc_filter_flags = IFBF_FILT_DEFAULT;
1058#ifndef BRIDGE_IPF
1059	/*
1060	 * For backwards compatibility with previous behaviour...
1061	 * Switch off filtering on the bridge itself if BRIDGE_IPF is
1062	 * not defined.
1063	 */
1064	sc->sc_filter_flags &= ~IFBF_FILT_USEIPF;
1065#endif
1066
1067	/* Initialize our routing table. */
1068	error = bridge_rtable_init(sc);
1069	if (error != 0) {
1070		printf("%s: bridge_rtable_init failed %d\n", __func__, error);
1071		goto done;
1072	}
1073
1074	TAILQ_INIT(&sc->sc_iflist);
1075	TAILQ_INIT(&sc->sc_spanlist);
1076
1077	/* use the interface name as the unique id for ifp recycle */
1078	snprintf(sc->sc_if_xname, sizeof (sc->sc_if_xname), "%s%d",
	    ifc->ifc_name, unit);
1080	bzero(&init_params, sizeof (init_params));
1081	init_params.ver			= IFNET_INIT_CURRENT_VERSION;
1082	init_params.len			= sizeof (init_params);
1083	if (if_bridge_txstart) {
1084		init_params.start	= bridge_start;
1085	} else {
1086		init_params.flags	= IFNET_INIT_LEGACY;
1087		init_params.output	= bridge_output;
1088	}
1089	init_params.uniqueid		= sc->sc_if_xname;
1090	init_params.uniqueid_len	= strlen(sc->sc_if_xname);
1091	init_params.sndq_maxlen		= IFQ_MAXLEN;
1092	init_params.name		= ifc->ifc_name;
1093	init_params.unit		= unit;
1094	init_params.family		= IFNET_FAMILY_ETHERNET;
1095	init_params.type		= IFT_BRIDGE;
1096	init_params.demux		= ether_demux;
1097	init_params.add_proto		= ether_add_proto;
1098	init_params.del_proto		= ether_del_proto;
1099	init_params.check_multi		= ether_check_multi;
1100	init_params.framer		= ether_frameout;
1101	init_params.softc		= sc;
1102	init_params.ioctl		= bridge_ioctl;
1103	init_params.set_bpf_tap		= bridge_set_bpf_tap;
1104	init_params.detach		= bridge_detach;
1105	init_params.broadcast_addr	= etherbroadcastaddr;
1106	init_params.broadcast_len	= ETHER_ADDR_LEN;
1107	error = ifnet_allocate_extended(&init_params, &ifp);
1108	if (error != 0) {
1109		printf("%s: ifnet_allocate failed %d\n", __func__, error);
1110		goto done;
1111	}
1112	sc->sc_ifp = ifp;
1113
1114	error = ifnet_set_mtu(ifp, ETHERMTU);
1115	if (error != 0) {
1116		printf("%s: ifnet_set_mtu failed %d\n", __func__, error);
1117		goto done;
1118	}
1119	error = ifnet_set_addrlen(ifp, ETHER_ADDR_LEN);
1120	if (error != 0) {
1121		printf("%s: ifnet_set_addrlen failed %d\n", __func__, error);
1122		goto done;
1123	}
1124	error = ifnet_set_hdrlen(ifp, ETHER_HDR_LEN);
1125	if (error != 0) {
1126		printf("%s: ifnet_set_hdrlen failed %d\n", __func__, error);
1127		goto done;
1128	}
1129	error = ifnet_set_flags(ifp,
1130	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST,
1131	    0xffff);
1132	if (error != 0) {
1133		printf("%s: ifnet_set_flags failed %d\n", __func__, error);
1134		goto done;
1135	}
1136
1137#if 0
1138	/*
1139	 * Generate a random ethernet address with a locally administered
1140	 * address.
1141	 *
1142	 * Since we are using random ethernet addresses for the bridge, it is
1143	 * possible that we might have address collisions, so make sure that
1144	 * this hardware address isn't already in use on another bridge.
1145	 */
1146	{
1147		int retry;
1148
1149		for (retry = 1; retry != 0;) {
1150			struct ifnet *bifp;
1151			struct bridge_softc *sc2;
1152
1153			read_random(eaddr, ETHER_ADDR_LEN);
1154			eaddr[0] &= ~1;		/* clear multicast bit */
1155			eaddr[0] |= 2;		/* set the LAA bit */
1156			retry = 0;
1157			lck_mtx_lock(bridge_list_mtx);
1158			LIST_FOREACH(sc2, &bridge_list, sc_list) {
1159				bifp = sc2->sc_ifp;
1160				if (memcmp(eaddr, ifnet_lladdr(bifp),
1161				    ETHER_ADDR_LEN) == 0)
1162					retry = 1;
1163			}
1164			lck_mtx_unlock(bridge_list_mtx);
1165		}
1166	}
1167#else
1168	/*
1169	 * Generate a random ethernet address and use the private AC:DE:48
1170	 * OUI code.
1171	 */
1172	{
1173		uint32_t r;
1174
1175		read_random(&r, sizeof (r));
1176		eaddr[0] = 0xAC;
1177		eaddr[1] = 0xDE;
1178		eaddr[2] = 0x48;
1179		eaddr[3] = (r >> 0)  & 0xffu;
1180		eaddr[4] = (r >> 8)  & 0xffu;
1181		eaddr[5] = (r >> 16) & 0xffu;
1182	}
1183#endif
1184
1185	memset(sdl, 0, sizeof (sdl_buffer));
1186	sdl->sdl_family = AF_LINK;
1187	sdl->sdl_nlen = strlen(sc->sc_if_xname);
1188	sdl->sdl_alen = ETHER_ADDR_LEN;
1189	sdl->sdl_len = offsetof(struct sockaddr_dl, sdl_data);
1190	memcpy(sdl->sdl_data, sc->sc_if_xname, sdl->sdl_nlen);
1191	memcpy(LLADDR(sdl), eaddr, ETHER_ADDR_LEN);
1192
1193#if BRIDGE_DEBUG
1194	if (if_bridge_debug)
1195		link_print(sdl);
1196#endif
1197
1198	error = ifnet_attach(ifp, NULL);
1199	if (error != 0) {
1200		printf("%s: ifnet_attach failed %d\n", __func__, error);
1201		goto done;
1202	}
1203
1204	error = ifnet_set_lladdr_and_type(ifp, eaddr, ETHER_ADDR_LEN,
1205	    IFT_ETHER);
1206	if (error != 0) {
1207		printf("%s: ifnet_set_lladdr_and_type failed %d\n", __func__,
1208		    error);
1209		goto done;
1210	}
1211
1212#if APPLE_BRIDGE_HWCKSUM_SUPPORT
1213	/*
1214	 * APPLE MODIFICATION - our bridge can support HW checksums
1215	 * (useful if underlying interfaces support them) on TX,
1216	 * RX is not that interesting, since the stack just looks to
1217	 * see if the packet has been checksummed already (I think)
1218	 * but we might as well indicate we support it
1219	 */
1220	ifp->if_capabilities =
1221	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_UDPv4_Tx |
1222	    IFCAP_CSUM_IPv4_Rx | IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx;
1223#endif
1224
1225#if BRIDGESTP
1226	bstp_attach(&sc->sc_stp, &bridge_ops);
1227#endif /* BRIDGESTP */
1228
1229	lck_mtx_lock(bridge_list_mtx);
1230	LIST_INSERT_HEAD(&bridge_list, sc, sc_list);
1231	lck_mtx_unlock(bridge_list_mtx);
1232
1233	/* attach as ethernet */
1234	error = bpf_attach(ifp, DLT_EN10MB, sizeof (struct ether_header),
1235	    NULL, NULL);
1236
1237done:
1238	if (error != 0) {
1239		printf("%s failed error %d\n", __func__, error);
1240		/* Cleanup TBD */
1241	}
1242
1243	return (error);
1244}
1245
1246/*
1247 * bridge_clone_destroy:
1248 *
1249 *	Destroy a bridge instance.
1250 */
1251static int
1252bridge_clone_destroy(struct ifnet *ifp)
1253{
1254	struct bridge_softc *sc = ifp->if_softc;
1255	struct bridge_iflist *bif;
1256	errno_t error;
1257
1258	BRIDGE_LOCK(sc);
1259	if ((sc->sc_flags & SCF_DETACHING)) {
1260		BRIDGE_UNLOCK(sc);
1261		return (0);
1262	}
1263	sc->sc_flags |= SCF_DETACHING;
1264
1265	bridge_ifstop(ifp, 1);
1266
1267	error = ifnet_set_flags(ifp, 0, IFF_UP);
1268	if (error != 0) {
1269		printf("%s: ifnet_set_flags failed %d\n", __func__, error);
1270	}
1271
1272	while ((bif = TAILQ_FIRST(&sc->sc_iflist)) != NULL)
1273		bridge_delete_member(sc, bif, 0);
1274
1275	while ((bif = TAILQ_FIRST(&sc->sc_spanlist)) != NULL) {
1276		bridge_delete_span(sc, bif);
1277	}
1278
1279	BRIDGE_UNLOCK(sc);
1280
1281	error = ifnet_detach(ifp);
1282	if (error != 0) {
1283		panic("bridge_clone_destroy: ifnet_detach(%p) failed %d\n",
1284		    ifp, error);
1285		if ((sc = (struct bridge_softc *)ifnet_softc(ifp)) != NULL) {
1286			BRIDGE_LOCK(sc);
1287			sc->sc_flags &= ~SCF_DETACHING;
1288			BRIDGE_UNLOCK(sc);
1289		}
1290		return (0);
1291	}
1292
1293	return (0);
1294}
1295
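/*
 * DRVSPEC is the body shared by the 32-bit and 64-bit SIOCSDRVSPEC and
 * SIOCGDRVSPEC cases in bridge_ioctl().  It expects the enclosing block to
 * declare 'ifd', 'bc', 'args' and a 'bridge_control_table' of the matching
 * width; it validates the command index, checks privileges, copies the
 * argument in, calls the handler with the bridge lock held, and copies the
 * result back out.
 */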
1296#define DRVSPEC do { \
1297	if (ifd->ifd_cmd >= bridge_control_table_size) {		\
1298		error = EINVAL;						\
1299		break;							\
1300	}								\
1301	bc = &bridge_control_table[ifd->ifd_cmd];			\
1302									\
1303	if (cmd == SIOCGDRVSPEC &&					\
1304	    (bc->bc_flags & BC_F_COPYOUT) == 0) {			\
1305		error = EINVAL;						\
1306		break;							\
1307	} else if (cmd == SIOCSDRVSPEC &&				\
1308	    (bc->bc_flags & BC_F_COPYOUT) != 0) {			\
1309		error = EINVAL;						\
1310		break;							\
1311	}								\
1312									\
1313	if (bc->bc_flags & BC_F_SUSER) {				\
1314		error = kauth_authorize_generic(kauth_cred_get(),	\
1315		    KAUTH_GENERIC_ISSUSER);				\
1316		if (error)						\
1317			break;						\
1318	}								\
1319									\
1320	if (ifd->ifd_len != bc->bc_argsize ||				\
1321	    ifd->ifd_len > sizeof (args)) {				\
1322		error = EINVAL;						\
1323		break;							\
1324	}								\
1325									\
1326	bzero(&args, sizeof (args));					\
1327	if (bc->bc_flags & BC_F_COPYIN) {				\
1328		error = copyin(ifd->ifd_data, &args, ifd->ifd_len);	\
1329		if (error)						\
1330			break;						\
1331	}								\
1332									\
1333	BRIDGE_LOCK(sc);						\
1334	error = (*bc->bc_func)(sc, &args);				\
1335	BRIDGE_UNLOCK(sc);						\
1336	if (error)							\
1337		break;							\
1338									\
1339	if (bc->bc_flags & BC_F_COPYOUT)				\
1340		error = copyout(&args, ifd->ifd_data, ifd->ifd_len);	\
1341} while (0)
1342
1343
1344/*
1345 * bridge_ioctl:
1346 *
1347 *	Handle a control request from the operator.
1348 */
1349static errno_t
1350bridge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1351{
1352	struct bridge_softc *sc = ifp->if_softc;
1353	struct ifreq *ifr = (struct ifreq *)data;
1354	int error = 0;
1355
1356	lck_mtx_assert(sc->sc_mtx, LCK_MTX_ASSERT_NOTOWNED);
1357
1358#if BRIDGE_DEBUG
1359	if (if_bridge_debug)
1360		printf("%s: ifp %p cmd 0x%08lx (%c%c [%lu] %c %lu)\n",
1361		    __func__, ifp, cmd, (cmd & IOC_IN) ? 'I' : ' ',
1362		    (cmd & IOC_OUT) ? 'O' : ' ', IOCPARM_LEN(cmd),
1363		    (char)IOCGROUP(cmd), cmd & 0xff);
1364#endif
1365
1366	switch (cmd) {
1367
1368	case SIOCSIFADDR:
1369	case SIOCAIFADDR:
1370		ifnet_set_flags(ifp, IFF_UP, IFF_UP);
1371		break;
1372
1373	case SIOCGIFMEDIA32:
1374	case SIOCGIFMEDIA64:
1375		error = EINVAL;
1376		break;
1377
1378	case SIOCADDMULTI:
1379	case SIOCDELMULTI:
1380		break;
1381
1382	case SIOCSDRVSPEC32:
1383	case SIOCGDRVSPEC32: {
1384		union {
1385			struct ifbreq ifbreq;
1386			struct ifbifconf32 ifbifconf;
1387			struct ifbareq32 ifbareq;
1388			struct ifbaconf32 ifbaconf;
1389			struct ifbrparam ifbrparam;
1390			struct ifbropreq32 ifbropreq;
1391		} args;
1392		struct ifdrv32 *ifd = (struct ifdrv32 *)data;
1393		const struct bridge_control *bridge_control_table =
1394		    bridge_control_table32, *bc;
1395
1396		DRVSPEC;
1397
1398		break;
1399	}
1400	case SIOCSDRVSPEC64:
1401	case SIOCGDRVSPEC64: {
1402		union {
1403			struct ifbreq ifbreq;
1404			struct ifbifconf64 ifbifconf;
1405			struct ifbareq64 ifbareq;
1406			struct ifbaconf64 ifbaconf;
1407			struct ifbrparam ifbrparam;
1408			struct ifbropreq64 ifbropreq;
1409		} args;
1410		struct ifdrv64 *ifd = (struct ifdrv64 *)data;
1411		const struct bridge_control *bridge_control_table =
1412		    bridge_control_table64, *bc;
1413
1414		DRVSPEC;
1415
1416		break;
1417	}
1418
1419	case SIOCSIFFLAGS:
1420		if (!(ifp->if_flags & IFF_UP) &&
1421		    (ifp->if_flags & IFF_RUNNING)) {
1422			/*
1423			 * If interface is marked down and it is running,
1424			 * then stop and disable it.
1425			 */
1426			BRIDGE_LOCK(sc);
1427			bridge_ifstop(ifp, 1);
1428			BRIDGE_UNLOCK(sc);
1429		} else if ((ifp->if_flags & IFF_UP) &&
1430		    !(ifp->if_flags & IFF_RUNNING)) {
1431			/*
1432			 * If interface is marked up and it is stopped, then
1433			 * start it.
1434			 */
1435			BRIDGE_LOCK(sc);
1436			error = bridge_init(ifp);
1437			BRIDGE_UNLOCK(sc);
1438		}
1439		break;
1440
1441	case SIOCSIFLLADDR:
1442		error = ifnet_set_lladdr(ifp, ifr->ifr_addr.sa_data,
1443		    ifr->ifr_addr.sa_len);
1444		if (error != 0)
1445			printf("%s: ifnet_set_lladdr failed %d\n", __func__,
1446			    error);
1447		break;
1448
1449	case SIOCSIFMTU:
1450		/* Do not allow the MTU to be changed on the bridge */
1451		error = EINVAL;
1452		break;
1453
1454	default:
1455		error = ether_ioctl(ifp, cmd, data);
1456#if BRIDGE_DEBUG
1457		if (error != 0 && error != EOPNOTSUPP)
1458			printf("%s: ether_ioctl ifp %p cmd 0x%08lx "
1459			    "(%c%c [%lu] %c %lu) failed error: %d\n",
1460			    __func__, ifp, cmd, (cmd & IOC_IN) ? 'I' : ' ',
1461			    (cmd & IOC_OUT) ? 'O' : ' ',
1462			    IOCPARM_LEN(cmd), (char)IOCGROUP(cmd),
1463			    cmd & 0xff, error);
1464#endif /* BRIDGE_DEBUG */
1465		break;
1466	}
1467	lck_mtx_assert(sc->sc_mtx, LCK_MTX_ASSERT_NOTOWNED);
1468
1469	return (error);
1470}
1471
1472#if HAS_IF_CAP
1473/*
1474 * bridge_mutecaps:
1475 *
1476 *	Clear or restore unwanted capabilities on the member interface
1477 */
1478static void
1479bridge_mutecaps(struct bridge_softc *sc)
1480{
1481	struct bridge_iflist *bif;
1482	int enabled, mask;
1483
1484	/* Initial bitmask of capabilities to test */
1485	mask = BRIDGE_IFCAPS_MASK;
1486
1487	TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
		/* Every member must support it or it's disabled */
1489		mask &= bif->bif_savedcaps;
1490	}
1491
1492	TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
1493		enabled = bif->bif_ifp->if_capenable;
1494		enabled &= ~BRIDGE_IFCAPS_STRIP;
1495		/* strip off mask bits and enable them again if allowed */
1496		enabled &= ~BRIDGE_IFCAPS_MASK;
1497		enabled |= mask;
1498
1499		bridge_set_ifcap(sc, bif, enabled);
1500	}
1501
1502}
1503
1504static void
1505bridge_set_ifcap(struct bridge_softc *sc, struct bridge_iflist *bif, int set)
1506{
1507	struct ifnet *ifp = bif->bif_ifp;
1508	struct ifreq ifr;
1509	int error;
1510
1511	bzero(&ifr, sizeof (ifr));
1512	ifr.ifr_reqcap = set;
1513
1514	if (ifp->if_capenable != set) {
1515		IFF_LOCKGIANT(ifp);
1516		error = (*ifp->if_ioctl)(ifp, SIOCSIFCAP, (caddr_t)&ifr);
1517		IFF_UNLOCKGIANT(ifp);
1518		if (error)
			printf("%s: %s%d: error setting interface "
			    "capabilities on %s\n", __func__,
			    ifnet_name(sc->sc_ifp), ifnet_unit(sc->sc_ifp),
			    ifp->if_xname);
1522	}
1523}
1524#endif /* HAS_IF_CAP */
1525
1526/*
1527 * bridge_lookup_member:
1528 *
 *	Look up a bridge member interface by name.
1530 */
1531static struct bridge_iflist *
1532bridge_lookup_member(struct bridge_softc *sc, const char *name)
1533{
1534	struct bridge_iflist *bif;
1535	struct ifnet *ifp;
1536	char if_xname[IFNAMSIZ];
1537
1538	BRIDGE_LOCK_ASSERT(sc);
1539
1540	TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
1541		ifp = bif->bif_ifp;
1542		snprintf(if_xname, sizeof (if_xname), "%s%d",
		    ifnet_name(ifp), ifnet_unit(ifp));
1544		if (strncmp(if_xname, name, sizeof (if_xname)) == 0)
1545			return (bif);
1546	}
1547
1548	return (NULL);
1549}
1550
1551/*
1552 * bridge_lookup_member_if:
1553 *
 *	Look up a bridge member interface by its ifnet pointer.
1555 */
1556static struct bridge_iflist *
1557bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp)
1558{
1559	struct bridge_iflist *bif;
1560
1561	BRIDGE_LOCK_ASSERT(sc);
1562
1563	TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
1564		if (bif->bif_ifp == member_ifp)
1565			return (bif);
1566	}
1567
1568	return (NULL);
1569}
1570
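/*
 * bridge_iff_input:
 *
 *	Interface filter input callback for a bridge member interface.
 *	Moves the mbuf data pointer back to the link-layer header (when the
 *	frame header is contiguous with the mbuf data), hands the packet to
 *	bridge_input(), and restores the original data pointer if the bridge
 *	did not consume the packet.
 */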
1571static errno_t
1572bridge_iff_input(void *cookie, ifnet_t ifp, __unused protocol_family_t protocol,
1573    mbuf_t *data, char **frame_ptr)
1574{
1575	errno_t error = 0;
1576	struct bridge_iflist *bif = (struct bridge_iflist *)cookie;
1577	struct bridge_softc *sc = bif->bif_sc;
1578	int included = 0;
1579	size_t frmlen = 0;
1580	mbuf_t m = *data;
1581
1582	if ((m->m_flags & M_PROTO1))
1583		goto out;
1584
1585	if (*frame_ptr >= (char *)mbuf_datastart(m) &&
1586	    *frame_ptr <= (char *)mbuf_data(m)) {
1587		included = 1;
1588		frmlen = (char *)mbuf_data(m) - *frame_ptr;
1589	}
1590#if BRIDGE_DEBUG
1591	if (if_bridge_debug) {
1592		printf("%s: %s%d from %s%d m %p data %p frame %p %s "
1593		    "frmlen %lu\n", __func__, ifnet_name(sc->sc_ifp),
1594		    ifnet_unit(sc->sc_ifp), ifnet_name(ifp), ifnet_unit(ifp),
1595		    m, mbuf_data(m), *frame_ptr,
1596		    included ? "inside" : "outside", frmlen);
1597
1598		if (if_bridge_debug > 1) {
1599			printf_mbuf(m, "bridge_iff_input[", "\n");
1600			printf_ether_header((struct ether_header *)
1601			    (void *)*frame_ptr);
1602			printf_mbuf_data(m, 0, 20);
1603			printf("\n");
1604		}
1605	}
1606#endif /* BRIDGE_DEBUG */
1607
	/*
	 * Move the data pointer back to the start of the frame,
	 * i.e. the link-layer header.
	 */
1609	if (included) {
1610		(void) mbuf_setdata(m, (char *)mbuf_data(m) - frmlen,
1611		    mbuf_len(m) + frmlen);
1612		(void) mbuf_pkthdr_adjustlen(m, frmlen);
1613	} else {
1614		printf("%s: frame_ptr outside mbuf\n", __func__);
1615		goto out;
1616	}
1617
1618	error = bridge_input(ifp, m, *frame_ptr);
1619
1620	/* Adjust packet back to original */
1621	if (error == 0) {
1622		(void) mbuf_setdata(m, (char *)mbuf_data(m) + frmlen,
1623		    mbuf_len(m) - frmlen);
		(void) mbuf_pkthdr_adjustlen(m, -(int)frmlen);
1625	}
1626#if BRIDGE_DEBUG
1627	if (if_bridge_debug > 1) {
1628		printf("\n");
1629		printf_mbuf(m, "bridge_iff_input]", "\n");
1630	}
1631#endif /* BRIDGE_DEBUG */
1632
1633out:
1634	lck_mtx_assert(sc->sc_mtx, LCK_MTX_ASSERT_NOTOWNED);
1635
1636	return (error);
1637}
1638
1639
1640#if BRIDGE_MEMBER_OUT_FILTER
1641static errno_t
1642bridge_iff_output(void *cookie, ifnet_t ifp, __unused protocol_family_t protocol, mbuf_t *data)
1643{
1644	errno_t error = 0;
1645	struct bridge_iflist *bif = (struct bridge_iflist *)cookie;
1646	struct bridge_softc *sc = bif->bif_sc;
1647	mbuf_t m = *data;
1648
1649	if ((m->m_flags & M_PROTO1))
1650		goto out;
1651
1652#if BRIDGE_DEBUG
1653	if (if_bridge_debug) {
1654		printf("%s: %s%d from %s%d m %p data %p\n", __func__,
1655		    ifnet_name(sc->sc_ifp), ifnet_unit(sc->sc_ifp),
1656		    ifnet_name(ifp), ifnet_unit(ifp), m, mbuf_data(m));
1657	}
1658#endif /* BRIDGE_DEBUG */
1659
1660	error = bridge_member_output(sc, ifp, m);
1661	if (error != 0) {
1662		printf("%s: bridge_member_output failed error %d\n", __func__,
1663		    error);
1664	}
1665
1666out:
1667	lck_mtx_assert(sc->sc_mtx, LCK_MTX_ASSERT_NOTOWNED);
1668
1669	return (error);
1670}
1671#endif /* BRIDGE_MEMBER_OUT_FILTER */
1672
1673
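/*
 * bridge_iff_event:
 *
 *	Interface filter event callback for a bridge member interface.
 *	Handles member detach, link state changes, and interface flag
 *	changes (re-arming promiscuous mode when the member comes up).
 */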
1674static void
1675bridge_iff_event(void *cookie, ifnet_t ifp, __unused protocol_family_t protocol,
1676    const struct kev_msg *event_msg)
1677{
1678	struct bridge_iflist *bif = (struct bridge_iflist *)cookie;
1679
1680	if (event_msg->vendor_code == KEV_VENDOR_APPLE &&
1681		event_msg->kev_class == KEV_NETWORK_CLASS &&
1682		event_msg->kev_subclass == KEV_DL_SUBCLASS) {
1683		switch (event_msg->event_code) {
1684			case KEV_DL_IF_DETACHING:
1685			case KEV_DL_IF_DETACHED:
1686				bridge_ifdetach(bif, ifp);
1687				break;
1688
1689			case KEV_DL_LINK_OFF:
1690			case KEV_DL_LINK_ON: {
1691#if BRIDGESTP
1692				bstp_linkstate(ifp, event_msg->event_code);
1693#endif /* BRIDGESTP */
1694				break;
1695			}
1696
1697			case KEV_DL_SIFFLAGS: {
1698				if (bif->bif_promisc == 0 &&
1699				    (ifp->if_flags & IFF_UP)) {
1700					errno_t error =
1701					    ifnet_set_promiscuous(ifp, 1);
1702					if (error != 0) {
1703						printf("%s: "
1704						    "ifnet_set_promiscuous"
1705						    "(%s%d) failed %d\n",
1706						    __func__, ifnet_name(ifp),
1707						    ifnet_unit(ifp), error);
1708					} else {
1709						bif->bif_promisc = 1;
1710					}
1711				}
1712				break;
1713			}
1714
1715			default:
1716				break;
1717		}
1718	}
1719}
1720
1721/*
1722 * bridge_iff_detached:
1723 *
1724 *	Detach an interface from a bridge.  Called when a member
1725 *	interface is detaching.
1726 */
1727static void
bridge_iff_detached(void *cookie, ifnet_t ifp)
1729{
1730	struct bridge_iflist *bif = (struct bridge_iflist *)cookie;
1731
1732#if BRIDGE_DEBUG
1733	printf("%s: %s%d\n", __func__, ifnet_name(ifp), ifnet_unit(ifp));
1734#endif
1735
1736	bridge_ifdetach(bif, ifp);
1737
1738	_FREE(bif, M_DEVBUF);
1739}
1740
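/*
 * bridge_proto_input:
 *
 *	Input handler for the PF_BRIDGE protocol attached to member
 *	interfaces.  Bridged traffic is intercepted by the interface filter,
 *	so this handler is not expected to see any packets.
 */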
1741static errno_t
1742bridge_proto_input(ifnet_t ifp, __unused protocol_family_t protocol,
1743    __unused mbuf_t packet, __unused char *header)
1744{
1745	printf("%s: unexpected packet from %s%d\n", __func__,
1746	    ifnet_name(ifp), ifnet_unit(ifp));
1747	return (0);
1748}
1749
1750static int
1751bridge_attach_protocol(struct ifnet *ifp)
1752{
1753	int	error;
1754	struct ifnet_attach_proto_param	reg;
1755
1756	printf("%s: %s%d\n", __func__, ifnet_name(ifp), ifnet_unit(ifp));
1757
1758	bzero(&reg, sizeof (reg));
1759	reg.input = bridge_proto_input;
1760
1761	error = ifnet_attach_protocol(ifp, PF_BRIDGE, &reg);
1762	if (error)
1763		printf("%s: ifnet_attach_protocol(%s%d) failed, %d\n",
1764		    __func__, ifnet_name(ifp), ifnet_unit(ifp), error);
1765
1766	return (error);
1767}
1768
1769static int
1770bridge_detach_protocol(struct ifnet *ifp)
1771{
1772	int         error;
1773
1774	printf("%s: %s%d\n", __func__, ifnet_name(ifp), ifnet_unit(ifp));
1775
1776	error = ifnet_detach_protocol(ifp, PF_BRIDGE);
1777	if (error)
1778		printf("%s: ifnet_detach_protocol(%s%d) failed, %d\n",
1779		    __func__, ifnet_name(ifp), ifnet_unit(ifp), error);
1780
1781	return (error);
1782}
1783
1784/*
1785 * bridge_delete_member:
1786 *
1787 *	Delete the specified member interface.
1788 */
1789static void
1790bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif,
1791    int gone)
1792{
1793	struct ifnet *ifs = bif->bif_ifp;
1794
1795	BRIDGE_LOCK_ASSERT(sc);
1796
1797	if (!gone) {
1798		switch (ifs->if_type) {
1799		case IFT_ETHER:
1800		case IFT_L2VLAN:
1801			/*
1802			 * Take the interface out of promiscuous mode.
1803			 */
1804			if (bif->bif_promisc)
1805				(void) ifnet_set_promiscuous(ifs, 0);
1806			break;
1807
1808		case IFT_GIF:
1809			break;
1810
1811		default:
1812#ifdef DIAGNOSTIC
1813			panic("bridge_delete_member: impossible");
1814#endif
1815			break;
1816		}
1817
1818#if HAS_IF_CAP
1819		/* re-enable any interface capabilities */
1820		bridge_set_ifcap(sc, bif, bif->bif_savedcaps);
1821#endif
1822	}
1823
1824	if (bif->bif_proto_attached) {
1825		/* Respect lock ordering with DLIL lock */
1826		BRIDGE_UNLOCK(sc);
1827		(void) bridge_detach_protocol(ifs);
1828		BRIDGE_LOCK(sc);
1829	}
1830#if BRIDGESTP
1831	if (bif->bif_flags & IFBIF_STP)
1832		bstp_disable(&bif->bif_stp);
1833#endif /* BRIDGESTP */
1834
1835	ifs->if_bridge = NULL;
1836	BRIDGE_XLOCK(sc);
1837	TAILQ_REMOVE(&sc->sc_iflist, bif, bif_next);
1838	BRIDGE_XDROP(sc);
1839
1840	ifnet_release(ifs);
1841
1842#if HAS_IF_CAP
1843	bridge_mutecaps(sc);	/* recalculate now that this interface is removed */
1844#endif /* HAS_IF_CAP */
1845	bridge_rtdelete(sc, ifs, IFBF_FLUSHALL);
1846	KASSERT(bif->bif_addrcnt == 0,
1847	    ("%s: %d bridge routes referenced", __func__, bif->bif_addrcnt));
1848
1849#if BRIDGESTP
1850	BRIDGE_UNLOCK(sc);
1851	bstp_destroy(&bif->bif_stp);	/* prepare to free */
1852	BRIDGE_LOCK(sc);
1853#endif /* BRIDGESTP */
1854
1855	if (bif->bif_filter_attached) {
1856		/* Respect lock ordering with DLIL lock */
1857		BRIDGE_UNLOCK(sc);
1858		iflt_detach(bif->bif_iff_ref);
1859		BRIDGE_LOCK(sc);
1860	} else {
1861		_FREE(bif, M_DEVBUF);
1862	}
1863}
1864
1865/*
1866 * bridge_delete_span:
1867 *
1868 *	Delete the specified span interface.
1869 */
1870static void
1871bridge_delete_span(struct bridge_softc *sc, struct bridge_iflist *bif)
1872{
1873	BRIDGE_LOCK_ASSERT(sc);
1874
1875	KASSERT(bif->bif_ifp->if_bridge == NULL,
1876	    ("%s: not a span interface", __func__));
1877
1878	ifnet_release(bif->bif_ifp);
1879
1880	TAILQ_REMOVE(&sc->sc_spanlist, bif, bif_next);
1881	_FREE(bif, M_DEVBUF);
1882}
1883
1884static int
1885bridge_ioctl_add(struct bridge_softc *sc, void *arg)
1886{
1887	struct ifbreq *req = arg;
1888	struct bridge_iflist *bif = NULL;
1889	struct ifnet *ifs;
1890	int error = 0;
1891	struct iff_filter iff;
1892
1893	ifs = ifunit(req->ifbr_ifsname);
1894	if (ifs == NULL)
1895		return (ENOENT);
1896	if (ifs->if_ioctl == NULL)	/* must be supported */
1897		return (EINVAL);
1898
1899	/* If it's in the span list, it can't be a member. */
1900	TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next)
1901		if (ifs == bif->bif_ifp)
1902			return (EBUSY);
1903
1904	/* Allow the first Ethernet member to define the MTU */
1905	if (ifs->if_type != IFT_GIF) {
1906		if (TAILQ_EMPTY(&sc->sc_iflist))
1907			sc->sc_ifp->if_mtu = ifs->if_mtu;
1908		else if (sc->sc_ifp->if_mtu != ifs->if_mtu) {
1909			printf("%s: %s%d: invalid MTU for %s%d\n", __func__,
1910			    ifnet_name(sc->sc_ifp), ifnet_unit(sc->sc_ifp),
1911			    ifnet_name(ifs), ifnet_unit(ifs));
1912			return (EINVAL);
1913		}
1914	}
1915
1916	if (ifs->if_bridge == sc)
1917		return (EEXIST);
1918
1919	if (ifs->if_bridge != NULL)
1920		return (EBUSY);
1921
1922	bif = _MALLOC(sizeof (*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
1923	if (bif == NULL)
1924		return (ENOMEM);
1925
1926	bif->bif_ifp = ifs;
1927	bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
1928#if HAS_IF_CAP
1929	bif->bif_savedcaps = ifs->if_capenable;
1930#endif /* HAS_IF_CAP */
1931	bif->bif_sc = sc;
1932
1933	ifnet_reference(ifs);
1934
1935	ifs->if_bridge = sc;
1936#if BRIDGESTP
1937	bstp_create(&sc->sc_stp, &bif->bif_stp, bif->bif_ifp);
1938#endif /* BRIDGESTP */
1939	/*
1940	 * XXX: XLOCK HERE!?!
1941	 */
1942	TAILQ_INSERT_TAIL(&sc->sc_iflist, bif, bif_next);
1943
1944#if HAS_IF_CAP
1945	/* Set interface capabilities to the intersection set of all members */
1946	bridge_mutecaps(sc);
1947#endif /* HAS_IF_CAP */
1948
1949
1950	switch (ifs->if_type) {
1951	case IFT_ETHER:
1952	case IFT_L2VLAN:
1953		/*
1954		 * Place the interface into promiscuous mode.
1955		 */
1956		error = ifnet_set_promiscuous(ifs, 1);
1957		if (error) {
1958			/* Ignore error when device is not up */
1959			if (error != ENETDOWN)
1960				goto out;
1961			error = 0;
1962		} else {
1963			bif->bif_promisc = 1;
1964		}
1965		break;
1966
1967	case IFT_GIF:
1968		break;
1969
1970	default:
1971		error = EINVAL;
1972		goto out;
1973	}
1974
1975	/*
1976	 * Respect lock ordering with DLIL lock for the following operations
1977	 */
1978	BRIDGE_UNLOCK(sc);
1979
1980	/*
1981	 * install an interface filter
1982	 */
1983	memset(&iff, 0, sizeof (struct iff_filter));
1984	iff.iff_cookie = bif;
1985	iff.iff_name = "com.apple.kernel.bsd.net.if_bridge";
1986	iff.iff_input = bridge_iff_input;
1987#if BRIDGE_MEMBER_OUT_FILTER
1988	iff.iff_output = bridge_iff_output;
1989#endif /* BRIDGE_MEMBER_OUT_FILTER */
1990	iff.iff_event = bridge_iff_event;
1991	iff.iff_detached = bridge_iff_detached;
1992	error = iflt_attach(ifs, &iff, &bif->bif_iff_ref);
1993	if (error != 0) {
1994		printf("%s: iflt_attach failed %d\n", __func__, error);
1995		BRIDGE_LOCK(sc);
1996		goto out;
1997	}
1998	bif->bif_filter_attached = 1;
1999
2000	/*
2001	 * install a dummy "bridge" protocol
2002	 */
2003	if ((error = bridge_attach_protocol(ifs)) != 0) {
2005		printf("%s: bridge_attach_protocol failed %d\n",
2006		    __func__, error);
2007		BRIDGE_LOCK(sc);
2008		goto out;
2010	}
2011	bif->bif_proto_attached = 1;
2012
2013	BRIDGE_LOCK(sc);
2014
2015out:
2016	if (error && bif != NULL)
2017		bridge_delete_member(sc, bif, 1);
2018
2019	return (error);
2020}
2021
2022static int
2023bridge_ioctl_del(struct bridge_softc *sc, void *arg)
2024{
2025	struct ifbreq *req = arg;
2026	struct bridge_iflist *bif;
2027
2028	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
2029	if (bif == NULL)
2030		return (ENOENT);
2031
2032	bridge_delete_member(sc, bif, 0);
2033
2034	return (0);
2035}
2036
2037static int
2038bridge_ioctl_purge(__unused struct bridge_softc *sc, __unused void *arg)
2039{
2040	return (0);
2041}
2042
2043static int
2044bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
2045{
2046	struct ifbreq *req = arg;
2047	struct bridge_iflist *bif;
2048	struct bstp_port *bp;
2049
2050	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
2051	if (bif == NULL)
2052		return (ENOENT);
2053
2054	bp = &bif->bif_stp;
2055	req->ifbr_ifsflags = bif->bif_flags;
2056	req->ifbr_state = bp->bp_state;
2057	req->ifbr_priority = bp->bp_priority;
2058	req->ifbr_path_cost = bp->bp_path_cost;
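	/* The port number reported is the low 12 bits of the interface index */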
2059	req->ifbr_portno = bif->bif_ifp->if_index & 0xfff;
2060	req->ifbr_proto = bp->bp_protover;
2061	req->ifbr_role = bp->bp_role;
2062	req->ifbr_stpflags = bp->bp_flags;
2063	req->ifbr_addrcnt = bif->bif_addrcnt;
2064	req->ifbr_addrmax = bif->bif_addrmax;
2065	req->ifbr_addrexceeded = bif->bif_addrexceeded;
2066
2067	/* Copy STP state options as flags */
2068	if (bp->bp_operedge)
2069		req->ifbr_ifsflags |= IFBIF_BSTP_EDGE;
2070	if (bp->bp_flags & BSTP_PORT_AUTOEDGE)
2071		req->ifbr_ifsflags |= IFBIF_BSTP_AUTOEDGE;
2072	if (bp->bp_ptp_link)
2073		req->ifbr_ifsflags |= IFBIF_BSTP_PTP;
2074	if (bp->bp_flags & BSTP_PORT_AUTOPTP)
2075		req->ifbr_ifsflags |= IFBIF_BSTP_AUTOPTP;
2076	if (bp->bp_flags & BSTP_PORT_ADMEDGE)
2077		req->ifbr_ifsflags |= IFBIF_BSTP_ADMEDGE;
2078	if (bp->bp_flags & BSTP_PORT_ADMCOST)
2079		req->ifbr_ifsflags |= IFBIF_BSTP_ADMCOST;
2080	return (0);
2081}
2082
2083static int
2084bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
2085{
2086	struct ifbreq *req = arg;
2087	struct bridge_iflist *bif;
2088#if BRIDGESTP
2089	struct bstp_port *bp;
2090	int error;
2091#endif /* BRIDGESTP */
2092
2093	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
2094	if (bif == NULL)
2095		return (ENOENT);
2096
2097	if (req->ifbr_ifsflags & IFBIF_SPAN)
2098		/* SPAN is readonly */
2099		return (EINVAL);
2100
2101
2102#if BRIDGESTP
2103	if (req->ifbr_ifsflags & IFBIF_STP) {
2104		if ((bif->bif_flags & IFBIF_STP) == 0) {
2105			error = bstp_enable(&bif->bif_stp);
2106			if (error)
2107				return (error);
2108		}
2109	} else {
2110		if ((bif->bif_flags & IFBIF_STP) != 0)
2111			bstp_disable(&bif->bif_stp);
2112	}
2113
2114	/* Pass on STP flags */
2115	bp = &bif->bif_stp;
2116	bstp_set_edge(bp, req->ifbr_ifsflags & IFBIF_BSTP_EDGE ? 1 : 0);
2117	bstp_set_autoedge(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOEDGE ? 1 : 0);
2118	bstp_set_ptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_PTP ? 1 : 0);
2119	bstp_set_autoptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOPTP ? 1 : 0);
2120#else /* !BRIDGESTP */
2121	if (req->ifbr_ifsflags & IFBIF_STP)
2122		return (EOPNOTSUPP);
2123#endif /* !BRIDGESTP */
2124
2125	/* Save the bits relating to the bridge */
2126	bif->bif_flags = req->ifbr_ifsflags & IFBIFMASK;
2127
2128
2129	return (0);
2130}
2131
2132static int
2133bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
2134{
2135	struct ifbrparam *param = arg;
2136
2137	sc->sc_brtmax = param->ifbrp_csize;
2138	bridge_rttrim(sc);
2139
2140	return (0);
2141}
2142
2143static int
2144bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
2145{
2146	struct ifbrparam *param = arg;
2147
2148	param->ifbrp_csize = sc->sc_brtmax;
2149
2150	return (0);
2151}
2152
2153
2154#define BRIDGE_IOCTL_GIFS do { \
2155	struct bridge_iflist *bif;					\
2156	struct ifbreq breq;						\
2157	char *buf, *outbuf;						\
2158	unsigned int count, buflen, len;				\
2159									\
2160	count = 0;							\
2161	TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next)			\
2162		count++;						\
2163	TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next)			\
2164		count++;						\
2165									\
2166	buflen = sizeof (breq) * count;					\
2167	if (bifc->ifbic_len == 0) {					\
2168		bifc->ifbic_len = buflen;				\
2169		return (0);						\
2170	}								\
2171	BRIDGE_UNLOCK(sc);						\
2172	outbuf = _MALLOC(buflen, M_TEMP, M_WAITOK | M_ZERO);		\
2173	BRIDGE_LOCK(sc);						\
2174									\
2175	count = 0;							\
2176	buf = outbuf;							\
2177	len = min(bifc->ifbic_len, buflen);				\
2178	bzero(&breq, sizeof (breq));					\
2179	TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {			\
2180		if (len < sizeof (breq))				\
2181			break;						\
2182									\
2183		snprintf(breq.ifbr_ifsname, sizeof (breq.ifbr_ifsname),	\
2184		    "%s%d", ifnet_name(bif->bif_ifp),			\
2185		    ifnet_unit(bif->bif_ifp));				\
2186		/* Fill in the ifbreq structure */			\
2187		error = bridge_ioctl_gifflags(sc, &breq);		\
2188		if (error)						\
2189			break;						\
2190		memcpy(buf, &breq, sizeof (breq));			\
2191		count++;						\
2192		buf += sizeof (breq);					\
2193		len -= sizeof (breq);					\
2194	}								\
2195	TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next) {		\
2196		if (len < sizeof (breq))				\
2197			break;						\
2198									\
2199		snprintf(breq.ifbr_ifsname, sizeof (breq.ifbr_ifsname),	\
2200		    "%s%d", ifnet_name(bif->bif_ifp),			\
2201		    ifnet_unit(bif->bif_ifp));				\
2202		breq.ifbr_ifsflags = bif->bif_flags;			\
2203		breq.ifbr_portno = bif->bif_ifp->if_index & 0xfff;	\
2204		memcpy(buf, &breq, sizeof (breq));			\
2205		count++;						\
2206		buf += sizeof (breq);					\
2207		len -= sizeof (breq);					\
2208	}								\
2209									\
2210	BRIDGE_UNLOCK(sc);						\
2211	bifc->ifbic_len = sizeof (breq) * count;			\
2212	error = copyout(outbuf, bifc->ifbic_req, bifc->ifbic_len);	\
2213	BRIDGE_LOCK(sc);						\
2214	_FREE(outbuf, M_TEMP);						\
2215} while (0)
2216
2217static int
2218bridge_ioctl_gifs64(struct bridge_softc *sc, void *arg)
2219{
2220	struct ifbifconf64 *bifc = arg;
2221	int error = 0;
2222
2223	BRIDGE_IOCTL_GIFS;
2224
2225	return (error);
2226}
2227
2228static int
2229bridge_ioctl_gifs32(struct bridge_softc *sc, void *arg)
2230{
2231	struct ifbifconf32 *bifc = arg;
2232	int error = 0;
2233
2234	BRIDGE_IOCTL_GIFS;
2235
2236	return (error);
2237}
2238
2239
2240#define BRIDGE_IOCTL_RTS do {						    \
2241	struct bridge_rtnode *brt;					    \
2242	char *buf, *outbuf;						    \
2243	unsigned int count, buflen, len;				    \
2244	struct timespec now;						    \
2245									    \
2246	if (bac->ifbac_len == 0)					    \
2247		return (0);						    \
2248									    \
2249	count = 0;							    \
2250	LIST_FOREACH(brt, &sc->sc_rtlist, brt_list)			    \
2251		count++;						    \
2252	buflen = sizeof (bareq) * count;				    \
2253									    \
2254	BRIDGE_UNLOCK(sc);						    \
2255	outbuf = _MALLOC(buflen, M_TEMP, M_WAITOK | M_ZERO);		    \
2256	BRIDGE_LOCK(sc);						    \
2257									    \
2258	count = 0;							    \
2259	buf = outbuf;							    \
2260	len = min(bac->ifbac_len, buflen);				    \
2261	bzero(&bareq, sizeof (bareq));					    \
2262	LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {			    \
2263		if (len < sizeof (bareq))				    \
2264			goto out;					    \
2265		snprintf(bareq.ifba_ifsname, sizeof (bareq.ifba_ifsname),    \
2266		    "%s%d", ifnet_name(brt->brt_ifp),			    \
2267		    ifnet_unit(brt->brt_ifp));				    \
2268		memcpy(bareq.ifba_dst, brt->brt_addr, sizeof (brt->brt_addr)); \
2269		bareq.ifba_vlan = brt->brt_vlan;			    \
2270		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {   \
2271			nanouptime(&now);				    \
2272			if ((unsigned long)now.tv_sec < brt->brt_expire)    \
2273				bareq.ifba_expire =			    \
2274				    brt->brt_expire - now.tv_sec;	    \
2275		} else							    \
2276			bareq.ifba_expire = 0;				    \
2277		bareq.ifba_flags = brt->brt_flags;			    \
2278									    \
2279		memcpy(buf, &bareq, sizeof (bareq));			    \
2280		count++;						    \
2281		buf += sizeof (bareq);					    \
2282		len -= sizeof (bareq);					    \
2283	}								    \
2284out:									    \
2285	BRIDGE_UNLOCK(sc);						    \
2286	bac->ifbac_len = sizeof (bareq) * count;				    \
2287	error = copyout(outbuf, bac->ifbac_req, bac->ifbac_len);	    \
2288	BRIDGE_LOCK(sc);						    \
2289	_FREE(outbuf, M_TEMP);						    \
2290	return (error);							    \
2291} while (0)
2292
2293static int
2294bridge_ioctl_rts64(struct bridge_softc *sc, void *arg)
2295{
2296	struct ifbaconf64 *bac = arg;
2297	struct ifbareq64 bareq;
2298	int error = 0;
2299
2300	BRIDGE_IOCTL_RTS;
2301
2302	return (error);
2303}
2304
2305static int
2306bridge_ioctl_rts32(struct bridge_softc *sc, void *arg)
2307{
2308	struct ifbaconf32 *bac = arg;
2309	struct ifbareq32 bareq;
2310	int error = 0;
2311
2312	BRIDGE_IOCTL_RTS;
2313
2314	return (error);
2315}
2316
2317static int
2318bridge_ioctl_saddr32(struct bridge_softc *sc, void *arg)
2319{
2320	struct ifbareq32 *req = arg;
2321	struct bridge_iflist *bif;
2322	int error;
2323
2324	bif = bridge_lookup_member(sc, req->ifba_ifsname);
2325	if (bif == NULL)
2326		return (ENOENT);
2327
2328	error = bridge_rtupdate(sc, req->ifba_dst, req->ifba_vlan, bif, 1,
2329	    req->ifba_flags);
2330
2331	return (error);
2332}
2333
2334static int
2335bridge_ioctl_saddr64(struct bridge_softc *sc, void *arg)
2336{
2337	struct ifbareq64 *req = arg;
2338	struct bridge_iflist *bif;
2339	int error;
2340
2341	bif = bridge_lookup_member(sc, req->ifba_ifsname);
2342	if (bif == NULL)
2343		return (ENOENT);
2344
2345	error = bridge_rtupdate(sc, req->ifba_dst, req->ifba_vlan, bif, 1,
2346	    req->ifba_flags);
2347
2348	return (error);
2349}
2350
2351static int
2352bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
2353{
2354	struct ifbrparam *param = arg;
2355
2356	sc->sc_brttimeout = param->ifbrp_ctime;
2357	return (0);
2358}
2359
2360static int
2361bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
2362{
2363	struct ifbrparam *param = arg;
2364
2365	param->ifbrp_ctime = sc->sc_brttimeout;
2366	return (0);
2367}
2368
2369static int
2370bridge_ioctl_daddr32(struct bridge_softc *sc, void *arg)
2371{
2372	struct ifbareq32 *req = arg;
2373
2374	return (bridge_rtdaddr(sc, req->ifba_dst, req->ifba_vlan));
2375}
2376
2377static int
2378bridge_ioctl_daddr64(struct bridge_softc *sc, void *arg)
2379{
2380	struct ifbareq64 *req = arg;
2381
2382	return (bridge_rtdaddr(sc, req->ifba_dst, req->ifba_vlan));
2383}
2384
2385static int
2386bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
2387{
2388	struct ifbreq *req = arg;
2389
2390	bridge_rtflush(sc, req->ifbr_ifsflags);
2391	return (0);
2392}
2393
2394static int
2395bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
2396{
2397	struct ifbrparam *param = arg;
2398	struct bstp_state *bs = &sc->sc_stp;
2399
2400	param->ifbrp_prio = bs->bs_bridge_priority;
2401	return (0);
2402}
2403
2404static int
2405bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
2406{
2407#if BRIDGESTP
2408	struct ifbrparam *param = arg;
2409
2410	return (bstp_set_priority(&sc->sc_stp, param->ifbrp_prio));
2411#else /* !BRIDGESTP */
2412#pragma unused(sc, arg)
2413	return (EOPNOTSUPP);
2414#endif /* !BRIDGESTP */
2415}
2416
2417static int
2418bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
2419{
2420	struct ifbrparam *param = arg;
2421	struct bstp_state *bs = &sc->sc_stp;
2422
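	/* Timer values are stored scaled by 256; shift down to whole seconds */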
2423	param->ifbrp_hellotime = bs->bs_bridge_htime >> 8;
2424	return (0);
2425}
2426
2427static int
2428bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
2429{
2430#if BRIDGESTP
2431	struct ifbrparam *param = arg;
2432
2433	return (bstp_set_htime(&sc->sc_stp, param->ifbrp_hellotime));
2434#else /* !BRIDGESTP */
2435#pragma unused(sc, arg)
2436	return (EOPNOTSUPP);
2437#endif /* !BRIDGESTP */
2438}
2439
2440static int
2441bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
2442{
2443	struct ifbrparam *param = arg;
2444	struct bstp_state *bs = &sc->sc_stp;
2445
2446	param->ifbrp_fwddelay = bs->bs_bridge_fdelay >> 8;
2447	return (0);
2448}
2449
2450static int
2451bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
2452{
2453#if BRIDGESTP
2454	struct ifbrparam *param = arg;
2455
2456	return (bstp_set_fdelay(&sc->sc_stp, param->ifbrp_fwddelay));
2457#else /* !BRIDGESTP */
2458#pragma unused(sc, arg)
2459	return (EOPNOTSUPP);
2460#endif /* !BRIDGESTP */
2461}
2462
2463static int
2464bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
2465{
2466	struct ifbrparam *param = arg;
2467	struct bstp_state *bs = &sc->sc_stp;
2468
2469	param->ifbrp_maxage = bs->bs_bridge_max_age >> 8;
2470	return (0);
2471}
2472
2473static int
2474bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
2475{
2476#if BRIDGESTP
2477	struct ifbrparam *param = arg;
2478
2479	return (bstp_set_maxage(&sc->sc_stp, param->ifbrp_maxage));
2480#else /* !BRIDGESTP */
2481#pragma unused(sc, arg)
2482	return (EOPNOTSUPP);
2483#endif /* !BRIDGESTP */
2484}
2485
2486static int
2487bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
2488{
2489#if BRIDGESTP
2490	struct ifbreq *req = arg;
2491	struct bridge_iflist *bif;
2492
2493	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
2494	if (bif == NULL)
2495		return (ENOENT);
2496
2497	return (bstp_set_port_priority(&bif->bif_stp, req->ifbr_priority));
2498#else /* !BRIDGESTP */
2499#pragma unused(sc, arg)
2500	return (EOPNOTSUPP);
2501#endif /* !BRIDGESTP */
2502}
2503
2504static int
2505bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
2506{
2507#if BRIDGESTP
2508	struct ifbreq *req = arg;
2509	struct bridge_iflist *bif;
2510
2511	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
2512	if (bif == NULL)
2513		return (ENOENT);
2514
2515	return (bstp_set_path_cost(&bif->bif_stp, req->ifbr_path_cost));
2516#else /* !BRIDGESTP */
2517#pragma unused(sc, arg)
2518	return (EOPNOTSUPP);
2519#endif /* !BRIDGESTP */
2520}
2521
2522static int
2523bridge_ioctl_gfilt(struct bridge_softc *sc, void *arg)
2524{
2525	struct ifbrparam *param = arg;
2526
2527	param->ifbrp_filter = sc->sc_filter_flags;
2528
2529	return (0);
2530}
2531
2532static int
2533bridge_ioctl_sfilt(struct bridge_softc *sc, void *arg)
2534{
2535	struct ifbrparam *param = arg;
2536
2537	if (param->ifbrp_filter & ~IFBF_FILT_MASK)
2538		return (EINVAL);
2539
2540#ifndef BRIDGE_IPF
2541	if (param->ifbrp_filter & IFBF_FILT_USEIPF)
2542		return (EINVAL);
2543#endif
2544
2545	sc->sc_filter_flags = param->ifbrp_filter;
2546
2547	return (0);
2548}
2549
2550static int
2551bridge_ioctl_sifmaxaddr(struct bridge_softc *sc, void *arg)
2552{
2553	struct ifbreq *req = arg;
2554	struct bridge_iflist *bif;
2555
2556	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
2557	if (bif == NULL)
2558		return (ENOENT);
2559
2560	bif->bif_addrmax = req->ifbr_addrmax;
2561	return (0);
2562}
2563
2564static int
2565bridge_ioctl_addspan(struct bridge_softc *sc, void *arg)
2566{
2567	struct ifbreq *req = arg;
2568	struct bridge_iflist *bif = NULL;
2569	struct ifnet *ifs;
2570
2571	ifs = ifunit(req->ifbr_ifsname);
2572	if (ifs == NULL)
2573		return (ENOENT);
2574
2575	TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next)
2576		if (ifs == bif->bif_ifp)
2577			return (EBUSY);
2578
2579	if (ifs->if_bridge != NULL)
2580		return (EBUSY);
2581
2582	switch (ifs->if_type) {
2583		case IFT_ETHER:
2584		case IFT_GIF:
2585		case IFT_L2VLAN:
2586			break;
2587		default:
2588			return (EINVAL);
2589	}
2590
2591	bif = _MALLOC(sizeof (*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
2592	if (bif == NULL)
2593		return (ENOMEM);
2594
2595	bif->bif_ifp = ifs;
2596	bif->bif_flags = IFBIF_SPAN;
2597
2598	ifnet_reference(bif->bif_ifp);
2599
2600	TAILQ_INSERT_HEAD(&sc->sc_spanlist, bif, bif_next);
2601
2602	return (0);
2603}
2604
2605static int
2606bridge_ioctl_delspan(struct bridge_softc *sc, void *arg)
2607{
2608	struct ifbreq *req = arg;
2609	struct bridge_iflist *bif;
2610	struct ifnet *ifs;
2611
2612	ifs = ifunit(req->ifbr_ifsname);
2613	if (ifs == NULL)
2614		return (ENOENT);
2615
2616	TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next)
2617		if (ifs == bif->bif_ifp)
2618			break;
2619
2620	if (bif == NULL)
2621		return (ENOENT);
2622
2623	bridge_delete_span(sc, bif);
2624
2625	return (0);
2626}
2627
2628#define BRIDGE_IOCTL_GBPARAM do {					\
2629	struct bstp_state *bs = &sc->sc_stp;				\
2630	struct bstp_port *root_port;					\
2631									\
2632	req->ifbop_maxage = bs->bs_bridge_max_age >> 8;			\
2633	req->ifbop_hellotime = bs->bs_bridge_htime >> 8;		\
2634	req->ifbop_fwddelay = bs->bs_bridge_fdelay >> 8;		\
2635									\
2636	root_port = bs->bs_root_port;					\
2637	if (root_port == NULL)						\
2638		req->ifbop_root_port = 0;				\
2639	else								\
2640		req->ifbop_root_port = root_port->bp_ifp->if_index;	\
2641									\
2642	req->ifbop_holdcount = bs->bs_txholdcount;			\
2643	req->ifbop_priority = bs->bs_bridge_priority;			\
2644	req->ifbop_protocol = bs->bs_protover;				\
2645	req->ifbop_root_path_cost = bs->bs_root_pv.pv_cost;		\
2646	req->ifbop_bridgeid = bs->bs_bridge_pv.pv_dbridge_id;		\
2647	req->ifbop_designated_root = bs->bs_root_pv.pv_root_id;		\
2648	req->ifbop_designated_bridge = bs->bs_root_pv.pv_dbridge_id;	\
2649	req->ifbop_last_tc_time.tv_sec = bs->bs_last_tc_time.tv_sec;	\
2650	req->ifbop_last_tc_time.tv_usec = bs->bs_last_tc_time.tv_usec;	\
2651} while (0)
2652
2653static int
2654bridge_ioctl_gbparam32(struct bridge_softc *sc, void *arg)
2655{
2656	struct ifbropreq32 *req = arg;
2657
2658	BRIDGE_IOCTL_GBPARAM;
2659
2660	return (0);
2661}
2662
2663static int
2664bridge_ioctl_gbparam64(struct bridge_softc *sc, void *arg)
2665{
2666	struct ifbropreq64 *req = arg;
2667
2668	BRIDGE_IOCTL_GBPARAM;
2669
2670	return (0);
2671}
2672
2673static int
2674bridge_ioctl_grte(struct bridge_softc *sc, void *arg)
2675{
2676	struct ifbrparam *param = arg;
2677
2678	param->ifbrp_cexceeded = sc->sc_brtexceeded;
2679	return (0);
2680}
2681
2682#define BRIDGE_IOCTL_GIFSSTP do {					\
2683	struct bridge_iflist *bif;					\
2684	struct bstp_port *bp;						\
2685	struct ifbpstpreq bpreq;					\
2686	char *buf, *outbuf;						\
2687	unsigned int count, buflen, len;				\
2688									\
2689	count = 0;							\
2690	TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {			\
2691		if ((bif->bif_flags & IFBIF_STP) != 0)			\
2692			count++;					\
2693	}								\
2694									\
2695	buflen = sizeof (bpreq) * count;				\
2696	if (bifstp->ifbpstp_len == 0) {					\
2697		bifstp->ifbpstp_len = buflen;				\
2698		return (0);						\
2699	}								\
2700									\
2701	BRIDGE_UNLOCK(sc);						\
2702	outbuf = _MALLOC(buflen, M_TEMP, M_WAITOK | M_ZERO);		\
2703	BRIDGE_LOCK(sc);						\
2704									\
2705	count = 0;							\
2706	buf = outbuf;							\
2707	len = min(bifstp->ifbpstp_len, buflen);				\
2708	bzero(&bpreq, sizeof (bpreq));					\
2709	TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {			\
2710		if (len < sizeof (bpreq))				\
2711			break;						\
2712									\
2713		if ((bif->bif_flags & IFBIF_STP) == 0)			\
2714			continue;					\
2715									\
2716		bp = &bif->bif_stp;					\
2717		bpreq.ifbp_portno = bif->bif_ifp->if_index & 0xfff;	\
2718		bpreq.ifbp_fwd_trans = bp->bp_forward_transitions;	\
2719		bpreq.ifbp_design_cost = bp->bp_desg_pv.pv_cost;	\
2720		bpreq.ifbp_design_port = bp->bp_desg_pv.pv_port_id;	\
2721		bpreq.ifbp_design_bridge = bp->bp_desg_pv.pv_dbridge_id; \
2722		bpreq.ifbp_design_root = bp->bp_desg_pv.pv_root_id;	\
2723									\
2724		memcpy(buf, &bpreq, sizeof (bpreq));			\
2725		count++;						\
2726		buf += sizeof (bpreq);					\
2727		len -= sizeof (bpreq);					\
2728	}								\
2729									\
2730	BRIDGE_UNLOCK(sc);						\
2731	bifstp->ifbpstp_len = sizeof (bpreq) * count;			\
2732	error = copyout(outbuf, bifstp->ifbpstp_req, bifstp->ifbpstp_len); \
2733	BRIDGE_LOCK(sc);						\
2734	_FREE(outbuf, M_TEMP);						\
2735	return (error);							\
2736} while (0)
2737
2738static int
2739bridge_ioctl_gifsstp32(struct bridge_softc *sc, void *arg)
2740{
2741	struct ifbpstpconf32 *bifstp = arg;
2742	int error = 0;
2743
2744	BRIDGE_IOCTL_GIFSSTP;
2745
2746	return (error);
2747}
2748
2749static int
2750bridge_ioctl_gifsstp64(struct bridge_softc *sc, void *arg)
2751{
2752	struct ifbpstpconf64 *bifstp = arg;
2753	int error = 0;
2754
2755	BRIDGE_IOCTL_GIFSSTP;
2756
2757	return (error);
2758}
2759
2760static int
2761bridge_ioctl_sproto(struct bridge_softc *sc, void *arg)
2762{
2763#if BRIDGESTP
2764	struct ifbrparam *param = arg;
2765
2766	return (bstp_set_protocol(&sc->sc_stp, param->ifbrp_proto));
2767#else /* !BRIDGESTP */
2768#pragma unused(sc, arg)
2769	return (EOPNOTSUPP);
2770#endif /* !BRIDGESTP */
2771}
2772
2773static int
2774bridge_ioctl_stxhc(struct bridge_softc *sc, void *arg)
2775{
2776#if BRIDGESTP
2777	struct ifbrparam *param = arg;
2778
2779	return (bstp_set_holdcount(&sc->sc_stp, param->ifbrp_txhc));
2780#else /* !BRIDGESTP */
2781#pragma unused(sc, arg)
2782	return (EOPNOTSUPP);
2783#endif /* !BRIDGESTP */
2784}
2785
2786/*
2787 * bridge_ifdetach:
2788 *
2789 *	Detach an interface from a bridge.  Called when a member
2790 *	interface is detaching.
2791 */
2792__private_extern__ void
2793bridge_ifdetach(struct bridge_iflist *bif, struct ifnet *ifp)
2794{
2795	struct bridge_softc *sc = ifp->if_bridge;
2796
2797#if BRIDGE_DEBUG
2798	printf("%s: %s%d\n", __func__, ifnet_name(ifp), ifnet_unit(ifp));
2799#endif
2800
2801	/* Check if the interface is a bridge member */
2802	if (sc != NULL) {
2803		BRIDGE_LOCK(sc);
2804
2805		bif = bridge_lookup_member_if(sc, ifp);
2806		if (bif != NULL)
2807			bridge_delete_member(sc, bif, 1);
2808
2809		BRIDGE_UNLOCK(sc);
2810		return;
2811	}
2812
2813	/* Check if the interface is a span port */
2814	lck_mtx_lock(bridge_list_mtx);
2815	LIST_FOREACH(sc, &bridge_list, sc_list) {
2816		BRIDGE_LOCK(sc);
2817		TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next)
2818			if (ifp == bif->bif_ifp) {
2819				bridge_delete_span(sc, bif);
2820				break;
2821			}
2822
2823		BRIDGE_UNLOCK(sc);
2824	}
2825	lck_mtx_unlock(bridge_list_mtx);
2826}
2827
2828/*
2829 * bridge_init:
2830 *
2831 *	Initialize a bridge interface.
2832 */
2833static int
2834bridge_init(struct ifnet *ifp)
2835{
2836	struct bridge_softc *sc = (struct bridge_softc *)ifp->if_softc;
2837	struct timespec ts;
2838	errno_t error;
2839
2840	BRIDGE_LOCK_ASSERT(sc);
2841
2842	if ((ifnet_flags(ifp) & IFF_RUNNING))
2843		return (0);
2844
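	/* Arm the periodic timer that prunes the bridge routing table */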
2845	ts.tv_sec = bridge_rtable_prune_period;
2846	ts.tv_nsec = 0;
2847	bsd_timeout(bridge_timer, sc, &ts);
2848
2849	error = ifnet_set_flags(ifp, IFF_RUNNING, IFF_RUNNING);
2850#if BRIDGESTP
2851	if (error == 0)
2852		bstp_init(&sc->sc_stp);		/* Initialize Spanning Tree */
2853#endif /* BRIDGESTP */
2854
2855	return (error);
2856}
2857
2858/*
2859 * bridge_ifstop:
2860 *
2861 *	Stop the bridge interface.
2862 */
2863static void
2864bridge_ifstop(struct ifnet *ifp, __unused int disable)
2865{
2866	struct bridge_softc *sc = ifp->if_softc;
2867
2868	BRIDGE_LOCK_ASSERT(sc);
2869
2870	if ((ifnet_flags(ifp) & IFF_RUNNING) == 0)
2871		return;
2872
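	/* Cancel the routing table prune timer armed in bridge_init() */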
2873	bsd_untimeout(bridge_timer, sc);
2874#if BRIDGESTP
2875	bstp_stop(&sc->sc_stp);
2876#endif /* BRIDGESTP */
2877
2878	bridge_rtflush(sc, IFBF_FLUSHDYN);
2879
2880	(void) ifnet_set_flags(ifp, 0, IFF_RUNNING);
2881}
2882
2883/*
2884 * bridge_enqueue:
2885 *
2886 *	Enqueue a packet on a bridge member interface.
2888 */
2889static int
2890bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m)
2891{
2892	int len, error = 0;
2893	short mflags;
2894	struct mbuf *m0;
2895
2896	VERIFY(dst_ifp != NULL);
2897
2898	/*
2899	 * We may be sending a fragment so traverse the mbuf packet chain
2900	 *
2901	 * NOTE: bridge_fragment() is called only when PFIL_HOOKS is enabled.
2902	 */
2903	for (; m; m = m0) {
2904		errno_t _error;
2905		struct flowadv adv = { FADV_SUCCESS };
2906
2907		m0 = m->m_nextpkt;
2908		m->m_nextpkt = NULL;
2909
2910		len = m->m_pkthdr.len;
2911		mflags = m->m_flags;
2912		m->m_flags |= M_PROTO1; /* set to avoid loops */
2913
2914#if HAS_IF_CAP
2915		/*
2916		 * If the underlying interface cannot do VLAN tag insertion itself
2917		 * then attach a packet tag that holds it.
2918		 */
2919		if ((m->m_flags & M_VLANTAG) &&
2920		    (dst_ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) {
2921			m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
2922			if (m == NULL) {
2923				printf("%s: %s%d: unable to prepend VLAN "
2924				    "header\n", __func__, ifnet_name(dst_ifp),
2925				    ifnet_unit(dst_ifp));
2926				(void) ifnet_stat_increment_out(dst_ifp,
2927				    0, 0, 1);
2928				continue;
2929			}
2930			m->m_flags &= ~M_VLANTAG;
2931		}
2932#endif /* HAS_IF_CAP */
2933
2934		_error = dlil_output(dst_ifp, 0, m, NULL, NULL, 1, &adv);
2935
2936		/* Preserve existing error value */
2937		if (error == 0) {
2938			if (_error != 0)
2939				error = _error;
2940			else if (adv.code == FADV_FLOW_CONTROLLED)
2941				error = EQFULL;
2942			else if (adv.code == FADV_SUSPENDED)
2943				error = EQSUSPENDED;
2944		}
2945
2946		if (_error == 0) {
2947			(void) ifnet_stat_increment_out(sc->sc_ifp, 1, len, 0);
2948		} else {
2949			(void) ifnet_stat_increment_out(sc->sc_ifp, 0, 0, 1);
2950		}
2951	}
2952
2953	return (error);
2954}
2955
2956#if HAS_BRIDGE_DUMMYNET
2957/*
2958 * bridge_dummynet:
2959 *
2960 *	Receive a queued packet from dummynet and pass it on to the output
2961 *	interface.
2962 *
2963 *	The mbuf has the Ethernet header already attached.
2964 */
2965static void
2966bridge_dummynet(struct mbuf *m, struct ifnet *ifp)
2967{
2968	struct bridge_softc *sc;
2969
2970	sc = ifp->if_bridge;
2971
2972	/*
2973	 * The packet didn't originate from a member interface. This should only
2974	 * ever happen if a member interface is removed while packets are
2975	 * queued for it.
2976	 */
2977	if (sc == NULL) {
2978		m_freem(m);
2979		return;
2980	}
2981
2982	if (PFIL_HOOKED(&inet_pfil_hook)
2983#ifdef INET6
2984	    || PFIL_HOOKED(&inet6_pfil_hook)
2985#endif
2986	    ) {
2987		if (bridge_pfil(&m, sc->sc_ifp, ifp, PFIL_OUT) != 0)
2988			return;
2989		if (m == NULL)
2990			return;
2991	}
2992
2993	(void) bridge_enqueue(sc, ifp, m);
2994}
2995#endif /* HAS_BRIDGE_DUMMYNET */
2996
2997#if BRIDGE_MEMBER_OUT_FILTER
2998/*
2999 * bridge_member_output:
3000 *
3001 *	Send output from a bridge member interface.  This
3002 *	performs the bridging function for locally originated
3003 *	packets.
3004 *
3005 *	The mbuf has the Ethernet header already attached.  We must
3006 *	enqueue or free the mbuf before returning.
3007 */
3008static int
3009bridge_member_output(struct ifnet *ifp, struct mbuf *m,
3010    __unused struct sockaddr *sa, __unused struct rtentry *rt)
3011{
3012	struct ether_header *eh;
3013	struct ifnet *dst_if;
3014	struct bridge_softc *sc;
3015	uint16_t vlan;
3016
3017#if BRIDGE_DEBUG
3018	if (if_bridge_debug)
3019		printf("%s: ifp %p %s%d\n", __func__, ifp, ifnet_name(ifp),
3020		    ifnet_unit(ifp));
3021#endif /* BRIDGE_DEBUG */
3022
3023	if (m->m_len < ETHER_HDR_LEN) {
3024		m = m_pullup(m, ETHER_HDR_LEN);
3025		if (m == NULL)
3026			return (0);
3027	}
3028
3029	eh = mtod(m, struct ether_header *);
3030	sc = ifp->if_bridge;
3031	vlan = VLANTAGOF(m);
3032
3033	BRIDGE_LOCK(sc);
3034
3035	/*
3036	 * APPLE MODIFICATION
3037	 * If the packet is an 802.1X ethertype, then only send on the
3038	 * original output interface.
3039	 */
3040	if (eh->ether_type == htons(ETHERTYPE_PAE)) {
3041		dst_if = ifp;
3042		goto sendunicast;
3043	}
3044
3045	/*
3046	 * If bridge is down, but the original output interface is up,
3047	 * go ahead and send out that interface.  Otherwise, the packet
3048	 * is dropped below.
3049	 */
3050	if ((sc->sc_ifp->if_flags & IFF_RUNNING) == 0) {
3051		dst_if = ifp;
3052		goto sendunicast;
3053	}
3054
3055	/*
3056	 * If the packet is a multicast, or we don't know a better way to
3057	 * get there, send to all interfaces.
3058	 */
3059	if (ETHER_IS_MULTICAST(eh->ether_dhost))
3060		dst_if = NULL;
3061	else
3062		dst_if = bridge_rtlookup(sc, eh->ether_dhost, vlan);
3063	if (dst_if == NULL) {
3064		struct bridge_iflist *bif;
3065		struct mbuf *mc;
3066		int error = 0, used = 0;
3067
3068		bridge_span(sc, m);
3069
3070		BRIDGE_LOCK2REF(sc, error);
3071		if (error) {
3072			m_freem(m);
3073			return (0);
3074		}
3075
3076		TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
3077			dst_if = bif->bif_ifp;
3078
3079			if (dst_if->if_type == IFT_GIF)
3080				continue;
3081			if ((dst_if->if_flags & IFF_RUNNING) == 0)
3082				continue;
3083
3084			/*
3085			 * If this is not the original output interface,
3086			 * and the interface is participating in spanning
3087			 * tree, make sure the port is in a state that
3088			 * allows forwarding.
3089			 */
3090			if (dst_if != ifp && (bif->bif_flags & IFBIF_STP) &&
3091			    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
3092				continue;
3093
3094			if (TAILQ_NEXT(bif, bif_next) == NULL) {
3095				used = 1;
3096				mc = m;
3097			} else {
3098				mc = m_copypacket(m, M_DONTWAIT);
3099				if (mc == NULL) {
3100					(void) ifnet_stat_increment_out(
3101					    sc->sc_ifp, 0, 0, 1);
3102					continue;
3103				}
3104			}
3105
3106			(void) bridge_enqueue(sc, dst_if, mc);
3107		}
3108		if (used == 0)
3109			m_freem(m);
3110		BRIDGE_UNREF(sc);
3111		return (0);
3112	}
3113
3114sendunicast:
3115	/*
3116	 * XXX Spanning tree consideration here?
3117	 */
3118
3119	bridge_span(sc, m);
3120	if ((dst_if->if_flags & IFF_RUNNING) == 0) {
3121		m_freem(m);
3122		BRIDGE_UNLOCK(sc);
3123		return (0);
3124	}
3125
3126	BRIDGE_UNLOCK(sc);
3127	(void) bridge_enqueue(sc, dst_if, m);
3128	return (0);
3129}
3130#endif /* BRIDGE_MEMBER_OUT_FILTER */
3131
3132#if APPLE_BRIDGE_HWCKSUM_SUPPORT
3133static struct mbuf *
3134bridge_fix_txcsum(struct mbuf *m)
3135{
3136	/*
3137	 * basic tests indicate that the vast majority of packets being
3138	 * processed here have an Ethernet header mbuf pre-pended to them
3139	 * (the first case below)
3140	 *
3141	 * the second highest are those where the Ethernet and IP/TCP/UDP
3142	 * headers are all in one mbuf (second case below)
3143	 *
3144	 * the third case has, in fact, never hit for me -- although if I
3145	 * comment out the first two cases, that code works for them, so I
3146	 * consider it a decent general solution
3147	 */
3148	int amt = ETHER_HDR_LEN;
3149	int hlen = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data);
3150	int off = M_CSUM_DATA_IPv4_OFFSET(m->m_pkthdr.csum_data);
3151
3152	/*
3153	 * NOTE we should never get vlan-attached packets here;
3154	 * support for those COULD be added, but we don't use them
3155	 * and it really kinda slows things down to worry about them
3156	 */
3157
3158#ifdef DIAGNOSTIC
3159	if (m_tag_find(m, PACKET_TAG_VLAN, NULL) != NULL) {
3160		printf("%s: transmitting packet tagged with VLAN?\n", __func__);
3161		KASSERT(0);
3162		m_freem(m);
3163		return (NULL);
3164	}
3165#endif
3166
3167	if (m->m_pkthdr.csum_flags & M_CSUM_IPv4) {
3168		amt += hlen;
3169	}
3170	if (m->m_pkthdr.csum_flags & M_CSUM_TCPv4) {
3171		amt += off + sizeof (uint16_t);
3172	}
3173
3174	if (m->m_pkthdr.csum_flags & M_CSUM_UDPv4) {
3175		amt += off + sizeof (uint16_t);
3176	}
3177
3178	if (m->m_len == ETHER_HDR_LEN) {
3179		/*
3180		 * this is the case where the first mbuf contains only the
3181		 * Ethernet header -- just strip it off and do the
3182		 * checksum
3183		 */
3184		/* set up m_ip so the cksum operations work */
3185		struct mbuf *m_ip = m->m_next;
3186
3187		/* APPLE MODIFICATION 22 Apr 2008 <mvega@apple.com>
3188		 *  <rdar://5817385> Clear the m_tag list before setting
3189		 *  M_PKTHDR.
3190		 *
3191		 *  If this m_buf chain was extended via M_PREPEND(), then
3192		 *  m_ip->m_pkthdr is identical to m->m_pkthdr (see
3193		 *  M_MOVE_PKTHDR()). The only thing preventing access to this
3194		 *  invalid packet header data is the fact that the M_PKTHDR
3195		 *  flag is clear, i.e., m_ip->m_flag & M_PKTHDR == 0, but we're
3196		 *  about to set the M_PKTHDR flag, so to be safe we initialize,
3197		 *  more accurately, we clear, m_ip->m_pkthdr.tags via
3198		 *  m_tag_init().
3199		 *
3200		 *  Suppose that we do not do this; if m_pullup(), below, fails,
3201		 *  then m_ip will be freed along with m_ip->m_pkthdr.tags, but
3202		 *  we will also free m soon after, via m_freem(), and
3203		 *  consequently attempt to free m->m_pkthdr.tags in the
3204		 *  process. The problem is that m->m_pkthdr.tags will have
3205		 *  already been freed by virtue of being equal to
3206		 *  m_ip->m_pkthdr.tags. Attempts to dereference
3207		 *  m->m_pkthdr.tags in m_tag_delete_chain() will result in a
3208		 *  panic.
3209		 */
3210		m_tag_init(m_ip);
3211		/* END MODIFICATION */
3212		m_ip->m_flags |= M_PKTHDR;
3213		m_ip->m_pkthdr.csum_flags = m->m_pkthdr.csum_flags;
3214		m_ip->m_pkthdr.csum_data = m->m_pkthdr.csum_data;
3215		m_ip->m_pkthdr.len = m->m_pkthdr.len - ETHER_HDR_LEN;
3216
3217		/*
3218		 * set up the header mbuf so we can prepend it
3219		 * back on again later
3220		 */
3221		m->m_pkthdr.csum_flags = 0;
3222		m->m_pkthdr.csum_data = 0;
3223		m->m_pkthdr.len = ETHER_HDR_LEN;
3224		m->m_next = NULL;
3225
3226		/* now do the checksums we need -- first IP */
3227		if (m_ip->m_pkthdr.csum_flags & M_CSUM_IPv4) {
3228			/*
3229			 * make sure the IP header (or at least the part with
3230			 * the cksum) is there
3231			 */
3232			m_ip = m_pullup(m_ip, sizeof (struct ip));
3233			if (m_ip == NULL) {
3234				printf("%s: failed to flatten header\n",
3235				    __func__);
3236				m_freem(m);
3237				return (NULL);
3238			}
3239
3240			/* now do the checksum */
3241			{
3242				struct ip *ip = mtod(m_ip, struct ip *);
3243				ip->ip_sum = in_cksum(m_ip, hlen);
3244
3245#ifdef VERY_VERY_VERY_DIAGNOSTIC
3246				printf("%s: performed IPv4 checksum\n",
3247				    __func__);
3248#endif
3249			}
3250		}
3251
3252		/* now do a TCP or UDP delayed checksum */
3253		if (m_ip->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
3254			in_delayed_cksum(m_ip);
3255
3256#ifdef VERY_VERY_VERY_DIAGNOSTIC
3257			printf("%s: performed TCPv4/UDPv4 checksum\n",
3258			    __func__);
3259#endif
3260		}
3261
3262		/* now attach the ethernet header back onto the IP packet */
3263		m->m_next = m_ip;
3264		m->m_pkthdr.len += m_length(m_ip);
3265
3266		/*
3267		 * clear the M_PKTHDR flags on the ip packet (again,
3268		 * we re-attach later)
3269		 */
3270		m_ip->m_flags &= ~M_PKTHDR;
3271
3272		/* and clear any csum flags */
3273		m->m_pkthdr.csum_flags &=
3274		    ~(M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_IPv4);
3275	} else if (m->m_len >= amt) {
3276		/*
3277		 * everything fits in the first mbuf, so futz with
3278		 * m->m_data, m->m_len and m->m_pkthdr.len to make it work
3279		 */
3280		m->m_len -= ETHER_HDR_LEN;
3281		m->m_data += ETHER_HDR_LEN;
3282		m->m_pkthdr.len -= ETHER_HDR_LEN;
3283
3284		/* now do the checksums we need -- first IP */
3285		if (m->m_pkthdr.csum_flags & M_CSUM_IPv4) {
3286			struct ip *ip = mtod(m, struct ip *);
3287			ip->ip_sum = in_cksum(m, hlen);
3288
3289#ifdef VERY_VERY_VERY_DIAGNOSTIC
3290			printf("%s: performed IPv4 checksum\n", __func__);
3291#endif
3292		}
3293
3294		/* now do a TCP or UDP delayed checksum */
3295		if (m->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
3296			in_delayed_cksum(m);
3297
3298#ifdef VERY_VERY_VERY_DIAGNOSTIC
3299			printf("%s: performed TCPv4/UDPv4 checksum\n",
3300			    __func__);
3301#endif
3302		}
3303
3304		/* now stick the ethernet header back on */
3305		m->m_len += ETHER_HDR_LEN;
3306		m->m_data -= ETHER_HDR_LEN;
3307		m->m_pkthdr.len += ETHER_HDR_LEN;
3308
3309		/* and clear any csum flags */
3310		m->m_pkthdr.csum_flags &=
3311		    ~(M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_IPv4);
3312	} else {
3313		struct mbuf *m_ip;
3314
3315		/*
3316		 * general case -- need to split the Ethernet header off and deal
3317		 * with the rest; first, calculate how much needs to be made writable
3318		 * (we may have a read-only mbuf here)
3319		 */
3320		hlen = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data);
3321#if PARANOID
3322		off = M_CSUM_DATA_IPv4_OFFSET(m->m_pkthdr.csum_data);
3323
3324		if (m->m_pkthdr.csum_flags & M_CSUM_IPv4) {
3325			amt += hlen;
3326		}
3327
3328		if (m->m_pkthdr.csum_flags & M_CSUM_TCPv4) {
3329			amt += sizeof (struct tcphdr *);
3330			amt += off;
3331		}
3332
3333		if (m->m_pkthdr.csum_flags & M_CSUM_UDPv4) {
3334			amt += sizeof (struct udphdr *);
3335			amt += off;
3336		}
3337#endif
3338
3339		/*
3340		 * now split the ethernet header off of the IP packet
3341		 * (we'll re-attach later)
3342		 */
3343		m_ip = m_split(m, ETHER_HDR_LEN, M_NOWAIT);
3344		if (m_ip == NULL) {
3345			printf("%s: could not split ether header\n", __func__);
3346
3347			m_freem(m);
3348			return (NULL);
3349		}
3350
3351#if PARANOID
3352		/*
3353		 * make sure that the IP packet is writable
3354		 * for the portion we need
3355		 */
3356		if (m_makewritable(&m_ip, 0, amt, M_DONTWAIT) != 0) {
3357			printf("%s: could not make %d bytes writable\n",
3358			    __func__, amt);
3359
3360			m_freem(m);
3361			m_freem(m_ip);
3362			return (NULL);
3363		}
3364#endif
3365
3366		m_ip->m_pkthdr.csum_flags = m->m_pkthdr.csum_flags;
3367		m_ip->m_pkthdr.csum_data = m->m_pkthdr.csum_data;
3368
3369		m->m_pkthdr.csum_flags = 0;
3370		m->m_pkthdr.csum_data = 0;
3371
3372		/* now do the checksums we need -- first IP */
3373		if (m_ip->m_pkthdr.csum_flags & M_CSUM_IPv4) {
3374			/*
3375			 * make sure the IP header (or at least the part
3376			 * with the cksum) is there
3377			 */
3378			m_ip = m_pullup(m_ip, sizeof (struct ip));
3379			if (m_ip == NULL) {
3380				printf("%s: failed to flatten header\n",
3381				    __func__);
3382				m_freem(m);
3383				return (NULL);
3384			}
3385
3386			/* now do the checksum */
3387			{
3388				struct ip *ip = mtod(m_ip, struct ip *);
3389				ip->ip_sum = in_cksum(m_ip, hlen);
3390
3391#ifdef VERY_VERY_VERY_DIAGNOSTIC
3392				printf("%s: performed IPv4 checksum\n",
3393				    __func__);
3394#endif
3395			}
3396		}
3397
3398		/* now do a TCP or UDP delayed checksum */
3399		if (m_ip->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
3400			in_delayed_cksum(m_ip);
3401
3402#ifdef VERY_VERY_VERY_DIAGNOSTIC
3403			printf("%s: performed TCPv4/UDPv4 checksum\n",
3404			    __func__);
3405#endif
3406		}
3407
3408		/* now attach the ethernet header back onto the IP packet */
3409		m->m_next = m_ip;
3410		m->m_pkthdr.len += m_length(m_ip);
3411
3412		/*
3413		 * clear the M_PKTHDR flags on the ip packet
3414		 * (again, we re-attach later)
3415		 */
3416		m_ip->m_flags &= ~M_PKTHDR;
3417
3418		/* and clear any csum flags */
3419		m->m_pkthdr.csum_flags &=
3420		    ~(M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_IPv4);
3421	}
3422
3423	return (m);
3424}
3425#endif
3426
3427/*
3428 * Output callback.
3429 *
3430 * This routine is called externally from above only when if_bridge_txstart
3431 * is disabled; otherwise it is called internally by bridge_start().
3432 */
3433static int
3434bridge_output(struct ifnet *ifp, struct mbuf *m)
3435{
3436	struct bridge_softc *sc = ifnet_softc(ifp);
3437	struct ether_header *eh;
3438	struct ifnet *dst_if;
3439	int error = 0;
3440
3441	eh = mtod(m, struct ether_header *);
3442	dst_if = NULL;
3443
3444	BRIDGE_LOCK(sc);
3445	if (!(m->m_flags & (M_BCAST|M_MCAST))) {
3446		dst_if = bridge_rtlookup(sc, eh->ether_dhost, 0);
3447	}
3448
3449#if APPLE_BRIDGE_HWCKSUM_SUPPORT
3450	/*
3451	 * APPLE MODIFICATION - if the packet needs a checksum
3452	 * (i.e., checksum has been deferred for HW support)
3453	 * AND the destination interface doesn't support HW
3454	 * checksums, then we need to fix-up the checksum here
3455	 */
3456	if ((m->m_pkthdr.csum_flags &
3457	    (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_IPv4)) &&
3458	    (dst_if == NULL ||
3459	    (dst_if->if_csum_flags_tx & m->m_pkthdr.csum_flags) !=
3460	    m->m_pkthdr.csum_flags)) {
3461		m = bridge_fix_txcsum(m);
3462		if (m == NULL) {
3463			BRIDGE_UNLOCK(sc);
3464			return (0);
3465		}
3466	}
3467#else
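	/*
	 * Compute any checksums that were deferred for hardware offload
	 * before handing the frame to the member interfaces.
	 */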
3468	if (eh->ether_type == htons(ETHERTYPE_IP))
3469		mbuf_outbound_finalize(m, PF_INET, sizeof (*eh));
3470	else
3471		m->m_pkthdr.csum_flags = 0;
3472#endif /* APPLE_BRIDGE_HWCKSUM_SUPPORT */
3473
3474	atomic_add_64(&ifp->if_obytes, m->m_pkthdr.len);
3475	atomic_add_64(&ifp->if_opackets, 1);
3476
3477#if NBPFILTER > 0
3478	if (sc->sc_bpf_output)
3479		bridge_bpf_output(ifp, m);
3480#endif
3481
3482	if (dst_if == NULL) {
3483		/* callee will unlock */
3484		bridge_broadcast(sc, ifp, m, 0);
3485	} else {
3486		BRIDGE_UNLOCK(sc);
3487		error = bridge_enqueue(sc, dst_if, m);
3488	}
3489
3490	return (error);
3491}
3492
3493/*
3494 * bridge_start:
3495 *
3496 *	Start output on a bridge.
3497 *
3498 * This routine is invoked by the start worker thread; because we never call
3499 * it directly, there is no need to deploy any serialization mechanism other
3500 * than what's already used by the worker thread, i.e. this is already single
3501 * threaded.
3502 *
3503 * This routine is called only when if_bridge_txstart is enabled.
3504 */
3505static void
3506bridge_start(struct ifnet *ifp)
3507{
3508	struct mbuf *m;
3509
3510	for (;;) {
3511		if (ifnet_dequeue(ifp, &m) != 0)
3512			break;
3513
3514		(void) bridge_output(ifp, m);
3515	}
3516}
3517
3518/*
3519 * bridge_forward:
3520 *
3521 *	The forwarding function of the bridge.
3522 *
3523 *	NOTE: Releases the lock on return.
3524 */
3525static void
3526bridge_forward(struct bridge_softc *sc, struct bridge_iflist *sbif,
3527    struct mbuf *m)
3528{
3529	struct bridge_iflist *dbif;
3530	struct ifnet *src_if, *dst_if, *ifp;
3531	struct ether_header *eh;
3532	uint16_t vlan;
3533	uint8_t *dst;
3534	int error;
3535
3536	lck_mtx_assert(sc->sc_mtx, LCK_MTX_ASSERT_OWNED);
3537
3538#if BRIDGE_DEBUG
3539	if (if_bridge_debug)
3540		printf("%s: %s%d m%p\n", __func__, ifnet_name(sc->sc_ifp),
3541		    ifnet_unit(sc->sc_ifp), m);
3542#endif /* BRIDGE_DEBUG */
3543
3544	src_if = m->m_pkthdr.rcvif;
3545	ifp = sc->sc_ifp;
3546
3547	(void) ifnet_stat_increment_in(ifp, 1, m->m_pkthdr.len, 0);
3548	vlan = VLANTAGOF(m);
3549
3550
3551	if ((sbif->bif_flags & IFBIF_STP) &&
3552	    sbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
3553		goto drop;
3554
3555	eh = mtod(m, struct ether_header *);
3556	dst = eh->ether_dhost;
3557
3558	/* If the interface is learning, record the address. */
3559	if (sbif->bif_flags & IFBIF_LEARNING) {
3560		error = bridge_rtupdate(sc, eh->ether_shost, vlan,
3561		    sbif, 0, IFBAF_DYNAMIC);
3562		/*
3563		 * If the interface has an address limit then deny any source
3564		 * that is not in the cache.
3565		 */
3566		if (error && sbif->bif_addrmax)
3567			goto drop;
3568	}
3569
3570	if ((sbif->bif_flags & IFBIF_STP) != 0 &&
3571	    sbif->bif_stp.bp_state == BSTP_IFSTATE_LEARNING)
3572		goto drop;
3573
3574	/*
3575	 * At this point, the port either doesn't participate
3576	 * in spanning tree or it is in the forwarding state.
3577	 */
3578
3579	/*
3580	 * If the packet is unicast, destined for someone on
3581	 * "this" side of the bridge, drop it.
3582	 */
3583	if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
3584		dst_if = bridge_rtlookup(sc, dst, vlan);
3585		if (src_if == dst_if)
3586			goto drop;
3587	} else {
3588		/*
3589		 * Check if it's a reserved multicast address; any address
3590		 * listed in 802.1D section 7.12.6 may not be forwarded by the
3591		 * bridge.
3592		 * This is currently 01-80-C2-00-00-00 to 01-80-C2-00-00-0F
3593		 */
3594		if (dst[0] == 0x01 && dst[1] == 0x80 &&
3595		    dst[2] == 0xc2 && dst[3] == 0x00 &&
3596		    dst[4] == 0x00 && dst[5] <= 0x0f)
3597			goto drop;
3598
3599
3600		/* ...forward it to all interfaces. */
3601		atomic_add_64(&ifp->if_imcasts, 1);
3602		dst_if = NULL;
3603	}
3604
3605	/*
3606	 * If we have a destination interface which is a member of our bridge,
3607	 * OR this is a unicast packet, push it through the bpf(4) machinery.
3608	 * For broadcast or multicast packets, don't bother because it will
3609	 * be reinjected into ether_input. We do this before we pass the packets
3610	 * through the pfil(9) framework, as it is possible that pfil(9) will
3611	 * drop the packet, or possibly modify it, making it difficult to debug
3612	 * firewall issues on the bridge.
3613	 */
3614#if NBPFILTER > 0
3615	if (eh->ether_type == htons(ETHERTYPE_RSN_PREAUTH) ||
3616	    dst_if != NULL || (m->m_flags & (M_BCAST | M_MCAST)) == 0) {
3617		m->m_pkthdr.rcvif = ifp;
3618		if (sc->sc_bpf_input)
3619			bridge_bpf_input(ifp, m);
3620	}
3621#endif /* NBPFILTER */
3622
3623#if defined(PFIL_HOOKS)
3624	/* run the packet filter */
3625	if (PFIL_HOOKED(&inet_pfil_hook)
3626#ifdef INET6
3627	    || PFIL_HOOKED(&inet6_pfil_hook)
3628#endif /* INET6 */
3629	    ) {
3630		BRIDGE_UNLOCK(sc);
3631		if (bridge_pfil(&m, ifp, src_if, PFIL_IN) != 0)
3632			return;
3633		if (m == NULL)
3634			return;
3635		BRIDGE_LOCK(sc);
3636	}
3637#endif /* PFIL_HOOKS */
3638
3639	if (dst_if == NULL) {
3640		/*
3641		 * Clear any in-bound checksum flags for this packet.
3642		 */
3643		mbuf_inbound_modified(m);
3644
3645		bridge_broadcast(sc, src_if, m, 1);
3646
3647		return;
3648	}
3649
3650	/*
3651	 * At this point, we're dealing with a unicast frame
3652	 * going to a different interface.
3653	 */
3654	if ((dst_if->if_flags & IFF_RUNNING) == 0)
3655		goto drop;
3656
3657	dbif = bridge_lookup_member_if(sc, dst_if);
3658	if (dbif == NULL)
3659		/* Not a member of the bridge (anymore?) */
3660		goto drop;
3661
3662	/* Private segments cannot talk to each other */
3663	if (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE)
3664		goto drop;
3665
3666	if ((dbif->bif_flags & IFBIF_STP) &&
3667	    dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
3668		goto drop;
3669
3670#if HAS_DHCPRA_MASK
3671	/* APPLE MODIFICATION <rdar://6985737> */
3672	if ((dst_if->if_extflags & IFEXTF_DHCPRA_MASK) != 0) {
3673		m = ip_xdhcpra_output(dst_if, m);
3674		if (!m) {
3675			++sc->sc_sc.sc_ifp.if_xdhcpra;
3676			return;
3677		}
3678	}
3679#endif /* HAS_DHCPRA_MASK */
3680
3681	BRIDGE_UNLOCK(sc);
3682
3683#if defined(PFIL_HOOKS)
3684	if (PFIL_HOOKED(&inet_pfil_hook)
3685#ifdef INET6
3686	    || PFIL_HOOKED(&inet6_pfil_hook)
3687#endif
3688	    ) {
3689		if (bridge_pfil(&m, ifp, dst_if, PFIL_OUT) != 0)
3690			return;
3691		if (m == NULL)
3692			return;
3693	}
3694#endif /* PFIL_HOOKS */
3695
3696	/*
3697	 * Clear any in-bound checksum flags for this packet.
3698	 */
3699	mbuf_inbound_modified(m);
3700
3701	(void) bridge_enqueue(sc, dst_if, m);
3702	return;
3703
3704drop:
3705	BRIDGE_UNLOCK(sc);
3706	m_freem(m);
3707}
3708
3709#if BRIDGE_DEBUG
3710
3711char *ether_ntop(char *, size_t, const u_char *);
3712
3713__private_extern__ char *
3714ether_ntop(char *buf, size_t len, const u_char *ap)
3715{
3716	snprintf(buf, len, "%02x:%02x:%02x:%02x:%02x:%02x",
3717	    ap[0], ap[1], ap[2], ap[3], ap[4], ap[5]);
3718
3719	return (buf);
3720}
3721
3722#endif /* BRIDGE_DEBUG */
3723
3724/*
3725 * bridge_input:
3726 *
3727 *	Filter input from a member interface.  Queue the packet for
3728 *	bridging if it is not for us.
3729 */
3730__private_extern__ errno_t
3731bridge_input(struct ifnet *ifp, struct mbuf *m, __unused void *frame_header)
3732{
3733	struct bridge_softc *sc = ifp->if_bridge;
3734	struct bridge_iflist *bif, *bif2;
3735	struct ifnet *bifp;
3736	struct ether_header *eh;
3737	struct mbuf *mc, *mc2;
3738	uint16_t vlan;
3739	int error;
3740
3741#if BRIDGE_DEBUG
3742	if (if_bridge_debug)
3743		printf("%s: %s%d from %s%d m %p data %p\n", __func__,
3744		    ifnet_name(sc->sc_ifp), ifnet_unit(sc->sc_ifp),
3745		    ifnet_name(ifp), ifnet_unit(ifp), m, mbuf_data(m));
3746#endif /* BRIDGE_DEBUG */
3747
3748	if ((sc->sc_ifp->if_flags & IFF_RUNNING) == 0) {
3749#if BRIDGE_DEBUG
3750		if (if_bridge_debug)
3751			printf("%s: %s%d not running passing along\n",
3752			    __func__, ifnet_name(sc->sc_ifp),
3753			    ifnet_unit(sc->sc_ifp));
3754#endif /* BRIDGE_DEBUG */
3755		return (0);
3756	}
3757
3758	bifp = sc->sc_ifp;
3759	vlan = VLANTAGOF(m);
3760
3761#ifdef IFF_MONITOR
3762	/*
3763	 * Implement support for bridge monitoring. If this flag has been
3764	 * set on this interface, discard the packet once we push it through
3765	 * the bpf(4) machinery, but before we do, increment the byte and
3766	 * packet counters associated with this interface.
3767	 */
3768	if ((bifp->if_flags & IFF_MONITOR) != 0) {
3769		m->m_pkthdr.rcvif  = bifp;
3770		BRIDGE_BPF_MTAP_INPUT(sc, m);
3771		(void) ifnet_stat_increment_in(bifp, 1, m->m_pkthdr.len, 0);
3772		m_freem(m);
3773		return (EJUSTRETURN);
3774	}
3775#endif /* IFF_MONITOR */
3776
3777	/*
3778	 * Need to clear the promiscuous flag, otherwise the packet will be
3779	 * dropped by DLIL after processing filters
3780	 */
3781	if ((mbuf_flags(m) & MBUF_PROMISC))
3782		mbuf_setflags_mask(m, 0, MBUF_PROMISC);
3783
3784	BRIDGE_LOCK(sc);
3785	bif = bridge_lookup_member_if(sc, ifp);
3786	if (bif == NULL) {
3787		BRIDGE_UNLOCK(sc);
3788#if BRIDGE_DEBUG
3789		if (if_bridge_debug)
3790			printf("%s: %s%d bridge_lookup_member_if failed\n",
3791			    __func__, ifnet_name(sc->sc_ifp),
3792			    ifnet_unit(sc->sc_ifp));
3793#endif /* BRIDGE_DEBUG */
3794		return (0);
3795	}
3796
3797	eh = mtod(m, struct ether_header *);
3798
3799	bridge_span(sc, m);
3800
3801	if (m->m_flags & (M_BCAST|M_MCAST)) {
3802
3803#if BRIDGE_DEBUG
3804		if (if_bridge_debug)
3805			if ((m->m_flags & M_MCAST))
3806				printf("%s: multicast: "
3807				    "%02x:%02x:%02x:%02x:%02x:%02x\n",
3808				    __func__,
3809				    eh->ether_dhost[0], eh->ether_dhost[1],
3810				    eh->ether_dhost[2], eh->ether_dhost[3],
3811				    eh->ether_dhost[4], eh->ether_dhost[5]);
3812#endif /* BRIDGE_DEBUG */
3813
3814		/* Tap off 802.1D packets; they do not get forwarded. */
3815		if (memcmp(eh->ether_dhost, bstp_etheraddr,
3816		    ETHER_ADDR_LEN) == 0) {
3817#if BRIDGESTP
3818			m = bstp_input(&bif->bif_stp, ifp, m);
3819#else /* !BRIDGESTP */
3820			m_freem(m);
3821			m = NULL;
3822#endif /* !BRIDGESTP */
3823			if (m == NULL) {
3824				BRIDGE_UNLOCK(sc);
3825				return (EJUSTRETURN);
3826			}
3827		}
3828
3829		if ((bif->bif_flags & IFBIF_STP) &&
3830		    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
3831			BRIDGE_UNLOCK(sc);
3832			return (0);
3833		}
3834
3835		/*
3836		 * Make a deep copy of the packet and enqueue the copy
3837		 * for bridge processing; return the original packet for
3838		 * local processing.
3839		 */
3840		mc = m_dup(m, M_DONTWAIT);
3841		if (mc == NULL) {
3842			BRIDGE_UNLOCK(sc);
3843			return (0);
3844		}
3845
3846		/*
3847		 * Perform the bridge forwarding function with the copy.
3848		 *
3849		 * Note that bridge_forward calls BRIDGE_UNLOCK
3850		 */
3851		bridge_forward(sc, bif, mc);
3852
		/*
		 * Reinject the mbuf as arriving on the bridge so we have a
		 * chance at claiming multicast packets. We cannot loop back
		 * here from ether_input as a bridge is never a member of a
		 * bridge.
		 */
3859		KASSERT(bifp->if_bridge == NULL,
3860		    ("loop created in bridge_input"));
3861		mc2 = m_dup(m, M_DONTWAIT);
3862		if (mc2 != NULL) {
3863			/* Keep the layer3 header aligned */
3864			int i = min(mc2->m_pkthdr.len, max_protohdr);
3865			mc2 = m_copyup(mc2, i, ETHER_ALIGN);
3866		}
3867		if (mc2 != NULL) {
			/* Mark the packet as arriving on the bridge */
3869			mc2->m_pkthdr.rcvif = bifp;
3870			mc2->m_pkthdr.header = mbuf_data(mc2);
3871
3872#if NBPFILTER > 0
3873			if (sc->sc_bpf_input)
3874				bridge_bpf_input(bifp, mc2);
#endif /* NBPFILTER > 0 */
3876			(void) mbuf_setdata(mc2,
3877			    (char *)mbuf_data(mc2) + ETHER_HDR_LEN,
3878			    mbuf_len(mc2) - ETHER_HDR_LEN);
3879			(void) mbuf_pkthdr_adjustlen(mc2, - ETHER_HDR_LEN);
3880
3881			(void) ifnet_stat_increment_in(bifp, 1,
3882			    mbuf_pkthdr_len(mc2), 0);
3883
3884#if BRIDGE_DEBUG
3885			if (if_bridge_debug)
3886				printf("%s: %s%d mcast for us\n", __func__,
3887				    ifnet_name(sc->sc_ifp),
3888				    ifnet_unit(sc->sc_ifp));
3889#endif /* BRIDGE_DEBUG */
3890
3891			dlil_input_packet_list(bifp, mc2);
3892		}
3893
3894		/* Return the original packet for local processing. */
3895		return (0);
3896	}
3897
3898	if ((bif->bif_flags & IFBIF_STP) &&
3899	    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
3900		BRIDGE_UNLOCK(sc);
3901		return (0);
3902	}
3903
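	/*
	 * The OR_CARP_CHECK_WE_ARE_DST/SRC helpers add CARP virtual-host
	 * checks to the MAC address comparisons below when DEV_CARP is
	 * configured; otherwise they expand to nothing.
	 */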
3904#ifdef DEV_CARP
3905#   define OR_CARP_CHECK_WE_ARE_DST(iface) \
3906	|| ((iface)->if_carp \
3907	    && carp_forus((iface)->if_carp, eh->ether_dhost))
3908#   define OR_CARP_CHECK_WE_ARE_SRC(iface) \
3909	|| ((iface)->if_carp \
3910	    && carp_forus((iface)->if_carp, eh->ether_shost))
3911#else
3912#   define OR_CARP_CHECK_WE_ARE_DST(iface)
3913#   define OR_CARP_CHECK_WE_ARE_SRC(iface)
3914#endif
3915
3916#ifdef INET6
3917#   define OR_PFIL_HOOKED_INET6 \
3918	|| PFIL_HOOKED(&inet6_pfil_hook)
3919#else
3920#   define OR_PFIL_HOOKED_INET6
3921#endif
3922
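	/*
	 * PFIL_PHYS runs the inbound pfil(9) hooks against the physical
	 * member interface when pfil_local_phys is set; without PFIL_HOOKS
	 * it expands to nothing.
	 */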
3923#if defined(PFIL_HOOKS)
3924#define	PFIL_PHYS(sc, ifp, m) do {					\
3925	if (pfil_local_phys &&						\
3926	(PFIL_HOOKED(&inet_pfil_hook) OR_PFIL_HOOKED_INET6)) {		\
3927		if (bridge_pfil(&m, NULL, ifp,				\
3928		    PFIL_IN) != 0 || m == NULL) {			\
3929			BRIDGE_UNLOCK(sc);				\
3930			return (NULL);					\
3931		}							\
3932	}								\
3933} while (0)
3934#else /* PFIL_HOOKS */
3935#define	PFIL_PHYS(sc, ifp, m)
3936#endif /* PFIL_HOOKS */
3937
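	/*
	 * GRAB_OUR_PACKETS(iface) is expanded for each bridge member below.
	 * It skips gif(4) members, claims frames whose destination matches
	 * the member's MAC address (updating the address table when learning
	 * is enabled) and passes them up the stack, and returns EJUSTRETURN
	 * for frames whose source matches the member's MAC address, i.e.
	 * packets we sent out that have been looped back to us.
	 */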
3938#define	GRAB_OUR_PACKETS(iface)						\
3939	if ((iface)->if_type == IFT_GIF)				\
3940		continue;						\
3941	/* It is destined for us. */					\
3942	if (memcmp(ifnet_lladdr((iface)), eh->ether_dhost,		\
3943	    ETHER_ADDR_LEN) == 0 OR_CARP_CHECK_WE_ARE_DST((iface))) {	\
3944		if ((iface)->if_type == IFT_BRIDGE) {			\
3945			BRIDGE_BPF_MTAP_INPUT(sc, m);			\
3946			/* Filter on the physical interface. */		\
3947			PFIL_PHYS(sc, iface, m);			\
3948		}							\
3949		if (bif->bif_flags & IFBIF_LEARNING) {			\
3950			error = bridge_rtupdate(sc, eh->ether_shost,	\
3951			    vlan, bif, 0, IFBAF_DYNAMIC);		\
3952			if (error && bif->bif_addrmax) {		\
3953				BRIDGE_UNLOCK(sc);			\
3954				return (EJUSTRETURN);			\
3955			}						\
3956		}							\
3957		m->m_pkthdr.rcvif = iface;				\
3958		BRIDGE_UNLOCK(sc);					\
3959		return (0);						\
3960	}								\
3961									\
3962	/* We just received a packet that we sent out. */		\
3963	if (memcmp(ifnet_lladdr((iface)), eh->ether_shost,		\
3964	    ETHER_ADDR_LEN) == 0 OR_CARP_CHECK_WE_ARE_SRC((iface))) {	\
3965		BRIDGE_UNLOCK(sc);					\
3966		return (EJUSTRETURN);					\
3967	}
3968
	/*
	 * Unicast.
	 *
	 * If the packet is for us, mark it as arriving on the bridge
	 * interface and return it to ether_input for local processing.
	 */
3977	if (memcmp(eh->ether_dhost, ifnet_lladdr(bifp),
3978	    ETHER_ADDR_LEN) == 0 OR_CARP_CHECK_WE_ARE_DST(bifp)) {
3979
3980		/* Mark the packet as arriving on the bridge interface */
3981		(void) mbuf_pkthdr_setrcvif(m, bifp);
3982		mbuf_pkthdr_setheader(m, frame_header);
3983
3984		/*
3985		 * If the interface is learning, and the source
3986		 * address is valid and not multicast, record
3987		 * the address.
3988		 */
3989		if ((bif->bif_flags & IFBIF_LEARNING) != 0 &&
3990		    ETHER_IS_MULTICAST(eh->ether_shost) == 0 &&
3991		    (eh->ether_shost[0] | eh->ether_shost[1] |
3992		    eh->ether_shost[2] | eh->ether_shost[3] |
3993		    eh->ether_shost[4] | eh->ether_shost[5]) != 0) {
3994			(void) bridge_rtupdate(sc, eh->ether_shost,
3995			    vlan, bif, 0, IFBAF_DYNAMIC);
3996		}
3997
3998		BRIDGE_BPF_MTAP_INPUT(sc, m);
3999
4000		(void) mbuf_setdata(m, (char *)mbuf_data(m) + ETHER_HDR_LEN,
4001		    mbuf_len(m) - ETHER_HDR_LEN);
4002		(void) mbuf_pkthdr_adjustlen(m, - ETHER_HDR_LEN);
4003
4004		(void) ifnet_stat_increment_in(bifp, 1, mbuf_pkthdr_len(m), 0);
4005
4006		BRIDGE_UNLOCK(sc);
4007
4008#if BRIDGE_DEBUG
4009		if (if_bridge_debug)
4010			printf("%s: %s%d packet for bridge\n", __func__,
4011			    ifnet_name(sc->sc_ifp), ifnet_unit(sc->sc_ifp));
4012#endif /* BRIDGE_DEBUG */
4013
4014		dlil_input_packet_list(bifp, m);
4015
4016		return (EJUSTRETURN);
4017	}
4018
4019	/*
	 * If the packet is destined for the MAC address of the member
	 * interface itself, then we don't need to forward it -- just
	 * pass it back.  Note that it'll likely just be dropped by the
	 * stack, but if something else is bound to the interface
	 * directly (for example, the wireless stats protocol -- although
	 * that actually uses BPF right now), then it will consume the
	 * packet.
	 *
	 * ALSO, note that we do this check AFTER checking for the
	 * bridge's own MAC address, because the bridge may be using
	 * the SAME MAC address as one of its interfaces.
4031	 */
4032	if (memcmp(eh->ether_dhost, ifnet_lladdr(ifp), ETHER_ADDR_LEN) == 0) {
4033
4034#ifdef VERY_VERY_VERY_DIAGNOSTIC
		printf("%s: not forwarding packet bound for member "
		    "interface\n", __func__);
#endif
		BRIDGE_UNLOCK(sc);
		return (0);
4040	}
4041
	/* Now check all the bridge members. */
4043	TAILQ_FOREACH(bif2, &sc->sc_iflist, bif_next) {
4044		GRAB_OUR_PACKETS(bif2->bif_ifp)
4045	}
4046
4047#undef OR_CARP_CHECK_WE_ARE_DST
4048#undef OR_CARP_CHECK_WE_ARE_SRC
4049#undef OR_PFIL_HOOKED_INET6
4050#undef GRAB_OUR_PACKETS
4051
4052	/*
4053	 * Perform the bridge forwarding function.
4054	 *
4055	 * Note that bridge_forward calls BRIDGE_UNLOCK
4056	 */
4057	bridge_forward(sc, bif, m);
4058
4059	return (EJUSTRETURN);
4060}
4061
4062/*
4063 * bridge_broadcast:
4064 *
4065 *	Send a frame to all interfaces that are members of
4066 *	the bridge, except for the one on which the packet
4067 *	arrived.
4068 *
4069 *	NOTE: Releases the lock on return.
4070 */
4071static void
4072bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
4073    struct mbuf *m, int runfilt)
4074{
4075#ifndef PFIL_HOOKS
4076#pragma unused(runfilt)
4077#endif
4078	struct bridge_iflist *dbif, *sbif;
4079	struct mbuf *mc;
4080	struct ifnet *dst_if;
4081	int error = 0, used = 0;
4082
4083	sbif = bridge_lookup_member_if(sc, src_if);
4084
4085	BRIDGE_LOCK2REF(sc, error);
4086	if (error) {
4087		m_freem(m);
4088		return;
4089	}
4090
4091#ifdef PFIL_HOOKS
4092	/* Filter on the bridge interface before broadcasting */
4093	if (runfilt && (PFIL_HOOKED(&inet_pfil_hook)
4094#ifdef INET6
4095	    || PFIL_HOOKED(&inet6_pfil_hook)
4096#endif /* INET6 */
4097	    )) {
4098		if (bridge_pfil(&m, sc->sc_ifp, NULL, PFIL_OUT) != 0)
4099			goto out;
4100		if (m == NULL)
4101			goto out;
4102	}
4103#endif /* PFIL_HOOKS */
4104
4105	TAILQ_FOREACH(dbif, &sc->sc_iflist, bif_next) {
4106		dst_if = dbif->bif_ifp;
4107		if (dst_if == src_if)
4108			continue;
4109
		/* Private segments cannot talk to each other */
4111		if (sbif && (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE))
4112			continue;
4113
4114		if ((dbif->bif_flags & IFBIF_STP) &&
4115		    dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
4116			continue;
4117
4118		if ((dbif->bif_flags & IFBIF_DISCOVER) == 0 &&
4119		    (m->m_flags & (M_BCAST|M_MCAST)) == 0)
4120			continue;
4121
4122		if ((dst_if->if_flags & IFF_RUNNING) == 0)
4123			continue;
4124
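		/*
		 * For the last member, hand off the original mbuf instead
		 * of a copy and remember that it has been consumed so it
		 * is not freed below.
		 */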
4125		if (TAILQ_NEXT(dbif, bif_next) == NULL) {
4126			mc = m;
4127			used = 1;
4128		} else {
4129			mc = m_dup(m, M_DONTWAIT);
4130			if (mc == NULL) {
4131				(void) ifnet_stat_increment_out(sc->sc_ifp,
4132				    0, 0, 1);
4133				continue;
4134			}
4135		}
4136
4137#ifdef PFIL_HOOKS
4138		/*
4139		 * Filter on the output interface. Pass a NULL bridge interface
4140		 * pointer so we do not redundantly filter on the bridge for
4141		 * each interface we broadcast on.
4142		 */
4143		if (runfilt && (PFIL_HOOKED(&inet_pfil_hook)
4144#ifdef INET6
4145		    || PFIL_HOOKED(&inet6_pfil_hook)
4146#endif
4147		    )) {
4148			if (used == 0) {
4149				/* Keep the layer3 header aligned */
4150				int i = min(mc->m_pkthdr.len, max_protohdr);
4151				mc = m_copyup(mc, i, ETHER_ALIGN);
4152				if (mc == NULL) {
4153					(void) ifnet_stat_increment_out(
4154					    sc->sc_ifp, 0, 0, 1);
4155					continue;
4156				}
4157			}
4158			if (bridge_pfil(&mc, NULL, dst_if, PFIL_OUT) != 0)
4159				continue;
4160			if (mc == NULL)
4161				continue;
4162		}
4163#endif /* PFIL_HOOKS */
4164
4165		(void) bridge_enqueue(sc, dst_if, mc);
4166	}
4167	if (used == 0)
4168		m_freem(m);
4169
4170#ifdef PFIL_HOOKS
4171out:
4172#endif /* PFIL_HOOKS */
4173
4174	BRIDGE_UNREF(sc);
4175}
4176
4177/*
4178 * bridge_span:
4179 *
 *	Duplicate a packet out one or more interfaces that are in span
 *	mode; the original mbuf is unmodified.
4182 */
4183static void
4184bridge_span(struct bridge_softc *sc, struct mbuf *m)
4185{
4186	struct bridge_iflist *bif;
4187	struct ifnet *dst_if;
4188	struct mbuf *mc;
4189
4190	if (TAILQ_EMPTY(&sc->sc_spanlist))
4191		return;
4192
4193	TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next) {
4194		dst_if = bif->bif_ifp;
4195
4196		if ((dst_if->if_flags & IFF_RUNNING) == 0)
4197			continue;
4198
4199		mc = m_copypacket(m, M_DONTWAIT);
4200		if (mc == NULL) {
4201			(void) ifnet_stat_increment_out(sc->sc_ifp, 0, 0, 1);
4202			continue;
4203		}
4204
4205		(void) bridge_enqueue(sc, dst_if, mc);
4206	}
4207}
4208
4211/*
4212 * bridge_rtupdate:
4213 *
4214 *	Add a bridge routing entry.
4215 */
4216static int
4217bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst, uint16_t vlan,
4218    struct bridge_iflist *bif, int setflags, uint8_t flags)
4219{
4220	struct bridge_rtnode *brt;
4221	int error;
4222
4223	BRIDGE_LOCK_ASSERT(sc);
4224
4225	/* Check the source address is valid and not multicast. */
4226	if (ETHER_IS_MULTICAST(dst) ||
4227	    (dst[0] == 0 && dst[1] == 0 && dst[2] == 0 &&
	    dst[3] == 0 && dst[4] == 0 && dst[5] == 0))
4229		return (EINVAL);
4230
4232	/* 802.1p frames map to vlan 1 */
4233	if (vlan == 0)
4234		vlan = 1;
4235
4236	/*
4237	 * A route for this destination might already exist.  If so,
4238	 * update it, otherwise create a new one.
4239	 */
4240	if ((brt = bridge_rtnode_lookup(sc, dst, vlan)) == NULL) {
4241		if (sc->sc_brtcnt >= sc->sc_brtmax) {
4242			sc->sc_brtexceeded++;
4243			return (ENOSPC);
4244		}
4245		/* Check per interface address limits (if enabled) */
4246		if (bif->bif_addrmax && bif->bif_addrcnt >= bif->bif_addrmax) {
4247			bif->bif_addrexceeded++;
4248			return (ENOSPC);
4249		}
4250
4251		/*
4252		 * Allocate a new bridge forwarding node, and
4253		 * initialize the expiration time and Ethernet
4254		 * address.
4255		 */
4256		brt = zalloc_noblock(bridge_rtnode_pool);
4257		if (brt == NULL)
4258			return (ENOMEM);
4259
4260		if (bif->bif_flags & IFBIF_STICKY)
4261			brt->brt_flags = IFBAF_STICKY;
4262		else
4263			brt->brt_flags = IFBAF_DYNAMIC;
4264
4265		memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
4266		brt->brt_vlan = vlan;
4267
4269		if ((error = bridge_rtnode_insert(sc, brt)) != 0) {
4270			zfree(bridge_rtnode_pool, brt);
4271			return (error);
4272		}
4273		brt->brt_dst = bif;
4274		bif->bif_addrcnt++;
4275	}
4276
4277	if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
4278	    brt->brt_dst != bif) {
4279		brt->brt_dst->bif_addrcnt--;
4280		brt->brt_dst = bif;
4281		brt->brt_dst->bif_addrcnt++;
4282	}
4283
4284	if ((flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
4285		struct timespec now;
4286
4287		nanouptime(&now);
4288		brt->brt_expire = now.tv_sec + sc->sc_brttimeout;
4289	}
4290	if (setflags)
4291		brt->brt_flags = flags;
4292
4294	return (0);
4295}
4296
4297/*
4298 * bridge_rtlookup:
4299 *
4300 *	Lookup the destination interface for an address.
4301 */
4302static struct ifnet *
4303bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
4304{
4305	struct bridge_rtnode *brt;
4306
4307	BRIDGE_LOCK_ASSERT(sc);
4308
4309	if ((brt = bridge_rtnode_lookup(sc, addr, vlan)) == NULL)
4310		return (NULL);
4311
4312	return (brt->brt_ifp);
4313}
4314
4315/*
4316 * bridge_rttrim:
4317 *
 *	Trim the routing table so that we have a number
4319 *	of routing entries less than or equal to the
4320 *	maximum number.
4321 */
4322static void
4323bridge_rttrim(struct bridge_softc *sc)
4324{
4325	struct bridge_rtnode *brt, *nbrt;
4326
4327	BRIDGE_LOCK_ASSERT(sc);
4328
4329	/* Make sure we actually need to do this. */
4330	if (sc->sc_brtcnt <= sc->sc_brtmax)
4331		return;
4332
4333	/* Force an aging cycle; this might trim enough addresses. */
4334	bridge_rtage(sc);
4335	if (sc->sc_brtcnt <= sc->sc_brtmax)
4336		return;
4337
4338	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
4339		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
4340			bridge_rtnode_destroy(sc, brt);
4341			if (sc->sc_brtcnt <= sc->sc_brtmax)
4342				return;
4343		}
4344	}
4345}
4346
4347/*
4348 * bridge_timer:
4349 *
4350 *	Aging timer for the bridge.
4351 */
4352static void
4353bridge_timer(void *arg)
4354{
4355	struct bridge_softc *sc = arg;
4356
4357	BRIDGE_LOCK(sc);
4358
4359	bridge_rtage(sc);
4360
4361	BRIDGE_UNLOCK(sc);
4362
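	/* Re-arm the aging timer as long as the bridge is running. */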
4363	if (sc->sc_ifp->if_flags & IFF_RUNNING) {
4364		struct timespec ts;
4365
4366		ts.tv_sec = bridge_rtable_prune_period;
4367		ts.tv_nsec = 0;
4368		bsd_timeout(bridge_timer, sc, &ts);
4369	}
4370}
4371
4372/*
4373 * bridge_rtage:
4374 *
4375 *	Perform an aging cycle.
4376 */
4377static void
4378bridge_rtage(struct bridge_softc *sc)
4379{
4380	struct bridge_rtnode *brt, *nbrt;
4381
4382	BRIDGE_LOCK_ASSERT(sc);
4383
4384	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
4385		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
4386			struct timespec now;
4387
4388			nanouptime(&now);
4389			if ((unsigned long)now.tv_sec >= brt->brt_expire)
4390				bridge_rtnode_destroy(sc, brt);
4391		}
4392	}
4393}
4394
4395/*
4396 * bridge_rtflush:
4397 *
4398 *	Remove all dynamic addresses from the bridge.
4399 */
4400static void
4401bridge_rtflush(struct bridge_softc *sc, int full)
4402{
4403	struct bridge_rtnode *brt, *nbrt;
4404
4405	BRIDGE_LOCK_ASSERT(sc);
4406
4407	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
4408		if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
4409			bridge_rtnode_destroy(sc, brt);
4410	}
4411}
4412
4413/*
4414 * bridge_rtdaddr:
4415 *
4416 *	Remove an address from the table.
4417 */
4418static int
4419bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
4420{
4421	struct bridge_rtnode *brt;
4422	int found = 0;
4423
4424	BRIDGE_LOCK_ASSERT(sc);
4425
4426	/*
4427	 * If vlan is zero then we want to delete for all vlans so the lookup
4428	 * may return more than one.
4429	 */
4430	while ((brt = bridge_rtnode_lookup(sc, addr, vlan)) != NULL) {
4431		bridge_rtnode_destroy(sc, brt);
4432		found = 1;
4433	}
4434
4435	return (found ? 0 : ENOENT);
4436}
4437
4438/*
4439 * bridge_rtdelete:
4440 *
 *	Delete routes to a specific member interface.
4442 */
4443static void
4444bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp, int full)
4445{
4446	struct bridge_rtnode *brt, *nbrt;
4447
4448	BRIDGE_LOCK_ASSERT(sc);
4449
4450	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
4451		if (brt->brt_ifp == ifp && (full ||
4452		    (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC))
4453			bridge_rtnode_destroy(sc, brt);
4454	}
4455}
4456
4457/*
4458 * bridge_rtable_init:
4459 *
4460 *	Initialize the route table for this bridge.
4461 */
4462static int
4463bridge_rtable_init(struct bridge_softc *sc)
4464{
4465	int i;
4466
4467	sc->sc_rthash = _MALLOC(sizeof (*sc->sc_rthash) * BRIDGE_RTHASH_SIZE,
4468	    M_DEVBUF, M_NOWAIT);
4469	if (sc->sc_rthash == NULL)
4470		return (ENOMEM);
4471
4472	for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
4473		LIST_INIT(&sc->sc_rthash[i]);
4474
4475	sc->sc_rthash_key = random();
4476
4477	LIST_INIT(&sc->sc_rtlist);
4478
4479	return (0);
4480}
4481
4482/*
4483 * bridge_rtable_fini:
4484 *
4485 *	Deconstruct the route table for this bridge.
4486 */
4487static void
4488bridge_rtable_fini(struct bridge_softc *sc)
4489{
4490
4491	KASSERT(sc->sc_brtcnt == 0,
4492	    ("%s: %d bridge routes referenced", __func__, sc->sc_brtcnt));
4493	_FREE(sc->sc_rthash, M_DEVBUF);
4494}
4495
4496/*
4497 * The following hash function is adapted from "Hash Functions" by Bob Jenkins
 * ("Algorithm Alley", Dr. Dobb's Journal, September 1997).
4499 */
4500#define	mix(a, b, c)							\
4501do {									\
4502	a -= b; a -= c; a ^= (c >> 13);					\
4503	b -= c; b -= a; b ^= (a << 8);					\
4504	c -= a; c -= b; c ^= (b >> 13);					\
4505	a -= b; a -= c; a ^= (c >> 12);					\
4506	b -= c; b -= a; b ^= (a << 16);					\
4507	c -= a; c -= b; c ^= (b >> 5);					\
4508	a -= b; a -= c; a ^= (c >> 3);					\
4509	b -= c; b -= a; b ^= (a << 10);					\
4510	c -= a; c -= b; c ^= (b >> 15);					\
4511} while (/*CONSTCOND*/0)
4512
4513static __inline uint32_t
4514bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
4515{
4516	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;
4517
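	/*
	 * 0x9e3779b9 is the golden-ratio constant from Jenkins' hash.
	 * Fold the six bytes of the Ethernet address into 'a' and 'b',
	 * then mix them with the per-bridge random key in 'c'.
	 */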
4518	b += addr[5] << 8;
4519	b += addr[4];
4520	a += addr[3] << 24;
4521	a += addr[2] << 16;
4522	a += addr[1] << 8;
4523	a += addr[0];
4524
4525	mix(a, b, c);
4526
4527	return (c & BRIDGE_RTHASH_MASK);
4528}
4529
4530#undef mix
4531
4532static int
4533bridge_rtnode_addr_cmp(const uint8_t *a, const uint8_t *b)
4534{
4535	int i, d;
4536
4537	for (i = 0, d = 0; i < ETHER_ADDR_LEN && d == 0; i++) {
4538		d = ((int)a[i]) - ((int)b[i]);
4539	}
4540
4541	return (d);
4542}
4543
4544/*
4545 * bridge_rtnode_lookup:
4546 *
 *	Look up a bridge route node for the specified destination.  Compare
 *	the vlan id, or if it is zero, just return the first match.
4549 */
4550static struct bridge_rtnode *
4551bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr,
4552    uint16_t vlan)
4553{
4554	struct bridge_rtnode *brt;
4555	uint32_t hash;
4556	int dir;
4557
4558	BRIDGE_LOCK_ASSERT(sc);
4559
4560	hash = bridge_rthash(sc, addr);
4561	LIST_FOREACH(brt, &sc->sc_rthash[hash], brt_hash) {
4562		dir = bridge_rtnode_addr_cmp(addr, brt->brt_addr);
4563		if (dir == 0 && (brt->brt_vlan == vlan || vlan == 0))
4564			return (brt);
4565		if (dir > 0)
4566			return (NULL);
4567	}
4568
4569	return (NULL);
4570}
4571
4572/*
4573 * bridge_rtnode_insert:
4574 *
4575 *	Insert the specified bridge node into the route table.  We
4576 *	assume the entry is not already in the table.
4577 */
4578static int
4579bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
4580{
4581	struct bridge_rtnode *lbrt;
4582	uint32_t hash;
4583	int dir;
4584
4585	BRIDGE_LOCK_ASSERT(sc);
4586
4587	hash = bridge_rthash(sc, brt->brt_addr);
4588
4589	lbrt = LIST_FIRST(&sc->sc_rthash[hash]);
4590	if (lbrt == NULL) {
4591		LIST_INSERT_HEAD(&sc->sc_rthash[hash], brt, brt_hash);
4592		goto out;
4593	}
4594
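	/*
	 * The hash chain is kept sorted by Ethernet address so that
	 * bridge_rtnode_lookup() can stop scanning once it has walked
	 * past the slot where the address would be.
	 */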
4595	do {
4596		dir = bridge_rtnode_addr_cmp(brt->brt_addr, lbrt->brt_addr);
4597		if (dir == 0 && brt->brt_vlan == lbrt->brt_vlan)
4598			return (EEXIST);
4599		if (dir > 0) {
4600			LIST_INSERT_BEFORE(lbrt, brt, brt_hash);
4601			goto out;
4602		}
4603		if (LIST_NEXT(lbrt, brt_hash) == NULL) {
4604			LIST_INSERT_AFTER(lbrt, brt, brt_hash);
4605			goto out;
4606		}
4607		lbrt = LIST_NEXT(lbrt, brt_hash);
4608	} while (lbrt != NULL);
4609
4610#ifdef DIAGNOSTIC
4611	panic("bridge_rtnode_insert: impossible");
4612#endif
4613
4614out:
4615	LIST_INSERT_HEAD(&sc->sc_rtlist, brt, brt_list);
4616	sc->sc_brtcnt++;
4617
4618	return (0);
4619}
4620
4621/*
4622 * bridge_rtnode_destroy:
4623 *
4624 *	Destroy a bridge rtnode.
4625 */
4626static void
4627bridge_rtnode_destroy(struct bridge_softc *sc, struct bridge_rtnode *brt)
4628{
4629	BRIDGE_LOCK_ASSERT(sc);
4630
4631	LIST_REMOVE(brt, brt_hash);
4632
4633	LIST_REMOVE(brt, brt_list);
4634	sc->sc_brtcnt--;
4635	brt->brt_dst->bif_addrcnt--;
4636	zfree(bridge_rtnode_pool, brt);
4637}
4638
4639#if BRIDGESTP
4640/*
4641 * bridge_rtable_expire:
4642 *
4643 *	Set the expiry time for all routes on an interface.
4644 */
4645static void
4646bridge_rtable_expire(struct ifnet *ifp, int age)
4647{
4648	struct bridge_softc *sc = ifp->if_bridge;
4649	struct bridge_rtnode *brt;
4650
4651	BRIDGE_LOCK(sc);
4652
4653	/*
4654	 * If the age is zero then flush, otherwise set all the expiry times to
4655	 * age for the interface
4656	 */
4657	if (age == 0) {
4658		bridge_rtdelete(sc, ifp, IFBF_FLUSHDYN);
4659	} else {
4660		LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
4661			struct timespec now;
4662
4663			nanouptime(&now);
4664			/* Cap the expiry time to 'age' */
4665			if (brt->brt_ifp == ifp &&
4666			    brt->brt_expire > (unsigned long)now.tv_sec + age &&
4667			    (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
4668				brt->brt_expire =
4669				    (unsigned long)now.tv_sec + age;
4670		}
4671	}
4672	BRIDGE_UNLOCK(sc);
4673}
4674
4675/*
4676 * bridge_state_change:
4677 *
4678 *	Callback from the bridgestp code when a port changes states.
4679 */
4680static void
4681bridge_state_change(struct ifnet *ifp, int state)
4682{
4683	struct bridge_softc *sc = ifp->if_bridge;
4684	static const char *stpstates[] = {
4685		"disabled",
4686		"listening",
4687		"learning",
4688		"forwarding",
4689		"blocking",
4690		"discarding"
4691	};
4692
4693	if (log_stp)
4694		log(LOG_NOTICE, "%s%d: state changed to %s on %s%d\n",
4695		    ifnet_name(sc->sc_ifp), ifnet_unit(sc->sc_ifp),
4696		    stpstates[state], ifnet_name(ifp), ifnet_unit(ifp));
4697}
4698#endif /* BRIDGESTP */
4699
4700#ifdef PFIL_HOOKS
4701/*
4702 * Send bridge packets through pfil if they are one of the types pfil can deal
4703 * with, or if they are ARP or REVARP.  (pfil will pass ARP and REVARP without
 * question.)  If bifp or ifp is NULL, packet filtering is skipped for
 * that interface.
4706 */
4707static int
4708bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir)
4709{
4710	int snap, error, i, hlen;
4711	struct ether_header *eh1, eh2;
4712	struct ip_fw_args args;
4713	struct ip *ip;
4714	struct llc llc1;
4715	u_int16_t ether_type;
4716
4717	snap = 0;
	error = -1;	/* Default to an error unless explicitly set to 0 */
4719
4720#if 0
	/* we may return with the IP fields swapped, ensure it's not shared */
4722	KASSERT(M_WRITABLE(*mp), ("%s: modifying a shared mbuf", __func__));
4723#endif
4724
4725	if (pfil_bridge == 0 && pfil_member == 0 && pfil_ipfw == 0)
4726		return (0); /* filtering is disabled */
4727
4728	i = min((*mp)->m_pkthdr.len, max_protohdr);
4729	if ((*mp)->m_len < i) {
4730		*mp = m_pullup(*mp, i);
4731		if (*mp == NULL) {
4732			printf("%s: m_pullup failed\n", __func__);
4733			return (-1);
4734		}
4735	}
4736
4737	eh1 = mtod(*mp, struct ether_header *);
4738	ether_type = ntohs(eh1->ether_type);
4739
4740	/*
4741	 * Check for SNAP/LLC.
4742	 */
4743	if (ether_type < ETHERMTU) {
4744		struct llc *llc2 = (struct llc *)(eh1 + 1);
4745
4746		if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
4747		    llc2->llc_dsap == LLC_SNAP_LSAP &&
4748		    llc2->llc_ssap == LLC_SNAP_LSAP &&
4749		    llc2->llc_control == LLC_UI) {
4750			ether_type = htons(llc2->llc_un.type_snap.ether_type);
4751			snap = 1;
4752		}
4753	}
4754
4755	/*
4756	 * If we're trying to filter bridge traffic, don't look at anything
4757	 * other than IP and ARP traffic.  If the filter doesn't understand
4758	 * IPv6, don't allow IPv6 through the bridge either.  This is lame
4759	 * since if we really wanted, say, an AppleTalk filter, we are hosed,
4760	 * but of course we don't have an AppleTalk filter to begin with.
4761	 * (Note that since pfil doesn't understand ARP it will pass *ALL*
4762	 * ARP traffic.)
4763	 */
4764	switch (ether_type) {
4765		case ETHERTYPE_ARP:
4766		case ETHERTYPE_REVARP:
4767			if (pfil_ipfw_arp == 0)
4768				return (0); /* Automatically pass */
4769			break;
4770
4771		case ETHERTYPE_IP:
4772#ifdef INET6
4773		case ETHERTYPE_IPV6:
4774#endif /* INET6 */
4775			break;
4776		default:
4777			/*
			 * Check to see if the user wants to pass non-IP
			 * packets; these will not be checked by pfil(9) and
			 * would be passed unconditionally, so the default is
			 * to drop.
4781			 */
4782			if (pfil_onlyip)
4783				goto bad;
4784	}
4785
4786	/* Strip off the Ethernet header and keep a copy. */
4787	m_copydata(*mp, 0, ETHER_HDR_LEN, (caddr_t)&eh2);
4788	m_adj(*mp, ETHER_HDR_LEN);
4789
4790	/* Strip off snap header, if present */
4791	if (snap) {
4792		m_copydata(*mp, 0, sizeof (struct llc), (caddr_t)&llc1);
4793		m_adj(*mp, sizeof (struct llc));
4794	}
4795
4796	/*
4797	 * Check the IP header for alignment and errors
4798	 */
4799	if (dir == PFIL_IN) {
4800		switch (ether_type) {
4801			case ETHERTYPE_IP:
4802				error = bridge_ip_checkbasic(mp);
4803				break;
4804#ifdef INET6
4805			case ETHERTYPE_IPV6:
4806				error = bridge_ip6_checkbasic(mp);
4807				break;
4808#endif /* INET6 */
4809			default:
4810				error = 0;
4811		}
4812		if (error)
4813			goto bad;
4814	}
4815
4816	if (IPFW_LOADED && pfil_ipfw != 0 && dir == PFIL_OUT && ifp != NULL) {
4817		error = -1;
4818		args.rule = ip_dn_claim_rule(*mp);
4819		if (args.rule != NULL && fw_one_pass)
4820			goto ipfwpass; /* packet already partially processed */
4821
4822		args.m = *mp;
4823		args.oif = ifp;
4824		args.next_hop = NULL;
4825		args.eh = &eh2;
4826		args.inp = NULL;	/* used by ipfw uid/gid/jail rules */
4827		i = ip_fw_chk_ptr(&args);
4828		*mp = args.m;
4829
4830		if (*mp == NULL)
4831			return (error);
4832
4833		if (DUMMYNET_LOADED && (i == IP_FW_DUMMYNET)) {
4834
4835			/* put the Ethernet header back on */
4836			M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT);
4837			if (*mp == NULL)
4838				return (error);
4839			bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);
4840
4841			/*
4842			 * Pass the pkt to dummynet, which consumes it. The
4843			 * packet will return to us via bridge_dummynet().
4844			 */
4845			args.oif = ifp;
4846			ip_dn_io_ptr(mp, DN_TO_IFB_FWD, &args, DN_CLIENT_IPFW);
4847			return (error);
4848		}
4849
4850		if (i != IP_FW_PASS) /* drop */
4851			goto bad;
4852	}
4853
4854ipfwpass:
4855	error = 0;
4856
4857	/*
4858	 * Run the packet through pfil
4859	 */
4860	switch (ether_type) {
4861	case ETHERTYPE_IP:
4862		/*
		 * Before calling the firewall, swap fields the same as
		 * IP does.  Here we assume the header is contiguous.
4865		 */
4866		ip = mtod(*mp, struct ip *);
4867
4868		ip->ip_len = ntohs(ip->ip_len);
4869		ip->ip_off = ntohs(ip->ip_off);
4870
4871		/*
4872		 * Run pfil on the member interface and the bridge, both can
4873		 * be skipped by clearing pfil_member or pfil_bridge.
4874		 *
4875		 * Keep the order:
4876		 *   in_if -> bridge_if -> out_if
4877		 */
4878		if (pfil_bridge && dir == PFIL_OUT && bifp != NULL)
4879			error = pfil_run_hooks(&inet_pfil_hook, mp, bifp,
4880			    dir, NULL);
4881
4882		if (*mp == NULL || error != 0) /* filter may consume */
4883			break;
4884
4885		if (pfil_member && ifp != NULL)
4886			error = pfil_run_hooks(&inet_pfil_hook, mp, ifp,
4887			    dir, NULL);
4888
4889		if (*mp == NULL || error != 0) /* filter may consume */
4890			break;
4891
4892		if (pfil_bridge && dir == PFIL_IN && bifp != NULL)
4893			error = pfil_run_hooks(&inet_pfil_hook, mp, bifp,
4894			    dir, NULL);
4895
4896		if (*mp == NULL || error != 0) /* filter may consume */
4897			break;
4898
4899		/* check if we need to fragment the packet */
4900		if (pfil_member && ifp != NULL && dir == PFIL_OUT) {
4901			i = (*mp)->m_pkthdr.len;
4902			if (i > ifp->if_mtu) {
4903				error = bridge_fragment(ifp, *mp, &eh2, snap,
4904				    &llc1);
4905				return (error);
4906			}
4907		}
4908
4909		/* Recalculate the ip checksum and restore byte ordering */
4910		ip = mtod(*mp, struct ip *);
4911		hlen = ip->ip_hl << 2;
4912		if (hlen < sizeof (struct ip))
4913			goto bad;
4914		if (hlen > (*mp)->m_len) {
4915			if ((*mp = m_pullup(*mp, hlen)) == 0)
4916				goto bad;
4917			ip = mtod(*mp, struct ip *);
4918			if (ip == NULL)
4919				goto bad;
4920		}
4921		ip->ip_len = htons(ip->ip_len);
4922		ip->ip_off = htons(ip->ip_off);
4923		ip->ip_sum = 0;
4924		if (hlen == sizeof (struct ip))
4925			ip->ip_sum = in_cksum_hdr(ip);
4926		else
4927			ip->ip_sum = in_cksum(*mp, hlen);
4928
4929		break;
4930#ifdef INET6
4931	case ETHERTYPE_IPV6:
4932		if (pfil_bridge && dir == PFIL_OUT && bifp != NULL)
4933			error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp,
4934			    dir, NULL);
4935
4936		if (*mp == NULL || error != 0) /* filter may consume */
4937			break;
4938
4939		if (pfil_member && ifp != NULL)
4940			error = pfil_run_hooks(&inet6_pfil_hook, mp, ifp,
4941			    dir, NULL);
4942
4943		if (*mp == NULL || error != 0) /* filter may consume */
4944			break;
4945
4946		if (pfil_bridge && dir == PFIL_IN && bifp != NULL)
4947			error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp,
4948			    dir, NULL);
4949		break;
4950#endif
4951	default:
4952		error = 0;
4953		break;
4954	}
4955
4956	if (*mp == NULL)
4957		return (error);
4958	if (error != 0)
4959		goto bad;
4960
4961	error = -1;
4962
4963	/*
4964	 * Finally, put everything back the way it was and return
4965	 */
4966	if (snap) {
4967		M_PREPEND(*mp, sizeof (struct llc), M_DONTWAIT);
4968		if (*mp == NULL)
4969			return (error);
4970		bcopy(&llc1, mtod(*mp, caddr_t), sizeof (struct llc));
4971	}
4972
4973	M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT);
4974	if (*mp == NULL)
4975		return (error);
4976	bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);
4977
4978	return (0);
4979
4980bad:
4981	m_freem(*mp);
4982	*mp = NULL;
4983	return (error);
4984}
4985
4986
4987/*
 * Perform basic checks on the header size since
 * pfil assumes ip_input has already processed it.
 * Cut-and-pasted from ip_input.c.
4991 * Given how simple the IPv6 version is,
4992 * does the IPv4 version really need to be
4993 * this complicated?
4994 *
4995 * XXX Should we update ipstat here, or not?
4996 * XXX Right now we update ipstat but not
4997 * XXX csum_counter.
4998 */
4999static int
5000bridge_ip_checkbasic(struct mbuf **mp)
5001{
5002	struct mbuf *m = *mp;
5003	struct ip *ip;
5004	int len, hlen;
5005	u_short sum;
5006
5007	if (*mp == NULL)
5008		return (-1);
5009
5010	if (IP_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
5011		/* max_linkhdr is already rounded up to nearest 4-byte */
5012		if ((m = m_copyup(m, sizeof (struct ip),
5013		    max_linkhdr)) == NULL) {
5014			/* XXXJRT new stat, please */
5015			ipstat.ips_toosmall++;
5016			goto bad;
5017		}
5018	} else if (__predict_false(m->m_len < sizeof (struct ip))) {
5019		if ((m = m_pullup(m, sizeof (struct ip))) == NULL) {
5020			ipstat.ips_toosmall++;
5021			goto bad;
5022		}
5023	}
5024	ip = mtod(m, struct ip *);
5025	if (ip == NULL) goto bad;
5026
5027	if (ip->ip_v != IPVERSION) {
5028		ipstat.ips_badvers++;
5029		goto bad;
5030	}
5031	hlen = ip->ip_hl << 2;
5032	if (hlen < sizeof (struct ip)) { /* minimum header length */
5033		ipstat.ips_badhlen++;
5034		goto bad;
5035	}
5036	if (hlen > m->m_len) {
5037		if ((m = m_pullup(m, hlen)) == 0) {
5038			ipstat.ips_badhlen++;
5039			goto bad;
5040		}
5041		ip = mtod(m, struct ip *);
5042		if (ip == NULL) goto bad;
5043	}
5044
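	/*
	 * If the driver has already verified the IP checksum, use that
	 * result; otherwise compute the checksum over the header.
	 */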
5045	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
5046		sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
5047	} else {
5048		if (hlen == sizeof (struct ip)) {
5049			sum = in_cksum_hdr(ip);
5050		} else {
5051			sum = in_cksum(m, hlen);
5052		}
5053	}
5054	if (sum) {
5055		ipstat.ips_badsum++;
5056		goto bad;
5057	}
5058
5059	/* Retrieve the packet length. */
5060	len = ntohs(ip->ip_len);
5061
5062	/*
5063	 * Check for additional length bogosity
5064	 */
5065	if (len < hlen) {
5066		ipstat.ips_badlen++;
5067		goto bad;
5068	}
5069
5070	/*
5071	 * Check that the amount of data in the buffers
	 * is at least as much as the IP header would have us expect.
5073	 * Drop packet if shorter than we expect.
5074	 */
5075	if (m->m_pkthdr.len < len) {
5076		ipstat.ips_tooshort++;
5077		goto bad;
5078	}
5079
5080	/* Checks out, proceed */
5081	*mp = m;
5082	return (0);
5083
5084bad:
5085	*mp = m;
5086	return (-1);
5087}
5088
5089#ifdef INET6
5090/*
5091 * Same as above, but for IPv6.
5092 * Cut-and-pasted from ip6_input.c.
5093 * XXX Should we update ip6stat, or not?
5094 */
5095static int
5096bridge_ip6_checkbasic(struct mbuf **mp)
5097{
5098	struct mbuf *m = *mp;
5099	struct ip6_hdr *ip6;
5100
5101	/*
5102	 * If the IPv6 header is not aligned, slurp it up into a new
5103	 * mbuf with space for link headers, in the event we forward
5104	 * it.  Otherwise, if it is aligned, make sure the entire base
5105	 * IPv6 header is in the first mbuf of the chain.
5106	 */
5107	if (IP6_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
5108		struct ifnet *inifp = m->m_pkthdr.rcvif;
5109		/* max_linkhdr is already rounded up to nearest 4-byte */
5110		if ((m = m_copyup(m, sizeof (struct ip6_hdr),
5111		    max_linkhdr)) == NULL) {
5112			/* XXXJRT new stat, please */
5113			ip6stat.ip6s_toosmall++;
5114			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
5115			goto bad;
5116		}
5117	} else if (__predict_false(m->m_len < sizeof (struct ip6_hdr))) {
5118		struct ifnet *inifp = m->m_pkthdr.rcvif;
5119		if ((m = m_pullup(m, sizeof (struct ip6_hdr))) == NULL) {
5120			ip6stat.ip6s_toosmall++;
5121			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
5122			goto bad;
5123		}
5124	}
5125
5126	ip6 = mtod(m, struct ip6_hdr *);
5127
5128	if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
5129		ip6stat.ip6s_badvers++;
5130		in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
5131		goto bad;
5132	}
5133
5134	/* Checks out, proceed */
5135	*mp = m;
5136	return (0);
5137
5138bad:
5139	*mp = m;
5140	return (-1);
5141}
5142#endif /* INET6 */
5143
5144/*
5145 * bridge_fragment:
5146 *
5147 *	Return a fragmented mbuf chain.
5148 */
5149static int
5150bridge_fragment(struct ifnet *ifp, struct mbuf *m, struct ether_header *eh,
5151    int snap, struct llc *llc)
5152{
5153	struct mbuf *m0;
5154	struct ip *ip;
5155	int error = -1;
5156
5157	if (m->m_len < sizeof (struct ip) &&
5158	    (m = m_pullup(m, sizeof (struct ip))) == NULL)
5159		goto out;
5160	ip = mtod(m, struct ip *);
5161
5162	error = ip_fragment(ip, &m, ifp->if_mtu, ifp->if_hwassist,
5163	    CSUM_DELAY_IP);
5164	if (error)
5165		goto out;
5166
5167	/* walk the chain and re-add the Ethernet header */
5168	for (m0 = m; m0; m0 = m0->m_nextpkt) {
5169		if (error == 0) {
5170			if (snap) {
5171				M_PREPEND(m0, sizeof (struct llc), M_DONTWAIT);
5172				if (m0 == NULL) {
5173					error = ENOBUFS;
5174					continue;
5175				}
5176				bcopy(llc, mtod(m0, caddr_t),
5177				    sizeof (struct llc));
5178			}
5179			M_PREPEND(m0, ETHER_HDR_LEN, M_DONTWAIT);
5180			if (m0 == NULL) {
5181				error = ENOBUFS;
5182				continue;
5183			}
5184			bcopy(eh, mtod(m0, caddr_t), ETHER_HDR_LEN);
5185		} else {
5186			m_freem(m);
5187		}
5188	}
5189
5190	if (error == 0)
5191		ipstat.ips_fragmented++;
5192
5193	return (error);
5194
5195out:
5196	if (m != NULL)
5197		m_freem(m);
5198	return (error);
5199}
5200#endif /* PFIL_HOOKS */
5201
5202static errno_t
5203bridge_set_bpf_tap(ifnet_t ifp, bpf_tap_mode mode, bpf_packet_func bpf_callback)
5204{
5205	struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp);
5206
5207	/* TBD locking */
5208	if (sc == NULL || (sc->sc_flags & SCF_DETACHING)) {
5209		return (ENODEV);
5210	}
5211
5212	switch (mode) {
5213		case BPF_TAP_DISABLE:
5214			sc->sc_bpf_input = sc->sc_bpf_output = NULL;
5215			break;
5216
5217		case BPF_TAP_INPUT:
5218			sc->sc_bpf_input = bpf_callback;
5219			break;
5220
5221		case BPF_TAP_OUTPUT:
5222			sc->sc_bpf_output = bpf_callback;
5223			break;
5224
5225		case BPF_TAP_INPUT_OUTPUT:
5226			sc->sc_bpf_input = sc->sc_bpf_output = bpf_callback;
5227			break;
5228
5229		default:
5230			break;
5231	}
5232
5233	return (0);
5234}
5235
5236static void
5237bridge_detach(ifnet_t ifp)
5238{
5239	struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp);
5240
5241#if BRIDGESTP
5242	bstp_detach(&sc->sc_stp);
5243#endif /* BRIDGESTP */
5244
5245	/* Tear down the routing table. */
5246	bridge_rtable_fini(sc);
5247
5248	lck_mtx_lock(bridge_list_mtx);
5249	LIST_REMOVE(sc, sc_list);
5250	lck_mtx_unlock(bridge_list_mtx);
5251
5252	ifnet_release(ifp);
5253
5254	lck_mtx_free(sc->sc_mtx, bridge_lock_grp);
5255
5256	_FREE(sc, M_DEVBUF);
5257}
5258
5259__private_extern__ errno_t
5260bridge_bpf_input(ifnet_t ifp, struct mbuf *m)
5261{
5262	struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp);
5263
5264	if (sc->sc_bpf_input) {
5265		if (mbuf_pkthdr_rcvif(m) != ifp) {
5266			printf("%s: rcvif: %p != ifp %p\n", __func__,
5267			    mbuf_pkthdr_rcvif(m), ifp);
5268		}
5269		(*sc->sc_bpf_input)(ifp, m);
5270	}
5271	return (0);
5272}
5273
5274__private_extern__ errno_t
5275bridge_bpf_output(ifnet_t ifp, struct mbuf *m)
5276{
5277	struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp);
5278
5279	if (sc->sc_bpf_output) {
5280		(*sc->sc_bpf_output)(ifp, m);
5281	}
5282	return (0);
5283}
5284